File: llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
Warning: line 2256, column 23: Access to field 'IsScheduled' results in a dereference of a null pointer (loaded from variable 'SD')
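
The warning reports a path on which 'SD' can still be null when 'SD->IsScheduled' is read. A minimal sketch of the usual guard for this kind of report, assuming SD is a 'ScheduleData *' obtained from a lookup that may fail (the code at line 2256 itself is outside this excerpt):

    ScheduleData *SD = getScheduleData(I); // may return nullptr
    if (SD && SD->IsScheduled) {
      // ... only dereference SD on this guarded path ...
    }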
1 | //===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This pass implements the Bottom Up SLP vectorizer. It detects consecutive | |||
10 | // stores that can be put together into vector-stores. Next, it attempts to | |||
11 | // construct vectorizable tree using the use-def chains. If a profitable tree | |||
12 | // was found, the SLP vectorizer performs vectorization on the tree. | |||
13 | // | |||
14 | // The pass is inspired by the work described in the paper: | |||
15 | // "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks. | |||
16 | // | |||
17 | //===----------------------------------------------------------------------===// | |||
18 | ||||
19 | #include "llvm/Transforms/Vectorize/SLPVectorizer.h" | |||
20 | #include "llvm/ADT/DenseMap.h" | |||
21 | #include "llvm/ADT/DenseSet.h" | |||
22 | #include "llvm/ADT/Optional.h" | |||
23 | #include "llvm/ADT/PostOrderIterator.h" | |||
24 | #include "llvm/ADT/PriorityQueue.h" | |||
25 | #include "llvm/ADT/STLExtras.h" | |||
26 | #include "llvm/ADT/SetOperations.h" | |||
27 | #include "llvm/ADT/SetVector.h" | |||
28 | #include "llvm/ADT/SmallBitVector.h" | |||
29 | #include "llvm/ADT/SmallPtrSet.h" | |||
30 | #include "llvm/ADT/SmallSet.h" | |||
31 | #include "llvm/ADT/SmallString.h" | |||
32 | #include "llvm/ADT/Statistic.h" | |||
33 | #include "llvm/ADT/iterator.h" | |||
34 | #include "llvm/ADT/iterator_range.h" | |||
35 | #include "llvm/Analysis/AliasAnalysis.h" | |||
36 | #include "llvm/Analysis/AssumptionCache.h" | |||
37 | #include "llvm/Analysis/CodeMetrics.h" | |||
38 | #include "llvm/Analysis/DemandedBits.h" | |||
39 | #include "llvm/Analysis/GlobalsModRef.h" | |||
40 | #include "llvm/Analysis/IVDescriptors.h" | |||
41 | #include "llvm/Analysis/LoopAccessAnalysis.h" | |||
42 | #include "llvm/Analysis/LoopInfo.h" | |||
43 | #include "llvm/Analysis/MemoryLocation.h" | |||
44 | #include "llvm/Analysis/OptimizationRemarkEmitter.h" | |||
45 | #include "llvm/Analysis/ScalarEvolution.h" | |||
46 | #include "llvm/Analysis/ScalarEvolutionExpressions.h" | |||
47 | #include "llvm/Analysis/TargetLibraryInfo.h" | |||
48 | #include "llvm/Analysis/TargetTransformInfo.h" | |||
49 | #include "llvm/Analysis/ValueTracking.h" | |||
50 | #include "llvm/Analysis/VectorUtils.h" | |||
51 | #include "llvm/IR/Attributes.h" | |||
52 | #include "llvm/IR/BasicBlock.h" | |||
53 | #include "llvm/IR/Constant.h" | |||
54 | #include "llvm/IR/Constants.h" | |||
55 | #include "llvm/IR/DataLayout.h" | |||
56 | #include "llvm/IR/DebugLoc.h" | |||
57 | #include "llvm/IR/DerivedTypes.h" | |||
58 | #include "llvm/IR/Dominators.h" | |||
59 | #include "llvm/IR/Function.h" | |||
60 | #include "llvm/IR/IRBuilder.h" | |||
61 | #include "llvm/IR/InstrTypes.h" | |||
62 | #include "llvm/IR/Instruction.h" | |||
63 | #include "llvm/IR/Instructions.h" | |||
64 | #include "llvm/IR/IntrinsicInst.h" | |||
65 | #include "llvm/IR/Intrinsics.h" | |||
66 | #include "llvm/IR/Module.h" | |||
67 | #include "llvm/IR/NoFolder.h" | |||
68 | #include "llvm/IR/Operator.h" | |||
69 | #include "llvm/IR/PatternMatch.h" | |||
70 | #include "llvm/IR/Type.h" | |||
71 | #include "llvm/IR/Use.h" | |||
72 | #include "llvm/IR/User.h" | |||
73 | #include "llvm/IR/Value.h" | |||
74 | #include "llvm/IR/ValueHandle.h" | |||
75 | #include "llvm/IR/Verifier.h" | |||
76 | #include "llvm/InitializePasses.h" | |||
77 | #include "llvm/Pass.h" | |||
78 | #include "llvm/Support/Casting.h" | |||
79 | #include "llvm/Support/CommandLine.h" | |||
80 | #include "llvm/Support/Compiler.h" | |||
81 | #include "llvm/Support/DOTGraphTraits.h" | |||
82 | #include "llvm/Support/Debug.h" | |||
83 | #include "llvm/Support/ErrorHandling.h" | |||
84 | #include "llvm/Support/GraphWriter.h" | |||
85 | #include "llvm/Support/InstructionCost.h" | |||
86 | #include "llvm/Support/KnownBits.h" | |||
87 | #include "llvm/Support/MathExtras.h" | |||
88 | #include "llvm/Support/raw_ostream.h" | |||
89 | #include "llvm/Transforms/Utils/InjectTLIMappings.h" | |||
90 | #include "llvm/Transforms/Utils/LoopUtils.h" | |||
91 | #include "llvm/Transforms/Vectorize.h" | |||
92 | #include <algorithm> | |||
93 | #include <cassert> | |||
94 | #include <cstdint> | |||
95 | #include <iterator> | |||
96 | #include <memory> | |||
97 | #include <set> | |||
98 | #include <string> | |||
99 | #include <tuple> | |||
100 | #include <utility> | |||
101 | #include <vector> | |||
102 | ||||
103 | using namespace llvm; | |||
104 | using namespace llvm::PatternMatch; | |||
105 | using namespace slpvectorizer; | |||
106 | ||||
107 | #define SV_NAME "slp-vectorizer" | |||
108 | #define DEBUG_TYPE "SLP" | |||
109 | ||||
110 | STATISTIC(NumVectorInstructions, "Number of vector instructions generated"); | |||
111 | ||||
112 | cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden, | |||
113 | cl::desc("Run the SLP vectorization passes")); | |||
114 | ||||
115 | static cl::opt<int> | |||
116 | SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden, | |||
117 | cl::desc("Only vectorize if you gain more than this " | |||
118 | "number ")); | |||
119 | ||||
120 | static cl::opt<bool> | |||
121 | ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden, | |||
122 | cl::desc("Attempt to vectorize horizontal reductions")); | |||
123 | ||||
124 | static cl::opt<bool> ShouldStartVectorizeHorAtStore( | |||
125 | "slp-vectorize-hor-store", cl::init(false), cl::Hidden, | |||
126 | cl::desc( | |||
127 | "Attempt to vectorize horizontal reductions feeding into a store")); | |||
128 | ||||
129 | static cl::opt<int> | |||
130 | MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden, | |||
131 | cl::desc("Attempt to vectorize for this register size in bits")); | |||
132 | ||||
133 | static cl::opt<unsigned> | |||
134 | MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden, | |||
135 | cl::desc("Maximum SLP vectorization factor (0=unlimited)")); | |||
136 | ||||
137 | static cl::opt<int> | |||
138 | MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden, | |||
139 | cl::desc("Maximum depth of the lookup for consecutive stores.")); | |||
140 | ||||
141 | /// Limits the size of scheduling regions in a block. | |||
142 | /// It avoids long compile times for _very_ large blocks where vector | |||
143 | /// instructions are spread over a wide range. | |||
144 | /// This limit is way higher than needed by real-world functions. | |||
145 | static cl::opt<int> | |||
146 | ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden, | |||
147 | cl::desc("Limit the size of the SLP scheduling region per block")); | |||
148 | ||||
149 | static cl::opt<int> MinVectorRegSizeOption( | |||
150 | "slp-min-reg-size", cl::init(128), cl::Hidden, | |||
151 | cl::desc("Attempt to vectorize for this register size in bits")); | |||
152 | ||||
153 | static cl::opt<unsigned> RecursionMaxDepth( | |||
154 | "slp-recursion-max-depth", cl::init(12), cl::Hidden, | |||
155 | cl::desc("Limit the recursion depth when building a vectorizable tree")); | |||
156 | ||||
157 | static cl::opt<unsigned> MinTreeSize( | |||
158 | "slp-min-tree-size", cl::init(3), cl::Hidden, | |||
159 | cl::desc("Only vectorize small trees if they are fully vectorizable")); | |||
160 | ||||
161 | // The maximum depth that the look-ahead score heuristic will explore. | |||
162 | // The higher this value, the higher the compilation time overhead. | |||
163 | static cl::opt<int> LookAheadMaxDepth( | |||
164 | "slp-max-look-ahead-depth", cl::init(2), cl::Hidden, | |||
165 | cl::desc("The maximum look-ahead depth for operand reordering scores")); | |||
166 | ||||
167 | // The Look-ahead heuristic goes through the users of the bundle to calculate | |||
168 | // the users cost in getExternalUsesCost(). To avoid compilation time increase | |||
169 | // we limit the number of users visited to this value. | |||
170 | static cl::opt<unsigned> LookAheadUsersBudget( | |||
171 | "slp-look-ahead-users-budget", cl::init(2), cl::Hidden, | |||
172 | cl::desc("The maximum number of users to visit while visiting the " | |||
173 | "predecessors. This prevents compilation time increase.")); | |||
174 | ||||
175 | static cl::opt<bool> | |||
176 | ViewSLPTree("view-slp-tree", cl::Hidden, | |||
177 | cl::desc("Display the SLP trees with Graphviz")); | |||
178 | ||||
179 | // Limit the number of alias checks. The limit is chosen so that | |||
180 | // it has no negative effect on the llvm benchmarks. | |||
181 | static const unsigned AliasedCheckLimit = 10; | |||
182 | ||||
183 | // Another limit for the alias checks: The maximum distance between load/store | |||
184 | // instructions where alias checks are done. | |||
185 | // This limit is useful for very large basic blocks. | |||
186 | static const unsigned MaxMemDepDistance = 160; | |||
187 | ||||
188 | /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling | |||
189 | /// regions to be handled. | |||
190 | static const int MinScheduleRegionSize = 16; | |||
191 | ||||
192 | /// Predicate for the element types that the SLP vectorizer supports. | |||
193 | /// | |||
194 | /// The most important things to filter here are types which are invalid in LLVM | |||
195 | /// vectors. We also filter target-specific types which have absolutely no | |||
196 | /// meaningful vectorization path such as x86_fp80 and ppc_f128. This just | |||
197 | /// avoids spending time checking the cost model and realizing that they will | |||
198 | /// be inevitably scalarized. | |||
199 | static bool isValidElementType(Type *Ty) { | |||
200 | return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() && | |||
201 | !Ty->isPPC_FP128Ty(); | |||
202 | } | |||
203 | ||||
204 | /// \returns True if the value is a constant (but not globals/constant | |||
205 | /// expressions). | |||
206 | static bool isConstant(Value *V) { | |||
207 | return isa<Constant>(V) && !isa<ConstantExpr>(V) && !isa<GlobalValue>(V); | |||
208 | } | |||
209 | ||||
210 | /// Checks if \p V is one of vector-like instructions, i.e. undef, | |||
211 | /// insertelement/extractelement with constant indices for fixed vector type or | |||
212 | /// extractvalue instruction. | |||
213 | static bool isVectorLikeInstWithConstOps(Value *V) { | |||
214 | if (!isa<InsertElementInst, ExtractElementInst>(V) && | |||
215 | !isa<ExtractValueInst, UndefValue>(V)) | |||
216 | return false; | |||
217 | auto *I = dyn_cast<Instruction>(V); | |||
218 | if (!I || isa<ExtractValueInst>(I)) | |||
219 | return true; | |||
220 | if (!isa<FixedVectorType>(I->getOperand(0)->getType())) | |||
221 | return false; | |||
222 | if (isa<ExtractElementInst, ExtractValueInst>(I)) | |||
223 | return isConstant(I->getOperand(1)); | |||
224 | assert(isa<InsertElementInst>(V) && "Expected only insertelement."); | |||
225 | return isConstant(I->getOperand(2)); | |||
226 | } | |||
227 | ||||
228 | /// \returns true if all of the instructions in \p VL are in the same block or | |||
229 | /// false otherwise. | |||
230 | static bool allSameBlock(ArrayRef<Value *> VL) { | |||
231 | Instruction *I0 = dyn_cast<Instruction>(VL[0]); | |||
232 | if (!I0) | |||
233 | return false; | |||
234 | if (all_of(VL, isVectorLikeInstWithConstOps)) | |||
235 | return true; | |||
236 | ||||
237 | BasicBlock *BB = I0->getParent(); | |||
238 | for (int I = 1, E = VL.size(); I < E; I++) { | |||
239 | auto *II = dyn_cast<Instruction>(VL[I]); | |||
240 | if (!II) | |||
241 | return false; | |||
242 | ||||
243 | if (BB != II->getParent()) | |||
244 | return false; | |||
245 | } | |||
246 | return true; | |||
247 | } | |||
248 | ||||
249 | /// \returns True if all of the values in \p VL are constants (but not | |||
250 | /// globals/constant expressions). | |||
251 | static bool allConstant(ArrayRef<Value *> VL) { | |||
252 | // Constant expressions and globals can't be vectorized like normal integer/FP | |||
253 | // constants. | |||
254 | return all_of(VL, isConstant); | |||
255 | } | |||
256 | ||||
257 | /// \returns True if all of the values in \p VL are identical. | |||
258 | static bool isSplat(ArrayRef<Value *> VL) { | |||
259 | for (unsigned i = 1, e = VL.size(); i < e; ++i) | |||
260 | if (VL[i] != VL[0]) | |||
261 | return false; | |||
262 | return true; | |||
263 | } | |||
264 | ||||
265 | /// \returns True if \p I is commutative, handles CmpInst and BinaryOperator. | |||
266 | static bool isCommutative(Instruction *I) { | |||
267 | if (auto *Cmp = dyn_cast<CmpInst>(I)) | |||
268 | return Cmp->isCommutative(); | |||
269 | if (auto *BO = dyn_cast<BinaryOperator>(I)) | |||
270 | return BO->isCommutative(); | |||
271 | // TODO: This should check for generic Instruction::isCommutative(), but | |||
272 | // we need to confirm that the caller code correctly handles Intrinsics | |||
273 | // for example (does not have 2 operands). | |||
274 | return false; | |||
275 | } | |||
276 | ||||
277 | /// Checks if the vector of instructions can be represented as a shuffle, like: | |||
278 | /// %x0 = extractelement <4 x i8> %x, i32 0 | |||
279 | /// %x3 = extractelement <4 x i8> %x, i32 3 | |||
280 | /// %y1 = extractelement <4 x i8> %y, i32 1 | |||
281 | /// %y2 = extractelement <4 x i8> %y, i32 2 | |||
282 | /// %x0x0 = mul i8 %x0, %x0 | |||
283 | /// %x3x3 = mul i8 %x3, %x3 | |||
284 | /// %y1y1 = mul i8 %y1, %y1 | |||
285 | /// %y2y2 = mul i8 %y2, %y2 | |||
286 | /// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0 | |||
287 | /// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1 | |||
288 | /// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2 | |||
289 | /// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3 | |||
290 | /// ret <4 x i8> %ins4 | |||
291 | /// can be transformed into: | |||
292 | /// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5, | |||
293 | /// i32 6> | |||
294 | /// %2 = mul <4 x i8> %1, %1 | |||
295 | /// ret <4 x i8> %2 | |||
296 | /// We convert this initially to something like: | |||
297 | /// %x0 = extractelement <4 x i8> %x, i32 0 | |||
298 | /// %x3 = extractelement <4 x i8> %x, i32 3 | |||
299 | /// %y1 = extractelement <4 x i8> %y, i32 1 | |||
300 | /// %y2 = extractelement <4 x i8> %y, i32 2 | |||
301 | /// %1 = insertelement <4 x i8> poison, i8 %x0, i32 0 | |||
302 | /// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1 | |||
303 | /// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2 | |||
304 | /// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3 | |||
305 | /// %5 = mul <4 x i8> %4, %4 | |||
306 | /// %6 = extractelement <4 x i8> %5, i32 0 | |||
307 | /// %ins1 = insertelement <4 x i8> poison, i8 %6, i32 0 | |||
308 | /// %7 = extractelement <4 x i8> %5, i32 1 | |||
309 | /// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1 | |||
310 | /// %8 = extractelement <4 x i8> %5, i32 2 | |||
311 | /// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2 | |||
312 | /// %9 = extractelement <4 x i8> %5, i32 3 | |||
313 | /// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3 | |||
314 | /// ret <4 x i8> %ins4 | |||
315 | /// InstCombiner transforms this into a shuffle and vector mul | |||
316 | /// Mask will return the Shuffle Mask equivalent to the extracted elements. | |||
317 | /// TODO: Can we split off and reuse the shuffle mask detection from | |||
318 | /// TargetTransformInfo::getInstructionThroughput? | |||
319 | static Optional<TargetTransformInfo::ShuffleKind> | |||
320 | isShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) { | |||
321 | auto *EI0 = cast<ExtractElementInst>(VL[0]); | |||
322 | unsigned Size = | |||
323 | cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements(); | |||
324 | Value *Vec1 = nullptr; | |||
325 | Value *Vec2 = nullptr; | |||
326 | enum ShuffleMode { Unknown, Select, Permute }; | |||
327 | ShuffleMode CommonShuffleMode = Unknown; | |||
328 | for (unsigned I = 0, E = VL.size(); I < E; ++I) { | |||
329 | auto *EI = cast<ExtractElementInst>(VL[I]); | |||
330 | auto *Vec = EI->getVectorOperand(); | |||
331 | // All vector operands must have the same number of vector elements. | |||
332 | if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size) | |||
333 | return None; | |||
334 | auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand()); | |||
335 | if (!Idx) | |||
336 | return None; | |||
337 | // Undefined behavior if Idx is negative or >= Size. | |||
338 | if (Idx->getValue().uge(Size)) { | |||
339 | Mask.push_back(UndefMaskElem); | |||
340 | continue; | |||
341 | } | |||
342 | unsigned IntIdx = Idx->getValue().getZExtValue(); | |||
343 | Mask.push_back(IntIdx); | |||
344 | // We can extractelement from undef or poison vector. | |||
345 | if (isa<UndefValue>(Vec)) | |||
346 | continue; | |||
347 | // For correct shuffling we have to have at most 2 different vector operands | |||
348 | // in all extractelement instructions. | |||
349 | if (!Vec1 || Vec1 == Vec) | |||
350 | Vec1 = Vec; | |||
351 | else if (!Vec2 || Vec2 == Vec) | |||
352 | Vec2 = Vec; | |||
353 | else | |||
354 | return None; | |||
355 | if (CommonShuffleMode == Permute) | |||
356 | continue; | |||
357 | // If the extract index is not the same as the operation number, it is a | |||
358 | // permutation. | |||
359 | if (IntIdx != I) { | |||
360 | CommonShuffleMode = Permute; | |||
361 | continue; | |||
362 | } | |||
363 | CommonShuffleMode = Select; | |||
364 | } | |||
365 | // If we're not crossing lanes in different vectors, consider it as blending. | |||
366 | if (CommonShuffleMode == Select && Vec2) | |||
367 | return TargetTransformInfo::SK_Select; | |||
368 | // If Vec2 was never used, we have a permutation of a single vector, otherwise | |||
369 | // we have permutation of 2 vectors. | |||
370 | return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc | |||
371 | : TargetTransformInfo::SK_PermuteSingleSrc; | |||
372 | } | |||
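// Illustrative (hypothetical) use of isShuffle(): for
//   VL = { extractelement <4 x i8> %x, i32 0, extractelement <4 x i8> %x, i32 1,
//          extractelement <4 x i8> %y, i32 2, extractelement <4 x i8> %y, i32 3 }
// every extract index equals its position, so Mask becomes {0, 1, 2, 3} and the
// two source vectors give TargetTransformInfo::SK_Select. If any in-bounds index
// differed from its position the mode would become SK_PermuteTwoSrc (or
// SK_PermuteSingleSrc when only one non-undef source vector is involved).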
373 | ||||
374 | namespace { | |||
375 | ||||
376 | /// Main data required for vectorization of instructions. | |||
377 | struct InstructionsState { | |||
378 | /// The very first instruction in the list with the main opcode. | |||
379 | Value *OpValue = nullptr; | |||
380 | ||||
381 | /// The main/alternate instruction. | |||
382 | Instruction *MainOp = nullptr; | |||
383 | Instruction *AltOp = nullptr; | |||
384 | ||||
385 | /// The main/alternate opcodes for the list of instructions. | |||
386 | unsigned getOpcode() const { | |||
387 | return MainOp ? MainOp->getOpcode() : 0; | |||
388 | } | |||
389 | ||||
390 | unsigned getAltOpcode() const { | |||
391 | return AltOp ? AltOp->getOpcode() : 0; | |||
392 | } | |||
393 | ||||
394 | /// Some of the instructions in the list have alternate opcodes. | |||
395 | bool isAltShuffle() const { return getOpcode() != getAltOpcode(); } | |||
396 | ||||
397 | bool isOpcodeOrAlt(Instruction *I) const { | |||
398 | unsigned CheckedOpcode = I->getOpcode(); | |||
399 | return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode; | |||
400 | } | |||
401 | ||||
402 | InstructionsState() = delete; | |||
403 | InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp) | |||
404 | : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {} | |||
405 | }; | |||
406 | ||||
407 | } // end anonymous namespace | |||
408 | ||||
409 | /// Chooses the correct key for scheduling data. If \p Op has the same (or | |||
410 | /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p | |||
411 | /// OpValue. | |||
412 | static Value *isOneOf(const InstructionsState &S, Value *Op) { | |||
413 | auto *I = dyn_cast<Instruction>(Op); | |||
414 | if (I && S.isOpcodeOrAlt(I)) | |||
415 | return Op; | |||
416 | return S.OpValue; | |||
417 | } | |||
418 | ||||
419 | /// \returns true if \p Opcode is allowed as part of the main/alternate | |||
420 | /// instruction for SLP vectorization. | |||
421 | /// | |||
422 | /// Example of unsupported opcode is SDIV that can potentially cause UB if the | |||
423 | /// "shuffled out" lane would result in division by zero. | |||
424 | static bool isValidForAlternation(unsigned Opcode) { | |||
425 | if (Instruction::isIntDivRem(Opcode)) | |||
426 | return false; | |||
427 | ||||
428 | return true; | |||
429 | } | |||
430 | ||||
431 | /// \returns analysis of the Instructions in \p VL described in | |||
432 | /// InstructionsState, i.e. the Opcode with which we suppose the whole list | |||
433 | /// could be vectorized even if its structure is diverse. | |||
434 | static InstructionsState getSameOpcode(ArrayRef<Value *> VL, | |||
435 | unsigned BaseIndex = 0) { | |||
436 | // Make sure these are all Instructions. | |||
437 | if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); })) | |||
438 | return InstructionsState(VL[BaseIndex], nullptr, nullptr); | |||
439 | ||||
440 | bool IsCastOp = isa<CastInst>(VL[BaseIndex]); | |||
441 | bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]); | |||
442 | unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode(); | |||
443 | unsigned AltOpcode = Opcode; | |||
444 | unsigned AltIndex = BaseIndex; | |||
445 | ||||
446 | // Check for one alternate opcode from another BinaryOperator. | |||
447 | // TODO - generalize to support all operators (types, calls etc.). | |||
448 | for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) { | |||
449 | unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode(); | |||
450 | if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) { | |||
451 | if (InstOpcode == Opcode || InstOpcode == AltOpcode) | |||
452 | continue; | |||
453 | if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) && | |||
454 | isValidForAlternation(Opcode)) { | |||
455 | AltOpcode = InstOpcode; | |||
456 | AltIndex = Cnt; | |||
457 | continue; | |||
458 | } | |||
459 | } else if (IsCastOp && isa<CastInst>(VL[Cnt])) { | |||
460 | Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType(); | |||
461 | Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType(); | |||
462 | if (Ty0 == Ty1) { | |||
463 | if (InstOpcode == Opcode || InstOpcode == AltOpcode) | |||
464 | continue; | |||
465 | if (Opcode == AltOpcode) { | |||
466 | assert(isValidForAlternation(Opcode) && | |||
467 | isValidForAlternation(InstOpcode) && | |||
468 | "Cast isn't safe for alternation, logic needs to be updated!"); | |||
469 | AltOpcode = InstOpcode; | |||
470 | AltIndex = Cnt; | |||
471 | continue; | |||
472 | } | |||
473 | } | |||
474 | } else if (InstOpcode == Opcode || InstOpcode == AltOpcode) | |||
475 | continue; | |||
476 | return InstructionsState(VL[BaseIndex], nullptr, nullptr); | |||
477 | } | |||
478 | ||||
479 | return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]), | |||
480 | cast<Instruction>(VL[AltIndex])); | |||
481 | } | |||
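// A hypothetical example of how getSameOpcode() behaves: for VL = {add, sub,
// add, sub} (all BinaryOperators on the same types) the result has MainOp set
// to the first add and AltOp to the first sub, so isAltShuffle() is true. If a
// third distinct binary opcode appears (e.g. {add, sub, mul}), or any value in
// the list is not an Instruction, the returned state has null MainOp/AltOp and
// getOpcode() == 0, i.e. the list is not treated as vectorizable as-is.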
482 | ||||
483 | /// \returns true if all of the values in \p VL have the same type or false | |||
484 | /// otherwise. | |||
485 | static bool allSameType(ArrayRef<Value *> VL) { | |||
486 | Type *Ty = VL[0]->getType(); | |||
487 | for (int i = 1, e = VL.size(); i < e; i++) | |||
488 | if (VL[i]->getType() != Ty) | |||
489 | return false; | |||
490 | ||||
491 | return true; | |||
492 | } | |||
493 | ||||
494 | /// \returns True if Extract{Value,Element} instruction extracts element Idx. | |||
495 | static Optional<unsigned> getExtractIndex(Instruction *E) { | |||
496 | unsigned Opcode = E->getOpcode(); | |||
497 | assert((Opcode == Instruction::ExtractElement || | |||
498 | Opcode == Instruction::ExtractValue) && | |||
499 | "Expected extractelement or extractvalue instruction."); | |||
500 | if (Opcode == Instruction::ExtractElement) { | |||
501 | auto *CI = dyn_cast<ConstantInt>(E->getOperand(1)); | |||
502 | if (!CI) | |||
503 | return None; | |||
504 | return CI->getZExtValue(); | |||
505 | } | |||
506 | ExtractValueInst *EI = cast<ExtractValueInst>(E); | |||
507 | if (EI->getNumIndices() != 1) | |||
508 | return None; | |||
509 | return *EI->idx_begin(); | |||
510 | } | |||
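// For illustration (hypothetical values): getExtractIndex() returns 2 for
//   extractelement <4 x i32> %v, i32 2
// and 1 for
//   extractvalue {i32, i64} %s, 1
// It returns None when the extractelement index is not a ConstantInt or when
// the extractvalue has more than one index.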
511 | ||||
512 | /// \returns True if in-tree use also needs extract. This refers to | |||
513 | /// a possible scalar operand in a vectorized instruction. | |||
514 | static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst, | |||
515 | TargetLibraryInfo *TLI) { | |||
516 | unsigned Opcode = UserInst->getOpcode(); | |||
517 | switch (Opcode) { | |||
518 | case Instruction::Load: { | |||
519 | LoadInst *LI = cast<LoadInst>(UserInst); | |||
520 | return (LI->getPointerOperand() == Scalar); | |||
521 | } | |||
522 | case Instruction::Store: { | |||
523 | StoreInst *SI = cast<StoreInst>(UserInst); | |||
524 | return (SI->getPointerOperand() == Scalar); | |||
525 | } | |||
526 | case Instruction::Call: { | |||
527 | CallInst *CI = cast<CallInst>(UserInst); | |||
528 | Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); | |||
529 | for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) { | |||
530 | if (hasVectorInstrinsicScalarOpd(ID, i)) | |||
531 | return (CI->getArgOperand(i) == Scalar); | |||
532 | } | |||
533 | LLVM_FALLTHROUGH; | |||
534 | } | |||
535 | default: | |||
536 | return false; | |||
537 | } | |||
538 | } | |||
539 | ||||
540 | /// \returns the AA location that is being accessed by the instruction. | |||
541 | static MemoryLocation getLocation(Instruction *I, AAResults *AA) { | |||
542 | if (StoreInst *SI = dyn_cast<StoreInst>(I)) | |||
543 | return MemoryLocation::get(SI); | |||
544 | if (LoadInst *LI = dyn_cast<LoadInst>(I)) | |||
545 | return MemoryLocation::get(LI); | |||
546 | return MemoryLocation(); | |||
547 | } | |||
548 | ||||
549 | /// \returns True if the instruction is not a volatile or atomic load/store. | |||
550 | static bool isSimple(Instruction *I) { | |||
551 | if (LoadInst *LI = dyn_cast<LoadInst>(I)) | |||
552 | return LI->isSimple(); | |||
553 | if (StoreInst *SI = dyn_cast<StoreInst>(I)) | |||
554 | return SI->isSimple(); | |||
555 | if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) | |||
556 | return !MI->isVolatile(); | |||
557 | return true; | |||
558 | } | |||
559 | ||||
560 | /// Shuffles \p Mask in accordance with the given \p SubMask. | |||
561 | static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask) { | |||
562 | if (SubMask.empty()) | |||
563 | return; | |||
564 | if (Mask.empty()) { | |||
565 | Mask.append(SubMask.begin(), SubMask.end()); | |||
566 | return; | |||
567 | } | |||
568 | SmallVector<int> NewMask(SubMask.size(), UndefMaskElem); | |||
569 | int TermValue = std::min(Mask.size(), SubMask.size()); | |||
570 | for (int I = 0, E = SubMask.size(); I < E; ++I) { | |||
571 | if (SubMask[I] >= TermValue || SubMask[I] == UndefMaskElem || | |||
572 | Mask[SubMask[I]] >= TermValue) | |||
573 | continue; | |||
574 | NewMask[I] = Mask[SubMask[I]]; | |||
575 | } | |||
576 | Mask.swap(NewMask); | |||
577 | } | |||
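// A small worked example of the mask composition above, with hypothetical
// masks: for Mask = {1, 0, 3, 2} and SubMask = {2, 3, 0, 1}, each new element
// is Mask[SubMask[I]], giving {3, 2, 1, 0}. Elements of SubMask that are
// UndefMaskElem (or out of range) stay UndefMaskElem in the result, e.g.
// Mask = {1, 0} with SubMask = {1, UndefMaskElem} yields {0, UndefMaskElem}.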
578 | ||||
579 | /// Order may have elements assigned a special value (size) which is out of | |||
580 | /// bounds. Such indices only appear at positions which correspond to undef | |||
581 | /// values (see canReuseExtract for details) and are used to keep undef values | |||
582 | /// from affecting the operand ordering. | |||
583 | /// The first loop below simply finds all unused indices and then the next loop | |||
584 | /// nest assigns these indices for undef values positions. | |||
585 | /// As an example below Order has two undef positions and they have assigned | |||
586 | /// values 3 and 7 respectively: | |||
587 | /// before: 6 9 5 4 9 2 1 0 | |||
588 | /// after: 6 3 5 4 7 2 1 0 | |||
589 | /// \returns Fixed ordering. | |||
590 | static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) { | |||
591 | const unsigned Sz = Order.size(); | |||
592 | SmallBitVector UsedIndices(Sz); | |||
593 | SmallVector<int> MaskedIndices; | |||
594 | for (unsigned I = 0; I < Sz; ++I) { | |||
595 | if (Order[I] < Sz) | |||
596 | UsedIndices.set(Order[I]); | |||
597 | else | |||
598 | MaskedIndices.push_back(I); | |||
599 | } | |||
600 | if (MaskedIndices.empty()) | |||
601 | return; | |||
602 | SmallVector<int> AvailableIndices(MaskedIndices.size()); | |||
603 | unsigned Cnt = 0; | |||
604 | int Idx = UsedIndices.find_first_unset(); | |||
605 | do { | |||
606 | AvailableIndices[Cnt] = Idx; | |||
607 | Idx = UsedIndices.find_next_unset(Idx); | |||
608 | ++Cnt; | |||
609 | } while (Idx > 0); | |||
610 | assert(Cnt == MaskedIndices.size() && "Non-synced masked/available indices."); | |||
611 | for (int I = 0, E = MaskedIndices.size(); I < E; ++I) | |||
612 | Order[MaskedIndices[I]] = AvailableIndices[I]; | |||
613 | } | |||
614 | ||||
615 | namespace llvm { | |||
616 | ||||
617 | static void inversePermutation(ArrayRef<unsigned> Indices, | |||
618 | SmallVectorImpl<int> &Mask) { | |||
619 | Mask.clear(); | |||
620 | const unsigned E = Indices.size(); | |||
621 | Mask.resize(E, UndefMaskElem); | |||
622 | for (unsigned I = 0; I < E; ++I) | |||
623 | Mask[Indices[I]] = I; | |||
624 | } | |||
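// Worked example (hypothetical input): for Indices = {2, 0, 1} the loop sets
// Mask[2] = 0, Mask[0] = 1 and Mask[1] = 2, so Mask = {1, 2, 0}; i.e. Mask is
// the inverse of the permutation described by Indices.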
625 | ||||
626 | /// \returns inserting index of InsertElement or InsertValue instruction, | |||
627 | /// using Offset as base offset for index. | |||
628 | static Optional<int> getInsertIndex(Value *InsertInst, unsigned Offset) { | |||
629 | int Index = Offset; | |||
630 | if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) { | |||
631 | if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) { | |||
632 | auto *VT = cast<FixedVectorType>(IE->getType()); | |||
633 | if (CI->getValue().uge(VT->getNumElements())) | |||
634 | return UndefMaskElem; | |||
635 | Index *= VT->getNumElements(); | |||
636 | Index += CI->getZExtValue(); | |||
637 | return Index; | |||
638 | } | |||
639 | if (isa<UndefValue>(IE->getOperand(2))) | |||
640 | return UndefMaskElem; | |||
641 | return None; | |||
642 | } | |||
643 | ||||
644 | auto *IV = cast<InsertValueInst>(InsertInst); | |||
645 | Type *CurrentType = IV->getType(); | |||
646 | for (unsigned I : IV->indices()) { | |||
647 | if (auto *ST = dyn_cast<StructType>(CurrentType)) { | |||
648 | Index *= ST->getNumElements(); | |||
649 | CurrentType = ST->getElementType(I); | |||
650 | } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { | |||
651 | Index *= AT->getNumElements(); | |||
652 | CurrentType = AT->getElementType(); | |||
653 | } else { | |||
654 | return None; | |||
655 | } | |||
656 | Index += I; | |||
657 | } | |||
658 | return Index; | |||
659 | } | |||
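// Two hypothetical examples of the index computation above (with Offset 0):
//   insertelement <4 x float> %v, float %x, i32 2          -> 2
//   insertvalue {[2 x i32], [2 x i32]} %agg, i32 %y, 1, 0   -> 2
// In the second case the struct step contributes Index = Index * 2 + 1 and the
// inner array step contributes Index = Index * 2 + 0, flattening the aggregate
// into a linear element index.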
660 | ||||
661 | /// Reorders the list of scalars \p Scalars in accordance with the given | |||
662 | /// \p Mask: the scalar originally at position I is moved to position | |||
663 | /// Mask[I]. Positions whose mask element is UndefMaskElem are not | |||
664 | /// overwritten and keep the undef placeholder they are initialized | |||
665 | /// with. | |||
666 | static void reorderScalars(SmallVectorImpl<Value *> &Scalars, | |||
667 | ArrayRef<int> Mask) { | |||
668 | assert(!Mask.empty() && "Expected non-empty mask."); | |||
669 | SmallVector<Value *> Prev(Scalars.size(), | |||
670 | UndefValue::get(Scalars.front()->getType())); | |||
671 | Prev.swap(Scalars); | |||
672 | for (unsigned I = 0, E = Prev.size(); I < E; ++I) | |||
673 | if (Mask[I] != UndefMaskElem) | |||
674 | Scalars[Mask[I]] = Prev[I]; | |||
675 | } | |||
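// Worked example with hypothetical values: for Scalars = {a, b, c, d} and
// Mask = {3, 0, 1, 2}, element I is moved to position Mask[I], producing
// {b, c, d, a}. Lanes whose mask element is UndefMaskElem keep the undef
// placeholder they were initialized with.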
676 | ||||
677 | namespace slpvectorizer { | |||
678 | ||||
679 | /// Bottom Up SLP Vectorizer. | |||
680 | class BoUpSLP { | |||
681 | struct TreeEntry; | |||
682 | struct ScheduleData; | |||
683 | ||||
684 | public: | |||
685 | using ValueList = SmallVector<Value *, 8>; | |||
686 | using InstrList = SmallVector<Instruction *, 16>; | |||
687 | using ValueSet = SmallPtrSet<Value *, 16>; | |||
688 | using StoreList = SmallVector<StoreInst *, 8>; | |||
689 | using ExtraValueToDebugLocsMap = | |||
690 | MapVector<Value *, SmallVector<Instruction *, 2>>; | |||
691 | using OrdersType = SmallVector<unsigned, 4>; | |||
692 | ||||
693 | BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti, | |||
694 | TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li, | |||
695 | DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB, | |||
696 | const DataLayout *DL, OptimizationRemarkEmitter *ORE) | |||
697 | : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC), | |||
698 | DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) { | |||
699 | CodeMetrics::collectEphemeralValues(F, AC, EphValues); | |||
700 | // Use the vector register size specified by the target unless overridden | |||
701 | // by a command-line option. | |||
702 | // TODO: It would be better to limit the vectorization factor based on | |||
703 | // data type rather than just register size. For example, x86 AVX has | |||
704 | // 256-bit registers, but it does not support integer operations | |||
705 | // at that width (that requires AVX2). | |||
706 | if (MaxVectorRegSizeOption.getNumOccurrences()) | |||
707 | MaxVecRegSize = MaxVectorRegSizeOption; | |||
708 | else | |||
709 | MaxVecRegSize = | |||
710 | TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) | |||
711 | .getFixedSize(); | |||
712 | ||||
713 | if (MinVectorRegSizeOption.getNumOccurrences()) | |||
714 | MinVecRegSize = MinVectorRegSizeOption; | |||
715 | else | |||
716 | MinVecRegSize = TTI->getMinVectorRegisterBitWidth(); | |||
717 | } | |||
718 | ||||
719 | /// Vectorize the tree that starts with the elements in \p VL. | |||
720 | /// Returns the vectorized root. | |||
721 | Value *vectorizeTree(); | |||
722 | ||||
723 | /// Vectorize the tree but with the list of externally used values \p | |||
724 | /// ExternallyUsedValues. Values in this MapVector can be replaced by the | |||
725 | /// generated extractvalue instructions. | |||
726 | Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues); | |||
727 | ||||
728 | /// \returns the cost incurred by unwanted spills and fills, caused by | |||
729 | /// holding live values over call sites. | |||
730 | InstructionCost getSpillCost() const; | |||
731 | ||||
732 | /// \returns the vectorization cost of the subtree that starts at \p VL. | |||
733 | /// A negative number means that this is profitable. | |||
734 | InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = None); | |||
735 | ||||
736 | /// Construct a vectorizable tree that starts at \p Roots, ignoring users for | |||
737 | /// the purpose of scheduling and extraction in the \p UserIgnoreLst. | |||
738 | void buildTree(ArrayRef<Value *> Roots, | |||
739 | ArrayRef<Value *> UserIgnoreLst = None); | |||
740 | ||||
741 | /// Builds external uses of the vectorized scalars, i.e. the list of | |||
742 | /// vectorized scalars to be extracted, their lanes and their scalar users. \p | |||
743 | /// ExternallyUsedValues contains an additional list of external uses to handle | |||
744 | /// vectorization of reductions. | |||
745 | void | |||
746 | buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {}); | |||
747 | ||||
748 | /// Clear the internal data structures that are created by 'buildTree'. | |||
749 | void deleteTree() { | |||
750 | VectorizableTree.clear(); | |||
751 | ScalarToTreeEntry.clear(); | |||
752 | MustGather.clear(); | |||
753 | ExternalUses.clear(); | |||
754 | for (auto &Iter : BlocksSchedules) { | |||
755 | BlockScheduling *BS = Iter.second.get(); | |||
756 | BS->clear(); | |||
757 | } | |||
758 | MinBWs.clear(); | |||
759 | InstrElementSize.clear(); | |||
760 | } | |||
761 | ||||
762 | unsigned getTreeSize() const { return VectorizableTree.size(); } | |||
763 | ||||
764 | /// Perform LICM and CSE on the newly generated gather sequences. | |||
765 | void optimizeGatherSequence(); | |||
766 | ||||
767 | /// Reorders the current graph to the most profitable order starting from the | |||
768 | /// root node to the leaf nodes. The best order is chosen only from the nodes | |||
769 | /// of the same size (vectorization factor). Smaller nodes are considered | |||
770 | /// parts of a subgraph with a smaller VF and they are reordered independently. | |||
771 | /// We can do this because we still need to extend smaller nodes to the wider | |||
772 | /// VF and we can merge reordering shuffles with the widening shuffles. | |||
773 | void reorderTopToBottom(); | |||
774 | ||||
775 | /// Reorders the current graph to the most profitable order starting from | |||
776 | /// leaves to the root. It allows rotating small subgraphs and reduces the | |||
777 | /// number of reshuffles if the leaf nodes use the same order. In this case we | |||
778 | /// can merge the orders and just shuffle the user node instead of shuffling | |||
779 | /// its operands. Moreover, even if the leaf nodes have different orders, it | |||
780 | /// allows sinking the reordering in the graph closer to the root node and | |||
781 | /// merging it later during analysis. | |||
782 | void reorderBottomToTop(); | |||
783 | ||||
784 | /// \return The vector element size in bits to use when vectorizing the | |||
785 | /// expression tree ending at \p V. If V is a store, the size is the width of | |||
786 | /// the stored value. Otherwise, the size is the width of the largest loaded | |||
787 | /// value reaching V. This method is used by the vectorizer to calculate | |||
788 | /// vectorization factors. | |||
789 | unsigned getVectorElementSize(Value *V); | |||
790 | ||||
791 | /// Compute the minimum type sizes required to represent the entries in a | |||
792 | /// vectorizable tree. | |||
793 | void computeMinimumValueSizes(); | |||
794 | ||||
795 | // \returns maximum vector register size as set by TTI or overridden by cl::opt. | |||
796 | unsigned getMaxVecRegSize() const { | |||
797 | return MaxVecRegSize; | |||
798 | } | |||
799 | ||||
800 | // \returns minimum vector register size as set by cl::opt. | |||
801 | unsigned getMinVecRegSize() const { | |||
802 | return MinVecRegSize; | |||
803 | } | |||
804 | ||||
805 | unsigned getMinVF(unsigned Sz) const { | |||
806 | return std::max(2U, getMinVecRegSize() / Sz); | |||
807 | } | |||
808 | ||||
809 | unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const { | |||
810 | unsigned MaxVF = MaxVFOption.getNumOccurrences() ? | |||
811 | MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode); | |||
812 | return MaxVF ? MaxVF : UINT_MAX; | |||
813 | } | |||
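// For illustration, assuming the defaults above: with MinVecRegSize = 128 and
// a 32-bit element type, getMinVF(32) returns std::max(2U, 128 / 32) == 4.
// getMaximumVF() defers to TTI unless -slp-max-vf is given, and a result of 0
// (meaning "no limit") is turned into UINT_MAX.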
814 | ||||
815 | /// Check if homogeneous aggregate is isomorphic to some VectorType. | |||
816 | /// Accepts homogeneous multidimensional aggregate of scalars/vectors like | |||
817 | /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> }, | |||
818 | /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on. | |||
819 | /// | |||
820 | /// \returns number of elements in vector if isomorphism exists, 0 otherwise. | |||
821 | unsigned canMapToVector(Type *T, const DataLayout &DL) const; | |||
822 | ||||
823 | /// \returns True if the VectorizableTree is both tiny and not fully | |||
824 | /// vectorizable. We do not vectorize such trees. | |||
825 | bool isTreeTinyAndNotFullyVectorizable() const; | |||
826 | ||||
827 | /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values | |||
828 | /// can be load combined in the backend. Load combining may not be allowed in | |||
829 | /// the IR optimizer, so we do not want to alter the pattern. For example, | |||
830 | /// partially transforming a scalar bswap() pattern into vector code is | |||
831 | /// effectively impossible for the backend to undo. | |||
832 | /// TODO: If load combining is allowed in the IR optimizer, this analysis | |||
833 | /// may not be necessary. | |||
834 | bool isLoadCombineReductionCandidate(RecurKind RdxKind) const; | |||
835 | ||||
836 | /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values | |||
837 | /// can be load combined in the backend. Load combining may not be allowed in | |||
838 | /// the IR optimizer, so we do not want to alter the pattern. For example, | |||
839 | /// partially transforming a scalar bswap() pattern into vector code is | |||
840 | /// effectively impossible for the backend to undo. | |||
841 | /// TODO: If load combining is allowed in the IR optimizer, this analysis | |||
842 | /// may not be necessary. | |||
843 | bool isLoadCombineCandidate() const; | |||
844 | ||||
845 | OptimizationRemarkEmitter *getORE() { return ORE; } | |||
846 | ||||
847 | /// This structure holds any data we need about the edges being traversed | |||
848 | /// during buildTree_rec(). We keep track of: | |||
849 | /// (i) the user TreeEntry index, and | |||
850 | /// (ii) the index of the edge. | |||
851 | struct EdgeInfo { | |||
852 | EdgeInfo() = default; | |||
853 | EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx) | |||
854 | : UserTE(UserTE), EdgeIdx(EdgeIdx) {} | |||
855 | /// The user TreeEntry. | |||
856 | TreeEntry *UserTE = nullptr; | |||
857 | /// The operand index of the use. | |||
858 | unsigned EdgeIdx = UINT_MAX; | |||
859 | #ifndef NDEBUG | |||
860 | friend inline raw_ostream &operator<<(raw_ostream &OS, | |||
861 | const BoUpSLP::EdgeInfo &EI) { | |||
862 | EI.dump(OS); | |||
863 | return OS; | |||
864 | } | |||
865 | /// Debug print. | |||
866 | void dump(raw_ostream &OS) const { | |||
867 | OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null") | |||
868 | << " EdgeIdx:" << EdgeIdx << "}"; | |||
869 | } | |||
870 | LLVM_DUMP_METHOD void dump() const { dump(dbgs()); } | |||
871 | #endif | |||
872 | }; | |||
873 | ||||
874 | /// A helper data structure to hold the operands of a vector of instructions. | |||
875 | /// This supports a fixed vector length for all operand vectors. | |||
876 | class VLOperands { | |||
877 | /// For each operand we need (i) the value, and (ii) the opcode that it | |||
878 | /// would be attached to if the expression was in a left-linearized form. | |||
879 | /// This is required to avoid illegal operand reordering. | |||
880 | /// For example: | |||
881 | /// \verbatim | |||
882 | /// 0 Op1 | |||
883 | /// |/ | |||
884 | /// Op1 Op2 Linearized + Op2 | |||
885 | /// \ / ----------> |/ | |||
886 | /// - - | |||
887 | /// | |||
888 | /// Op1 - Op2 (0 + Op1) - Op2 | |||
889 | /// \endverbatim | |||
890 | /// | |||
891 | /// Value Op1 is attached to a '+' operation, and Op2 to a '-'. | |||
892 | /// | |||
893 | /// Another way to think of this is to track all the operations across the | |||
894 | /// path from the operand all the way to the root of the tree and to | |||
895 | /// calculate the operation that corresponds to this path. For example, the | |||
896 | /// path from Op2 to the root crosses the RHS of the '-', therefore the | |||
897 | /// corresponding operation is a '-' (which matches the one in the | |||
898 | /// linearized tree, as shown above). | |||
899 | /// | |||
900 | /// For lack of a better term, we refer to this operation as Accumulated | |||
901 | /// Path Operation (APO). | |||
902 | struct OperandData { | |||
903 | OperandData() = default; | |||
904 | OperandData(Value *V, bool APO, bool IsUsed) | |||
905 | : V(V), APO(APO), IsUsed(IsUsed) {} | |||
906 | /// The operand value. | |||
907 | Value *V = nullptr; | |||
908 | /// TreeEntries only allow a single opcode, or an alternate sequence of | |||
909 | /// them (e.g., +, -). Therefore, we can safely use a boolean value for the | |||
910 | /// APO. It is set to 'true' if 'V' is attached to an inverse operation | |||
911 | /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise | |||
912 | /// (e.g., Add/Mul) | |||
913 | bool APO = false; | |||
914 | /// Helper data for the reordering function. | |||
915 | bool IsUsed = false; | |||
916 | }; | |||
917 | ||||
918 | /// During operand reordering, we are trying to select the operand at lane | |||
919 | /// that matches best with the operand at the neighboring lane. Our | |||
920 | /// selection is based on the type of value we are looking for. For example, | |||
921 | /// if the neighboring lane has a load, we need to look for a load that is | |||
922 | /// accessing a consecutive address. These strategies are summarized in the | |||
923 | /// 'ReorderingMode' enumerator. | |||
924 | enum class ReorderingMode { | |||
925 | Load, ///< Matching loads to consecutive memory addresses | |||
926 | Opcode, ///< Matching instructions based on opcode (same or alternate) | |||
927 | Constant, ///< Matching constants | |||
928 | Splat, ///< Matching the same instruction multiple times (broadcast) | |||
929 | Failed, ///< We failed to create a vectorizable group | |||
930 | }; | |||
931 | ||||
932 | using OperandDataVec = SmallVector<OperandData, 2>; | |||
933 | ||||
934 | /// A vector of operand vectors. | |||
935 | SmallVector<OperandDataVec, 4> OpsVec; | |||
936 | ||||
937 | const DataLayout &DL; | |||
938 | ScalarEvolution &SE; | |||
939 | const BoUpSLP &R; | |||
940 | ||||
941 | /// \returns the operand data at \p OpIdx and \p Lane. | |||
942 | OperandData &getData(unsigned OpIdx, unsigned Lane) { | |||
943 | return OpsVec[OpIdx][Lane]; | |||
944 | } | |||
945 | ||||
946 | /// \returns the operand data at \p OpIdx and \p Lane. Const version. | |||
947 | const OperandData &getData(unsigned OpIdx, unsigned Lane) const { | |||
948 | return OpsVec[OpIdx][Lane]; | |||
949 | } | |||
950 | ||||
951 | /// Clears the used flag for all entries. | |||
952 | void clearUsed() { | |||
953 | for (unsigned OpIdx = 0, NumOperands = getNumOperands(); | |||
954 | OpIdx != NumOperands; ++OpIdx) | |||
955 | for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes; | |||
956 | ++Lane) | |||
957 | OpsVec[OpIdx][Lane].IsUsed = false; | |||
958 | } | |||
959 | ||||
960 | /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2. | |||
961 | void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) { | |||
962 | std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]); | |||
963 | } | |||
964 | ||||
965 | // The hard-coded scores listed here are not very important. When computing | |||
966 | // the scores of matching one sub-tree with another, we are basically | |||
967 | // counting the number of values that are matching. So even if all scores | |||
968 | // are set to 1, we would still get a decent matching result. | |||
969 | // However, sometimes we have to break ties. For example we may have to | |||
970 | // choose between matching loads vs matching opcodes. This is what these | |||
971 | // scores are helping us with: they provide the order of preference. | |||
972 | ||||
973 | /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]). | |||
974 | static const int ScoreConsecutiveLoads = 3; | |||
975 | /// ExtractElementInst from same vector and consecutive indexes. | |||
976 | static const int ScoreConsecutiveExtracts = 3; | |||
977 | /// Constants. | |||
978 | static const int ScoreConstants = 2; | |||
979 | /// Instructions with the same opcode. | |||
980 | static const int ScoreSameOpcode = 2; | |||
981 | /// Instructions with alt opcodes (e.g, add + sub). | |||
982 | static const int ScoreAltOpcodes = 1; | |||
983 | /// Identical instructions (a.k.a. splat or broadcast). | |||
984 | static const int ScoreSplat = 1; | |||
985 | /// Matching with an undef is preferable to failing. | |||
986 | static const int ScoreUndef = 1; | |||
987 | /// Score for failing to find a decent match. | |||
988 | static const int ScoreFail = 0; | |||
989 | /// User external to the vectorized code. | |||
990 | static const int ExternalUseCost = 1; | |||
991 | /// The user is internal but in a different lane. | |||
992 | static const int UserInDiffLaneCost = ExternalUseCost; | |||
993 | ||||
994 | /// \returns the score of placing \p V1 and \p V2 in consecutive lanes. | |||
995 | static int getShallowScore(Value *V1, Value *V2, const DataLayout &DL, | |||
996 | ScalarEvolution &SE) { | |||
997 | auto *LI1 = dyn_cast<LoadInst>(V1); | |||
998 | auto *LI2 = dyn_cast<LoadInst>(V2); | |||
999 | if (LI1 && LI2) { | |||
1000 | if (LI1->getParent() != LI2->getParent()) | |||
1001 | return VLOperands::ScoreFail; | |||
1002 | ||||
1003 | Optional<int> Dist = getPointersDiff( | |||
1004 | LI1->getType(), LI1->getPointerOperand(), LI2->getType(), | |||
1005 | LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true); | |||
1006 | return (Dist && *Dist == 1) ? VLOperands::ScoreConsecutiveLoads | |||
1007 | : VLOperands::ScoreFail; | |||
1008 | } | |||
1009 | ||||
1010 | auto *C1 = dyn_cast<Constant>(V1); | |||
1011 | auto *C2 = dyn_cast<Constant>(V2); | |||
1012 | if (C1 && C2) | |||
1013 | return VLOperands::ScoreConstants; | |||
1014 | ||||
1015 | // Extracts from consecutive indexes of the same vector get a better score as | |||
1016 | // the extracts could be optimized away. | |||
1017 | Value *EV; | |||
1018 | ConstantInt *Ex1Idx, *Ex2Idx; | |||
1019 | if (match(V1, m_ExtractElt(m_Value(EV), m_ConstantInt(Ex1Idx))) && | |||
1020 | match(V2, m_ExtractElt(m_Deferred(EV), m_ConstantInt(Ex2Idx))) && | |||
1021 | Ex1Idx->getZExtValue() + 1 == Ex2Idx->getZExtValue()) | |||
1022 | return VLOperands::ScoreConsecutiveExtracts; | |||
1023 | ||||
1024 | auto *I1 = dyn_cast<Instruction>(V1); | |||
1025 | auto *I2 = dyn_cast<Instruction>(V2); | |||
1026 | if (I1 && I2) { | |||
1027 | if (I1 == I2) | |||
1028 | return VLOperands::ScoreSplat; | |||
1029 | InstructionsState S = getSameOpcode({I1, I2}); | |||
1030 | // Note: Only consider instructions with <= 2 operands to avoid | |||
1031 | // complexity explosion. | |||
1032 | if (S.getOpcode() && S.MainOp->getNumOperands() <= 2) | |||
1033 | return S.isAltShuffle() ? VLOperands::ScoreAltOpcodes | |||
1034 | : VLOperands::ScoreSameOpcode; | |||
1035 | } | |||
1036 | ||||
1037 | if (isa<UndefValue>(V2)) | |||
1038 | return VLOperands::ScoreUndef; | |||
1039 | ||||
1040 | return VLOperands::ScoreFail; | |||
1041 | } | |||
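// Hypothetical score examples for the heuristic above: two loads of A[i] and
// A[i+1] from the same block score ScoreConsecutiveLoads (3); two constants
// score ScoreConstants (2); two adds score ScoreSameOpcode (2) while an
// add/sub pair scores ScoreAltOpcodes (1); the same instruction twice scores
// ScoreSplat (1); pairing with undef scores ScoreUndef (1); anything else
// scores ScoreFail (0).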
1042 | ||||
1043 | /// Holds the values and their lane that are taking part in the look-ahead | |||
1044 | /// score calculation. This is used in the external uses cost calculation. | |||
1045 | SmallDenseMap<Value *, int> InLookAheadValues; | |||
1046 | ||||
1047 | /// \returns the additional cost due to uses of \p LHS and \p RHS that are | |||
1048 | /// either external to the vectorized code, or require shuffling. | |||
1049 | int getExternalUsesCost(const std::pair<Value *, int> &LHS, | |||
1050 | const std::pair<Value *, int> &RHS) { | |||
1051 | int Cost = 0; | |||
1052 | std::array<std::pair<Value *, int>, 2> Values = {{LHS, RHS}}; | |||
1053 | for (int Idx = 0, IdxE = Values.size(); Idx != IdxE; ++Idx) { | |||
1054 | Value *V = Values[Idx].first; | |||
1055 | if (isa<Constant>(V)) { | |||
1056 | // Since this is a function pass, it doesn't make semantic sense to | |||
1057 | // walk the users of a subclass of Constant. The users could be in | |||
1058 | // another function, or even another module that happens to be in | |||
1059 | // the same LLVMContext. | |||
1060 | continue; | |||
1061 | } | |||
1062 | ||||
1063 | // Calculate the absolute lane, using the minimum relative lane of LHS | |||
1064 | // and RHS as base and Idx as the offset. | |||
1065 | int Ln = std::min(LHS.second, RHS.second) + Idx; | |||
1066 | assert(Ln >= 0 && "Bad lane calculation"); | |||
1067 | unsigned UsersBudget = LookAheadUsersBudget; | |||
1068 | for (User *U : V->users()) { | |||
1069 | if (const TreeEntry *UserTE = R.getTreeEntry(U)) { | |||
1070 | // The user is in the VectorizableTree. Check if we need to insert. | |||
1071 | auto It = llvm::find(UserTE->Scalars, U); | |||
1072 | assert(It != UserTE->Scalars.end() && "U is in UserTE"); | |||
1073 | int UserLn = std::distance(UserTE->Scalars.begin(), It); | |||
1074 | assert(UserLn >= 0 && "Bad lane"); | |||
1075 | if (UserLn != Ln) | |||
1076 | Cost += UserInDiffLaneCost; | |||
1077 | } else { | |||
1078 | // Check if the user is in the look-ahead code. | |||
1079 | auto It2 = InLookAheadValues.find(U); | |||
1080 | if (It2 != InLookAheadValues.end()) { | |||
1081 | // The user is in the look-ahead code. Check the lane. | |||
1082 | if (It2->second != Ln) | |||
1083 | Cost += UserInDiffLaneCost; | |||
1084 | } else { | |||
1085 | // The user is neither in SLP tree nor in the look-ahead code. | |||
1086 | Cost += ExternalUseCost; | |||
1087 | } | |||
1088 | } | |||
1089 | // Limit the number of visited uses to cap compilation time. | |||
1090 | if (--UsersBudget == 0) | |||
1091 | break; | |||
1092 | } | |||
1093 | } | |||
1094 | return Cost; | |||
1095 | } | |||
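// A minimal standalone sketch of the budget-capped accounting above, assuming
// plain STL containers in place of the SLP data structures; all names here
// (UserInfoSketch, externalUsesCostSketch) are hypothetical. Users vectorized
// in a different lane are charged a "different lane" cost, users outside the
// tree an "external use" cost, and at most Budget users are visited per value.
#include <optional>
#include <vector>

struct UserInfoSketch {
  std::optional<int> TreeLane; // lane if the user is vectorized, else empty
};

static int externalUsesCostSketch(const std::vector<UserInfoSketch> &Users,
                                  int Lane, unsigned Budget) {
  const int UserInDiffLaneCost = 1, ExternalUseCost = 1;
  int Cost = 0;
  for (const UserInfoSketch &U : Users) {
    if (U.TreeLane) {
      if (*U.TreeLane != Lane)
        Cost += UserInDiffLaneCost; // vectorized, but in a different lane
    } else {
      Cost += ExternalUseCost;      // not vectorized at all
    }
    if (--Budget == 0)              // cap the number of visited users
      break;
  }
  return Cost;
}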
1096 | ||||
1097 | /// Go through the operands of \p LHS and \p RHS recursively until \p | |||
1098 | /// MaxLevel, and return the cumulative score. For example: | |||
1099 | /// \verbatim | |||
1100 | /// A[0] B[0] A[1] B[1] C[0] D[0] B[1] A[1] | |||
1101 | /// \ / \ / \ / \ / | |||
1102 | /// + + + + | |||
1103 | /// G1 G2 G3 G4 | |||
1104 | /// \endverbatim | |||
1105 | /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at | |||
1106 | /// each level recursively, accumulating the score. It starts from matching | |||
1107 | /// the additions at level 0, then moves on to the loads (level 1). The | |||
1108 | /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and | |||
1109 | /// {B[0],B[1]} match with VLOperands::ScoreConsecutiveLoads, while | |||
1110 | /// {A[0],C[0]} has a score of VLOperands::ScoreFail. | |||
1111 | /// Please note that the order of the operands does not matter, as we | |||
1112 | /// evaluate the score of all profitable combinations of operands. In | |||
1113 | /// other words the score of G1 and G4 is the same as G1 and G2. This | |||
1114 | /// heuristic is based on ideas described in: | |||
1115 | /// Look-ahead SLP: Auto-vectorization in the presence of commutative | |||
1116 | /// operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha, | |||
1117 | /// Luís F. W. Góes | |||
1118 | int getScoreAtLevelRec(const std::pair<Value *, int> &LHS, | |||
1119 | const std::pair<Value *, int> &RHS, int CurrLevel, | |||
1120 | int MaxLevel) { | |||
1121 | ||||
1122 | Value *V1 = LHS.first; | |||
1123 | Value *V2 = RHS.first; | |||
1124 | // Get the shallow score of V1 and V2. | |||
1125 | int ShallowScoreAtThisLevel = | |||
1126 | std::max((int)ScoreFail, getShallowScore(V1, V2, DL, SE) - | |||
1127 | getExternalUsesCost(LHS, RHS)); | |||
1128 | int Lane1 = LHS.second; | |||
1129 | int Lane2 = RHS.second; | |||
1130 | ||||
1131 | // If reached MaxLevel, | |||
1132 | // or if V1 and V2 are not instructions, | |||
1133 | // or if they are SPLAT, | |||
1134 | // or if they are not consecutive, early return the current cost. | |||
1135 | auto *I1 = dyn_cast<Instruction>(V1); | |||
1136 | auto *I2 = dyn_cast<Instruction>(V2); | |||
1137 | if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 || | |||
1138 | ShallowScoreAtThisLevel == VLOperands::ScoreFail || | |||
1139 | (isa<LoadInst>(I1) && isa<LoadInst>(I2) && ShallowScoreAtThisLevel)) | |||
1140 | return ShallowScoreAtThisLevel; | |||
1141 | assert(I1 && I2 && "Should have early exited."); | |||
1142 | ||||
1143 | // Keep track of in-tree values for determining the external-use cost. | |||
1144 | InLookAheadValues[V1] = Lane1; | |||
1145 | InLookAheadValues[V2] = Lane2; | |||
1146 | ||||
1147 | // Contains the I2 operand indexes that got matched with I1 operands. | |||
1148 | SmallSet<unsigned, 4> Op2Used; | |||
1149 | ||||
1150 | // Recursion towards the operands of I1 and I2. We are trying all possible | |||
1151 | // operand pairs, and keeping track of the best score. | |||
1152 | for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands(); | |||
1153 | OpIdx1 != NumOperands1; ++OpIdx1) { | |||
1154 | // Try to pair operand OpIdx1 of I1 with the best operand of I2. | |||
1155 | int MaxTmpScore = 0; | |||
1156 | unsigned MaxOpIdx2 = 0; | |||
1157 | bool FoundBest = false; | |||
1158 | // If I2 is commutative try all combinations. | |||
1159 | unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1; | |||
1160 | unsigned ToIdx = isCommutative(I2) | |||
1161 | ? I2->getNumOperands() | |||
1162 | : std::min(I2->getNumOperands(), OpIdx1 + 1); | |||
1163 | assert(FromIdx <= ToIdx && "Bad index"); | |||
1164 | for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) { | |||
1165 | // Skip operands already paired with OpIdx1. | |||
1166 | if (Op2Used.count(OpIdx2)) | |||
1167 | continue; | |||
1168 | // Recursively calculate the cost at each level | |||
1169 | int TmpScore = getScoreAtLevelRec({I1->getOperand(OpIdx1), Lane1}, | |||
1170 | {I2->getOperand(OpIdx2), Lane2}, | |||
1171 | CurrLevel + 1, MaxLevel); | |||
1172 | // Look for the best score. | |||
1173 | if (TmpScore > VLOperands::ScoreFail && TmpScore > MaxTmpScore) { | |||
1174 | MaxTmpScore = TmpScore; | |||
1175 | MaxOpIdx2 = OpIdx2; | |||
1176 | FoundBest = true; | |||
1177 | } | |||
1178 | } | |||
1179 | if (FoundBest) { | |||
1180 | // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it. | |||
1181 | Op2Used.insert(MaxOpIdx2); | |||
1182 | ShallowScoreAtThisLevel += MaxTmpScore; | |||
1183 | } | |||
1184 | } | |||
1185 | return ShallowScoreAtThisLevel; | |||
1186 | } | |||
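// A standalone sketch of the greedy operand pairing performed above, assuming
// a plain, hypothetical score matrix Score[i][j] for pairing operand i of I1
// with operand j of I2: each operand of I1 grabs its best-scoring, not yet
// used operand of I2, and the per-pair maxima are accumulated.
#include <set>
#include <vector>

static int greedyPairScoreSketch(const std::vector<std::vector<int>> &Score) {
  std::set<unsigned> Op2Used;
  int Total = 0;
  for (unsigned Op1 = 0; Op1 != Score.size(); ++Op1) {
    int Best = 0;
    unsigned BestOp2 = 0;
    bool Found = false;
    for (unsigned Op2 = 0; Op2 != Score[Op1].size(); ++Op2) {
      if (Op2Used.count(Op2))
        continue;                 // never pair an I2 operand twice
      if (Score[Op1][Op2] > Best) {
        Best = Score[Op1][Op2];
        BestOp2 = Op2;
        Found = true;
      }
    }
    if (Found) {
      Op2Used.insert(BestOp2);    // lock in the best pair for this operand
      Total += Best;
    }
  }
  return Total;
}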
1187 | ||||
1188 | /// \Returns the look-ahead score, which tells us how much the sub-trees | |||
1189 | /// rooted at \p LHS and \p RHS match; the more they match, the higher the | |||
1190 | /// score. This helps break ties in an informed way when we cannot decide on | |||
1191 | /// the order of the operands by just considering the immediate | |||
1192 | /// predecessors. | |||
1193 | int getLookAheadScore(const std::pair<Value *, int> &LHS, | |||
1194 | const std::pair<Value *, int> &RHS) { | |||
1195 | InLookAheadValues.clear(); | |||
1196 | return getScoreAtLevelRec(LHS, RHS, 1, LookAheadMaxDepth); | |||
1197 | } | |||
1198 | ||||
1199 | // Search all operands in Ops[*][Lane] for the one that matches best | |||
1200 | // Ops[OpIdx][LastLane] and return its operand index. | |||
1201 | // If no good match can be found, return None. | |||
1202 | Optional<unsigned> | |||
1203 | getBestOperand(unsigned OpIdx, int Lane, int LastLane, | |||
1204 | ArrayRef<ReorderingMode> ReorderingModes) { | |||
1205 | unsigned NumOperands = getNumOperands(); | |||
1206 | ||||
1207 | // The operand of the previous lane at OpIdx. | |||
1208 | Value *OpLastLane = getData(OpIdx, LastLane).V; | |||
1209 | ||||
1210 | // Our strategy mode for OpIdx. | |||
1211 | ReorderingMode RMode = ReorderingModes[OpIdx]; | |||
1212 | ||||
1213 | // The linearized opcode of the operand at OpIdx, Lane. | |||
1214 | bool OpIdxAPO = getData(OpIdx, Lane).APO; | |||
1215 | ||||
1216 | // The best operand index and its score. | |||
1217 | // Sometimes we have more than one option (e.g., Opcode and Undefs), so we | |||
1218 | // are using the score to differentiate between the two. | |||
1219 | struct BestOpData { | |||
1220 | Optional<unsigned> Idx = None; | |||
1221 | unsigned Score = 0; | |||
1222 | } BestOp; | |||
1223 | ||||
1224 | // Iterate through all unused operands and look for the best. | |||
1225 | for (unsigned Idx = 0; Idx != NumOperands; ++Idx) { | |||
1226 | // Get the operand at Idx and Lane. | |||
1227 | OperandData &OpData = getData(Idx, Lane); | |||
1228 | Value *Op = OpData.V; | |||
1229 | bool OpAPO = OpData.APO; | |||
1230 | ||||
1231 | // Skip already selected operands. | |||
1232 | if (OpData.IsUsed) | |||
1233 | continue; | |||
1234 | ||||
1235 | // Skip if we are trying to move the operand to a position with a | |||
1236 | // different opcode in the linearized tree form. This would break the | |||
1237 | // semantics. | |||
1238 | if (OpAPO != OpIdxAPO) | |||
1239 | continue; | |||
1240 | ||||
1241 | // Look for an operand that matches the current mode. | |||
1242 | switch (RMode) { | |||
1243 | case ReorderingMode::Load: | |||
1244 | case ReorderingMode::Constant: | |||
1245 | case ReorderingMode::Opcode: { | |||
1246 | bool LeftToRight = Lane > LastLane; | |||
1247 | Value *OpLeft = (LeftToRight) ? OpLastLane : Op; | |||
1248 | Value *OpRight = (LeftToRight) ? Op : OpLastLane; | |||
1249 | unsigned Score = | |||
1250 | getLookAheadScore({OpLeft, LastLane}, {OpRight, Lane}); | |||
1251 | if (Score > BestOp.Score) { | |||
1252 | BestOp.Idx = Idx; | |||
1253 | BestOp.Score = Score; | |||
1254 | } | |||
1255 | break; | |||
1256 | } | |||
1257 | case ReorderingMode::Splat: | |||
1258 | if (Op == OpLastLane) | |||
1259 | BestOp.Idx = Idx; | |||
1260 | break; | |||
1261 | case ReorderingMode::Failed: | |||
1262 | return None; | |||
1263 | } | |||
1264 | } | |||
1265 | ||||
1266 | if (BestOp.Idx) { | |||
1267 | getData(BestOp.Idx.getValue(), Lane).IsUsed = true; | |||
1268 | return BestOp.Idx; | |||
1269 | } | |||
1270 | // If we could not find a good match return None. | |||
1271 | return None; | |||
1272 | } | |||
1273 | ||||
1274 | /// Helper for reorderOperandVecs. \Returns the lane that we should start | |||
1275 | /// reordering from. This is the one which has the least number of operands | |||
1276 | /// that can freely move about. | |||
1277 | unsigned getBestLaneToStartReordering() const { | |||
1278 | unsigned BestLane = 0; | |||
1279 | unsigned Min = UINT_MAX; | |||
1280 | for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes; | |||
1281 | ++Lane) { | |||
1282 | unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane); | |||
1283 | if (NumFreeOps < Min) { | |||
1284 | Min = NumFreeOps; | |||
1285 | BestLane = Lane; | |||
1286 | } | |||
1287 | } | |||
1288 | return BestLane; | |||
1289 | } | |||
1290 | ||||
1291 | /// \Returns the maximum number of operands that are allowed to be reordered | |||
1292 | /// for \p Lane. This is used as a heuristic for selecting the first lane to | |||
1293 | /// start operand reordering. | |||
1294 | unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const { | |||
1295 | unsigned CntTrue = 0; | |||
1296 | unsigned NumOperands = getNumOperands(); | |||
1297 | // Operands with the same APO can be reordered. We therefore need to count | |||
1298 | // how many of them we have for each APO, like this: Cnt[APO] = x. | |||
1299 | // Since we only have two APOs, namely true and false, we can avoid using | |||
1300 | // a map. Instead we can simply count the number of operands that | |||
1301 | // correspond to one of them (in this case the 'true' APO), and calculate | |||
1302 | // the other by subtracting it from the total number of operands. | |||
1303 | for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) | |||
1304 | if (getData(OpIdx, Lane).APO) | |||
1305 | ++CntTrue; | |||
1306 | unsigned CntFalse = NumOperands - CntTrue; | |||
1307 | return std::max(CntTrue, CntFalse); | |||
1308 | } | |||
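// A minimal sketch of the two-bucket counting trick used above, with a plain
// vector of APO flags standing in for the lane's OperandData: with only two
// possible APO values, counting one bucket suffices, and the larger bucket
// bounds how many operands in the lane can be freely reordered.
#include <vector>

static unsigned maxReorderableOperandsSketch(const std::vector<bool> &APOs) {
  unsigned CntTrue = 0;
  for (bool APO : APOs)
    CntTrue += APO;
  unsigned CntFalse = APOs.size() - CntTrue;
  return CntTrue > CntFalse ? CntTrue : CntFalse;
}
// For a lane whose operands have APOs {false, true, false} this returns 2:
// the two 'false' operands may swap with each other, the 'true' one cannot.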
1309 | ||||
1310 | /// Go through the instructions in VL and append their operands. | |||
1311 | void appendOperandsOfVL(ArrayRef<Value *> VL) { | |||
1312 | assert(!VL.empty() && "Bad VL"); | |||
1313 | assert((empty() || VL.size() == getNumLanes()) && | |||
1314 | "Expected same number of lanes"); | |||
1315 | assert(isa<Instruction>(VL[0]) && "Expected instruction"); | |||
1316 | unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands(); | |||
1317 | OpsVec.resize(NumOperands); | |||
1318 | unsigned NumLanes = VL.size(); | |||
1319 | for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { | |||
1320 | OpsVec[OpIdx].resize(NumLanes); | |||
1321 | for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { | |||
1322 | assert(isa<Instruction>(VL[Lane]) && "Expected instruction"); | |||
1323 | // Our tree has just 3 nodes: the root and two operands. | |||
1324 | // It is therefore trivial to get the APO. We only need to check the | |||
1325 | // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or | |||
1326 | // RHS operand. The LHS operand of both add and sub is never attached | |||
1327 | // to an inverse operation in the linearized form, therefore its APO | |||
1328 | // is false. The RHS is true only if VL[Lane] is an inverse operation. | |||
1329 | ||||
1330 | // Since operand reordering is performed on groups of commutative | |||
1331 | // operations or alternating sequences (e.g., +, -), we can safely | |||
1332 | // tell the inverse operations by checking commutativity. | |||
1333 | bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane])); | |||
1334 | bool APO = (OpIdx == 0) ? false : IsInverseOperation; | |||
1335 | OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx), | |||
1336 | APO, false}; | |||
1337 | } | |||
1338 | } | |||
1339 | } | |||
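// A sketch of the APO rule applied above for a 2-operand add/sub bundle, using
// a hypothetical per-lane commutativity flag instead of real instructions: the
// LHS (operand 0) is never attached to an inverse operation, so its APO is
// false; the RHS's APO is true exactly when the lane's instruction is the
// inverse (non-commutative) operation, e.g. a subtraction.
#include <vector>

static std::vector<std::vector<bool>>
computeAPOsSketch(const std::vector<bool> &LaneIsCommutative) {
  unsigned NumLanes = LaneIsCommutative.size();
  std::vector<std::vector<bool>> APO(2, std::vector<bool>(NumLanes));
  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
    APO[0][Lane] = false;                    // LHS operand is never inverted
    APO[1][Lane] = !LaneIsCommutative[Lane]; // RHS of a 'sub' is inverted
  }
  return APO;
}
// For lanes {add, sub, add, sub} this yields APO[1] = {false, true, false, true}.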
1340 | ||||
1341 | /// \returns the number of operands. | |||
1342 | unsigned getNumOperands() const { return OpsVec.size(); } | |||
1343 | ||||
1344 | /// \returns the number of lanes. | |||
1345 | unsigned getNumLanes() const { return OpsVec[0].size(); } | |||
1346 | ||||
1347 | /// \returns the operand value at \p OpIdx and \p Lane. | |||
1348 | Value *getValue(unsigned OpIdx, unsigned Lane) const { | |||
1349 | return getData(OpIdx, Lane).V; | |||
1350 | } | |||
1351 | ||||
1352 | /// \returns true if the data structure is empty. | |||
1353 | bool empty() const { return OpsVec.empty(); } | |||
1354 | ||||
1355 | /// Clears the data. | |||
1356 | void clear() { OpsVec.clear(); } | |||
1357 | ||||
1358 | /// \Returns true if there are enough operands identical to \p Op to fill | |||
1359 | /// the whole vector. | |||
1360 | /// Note: This modifies the 'IsUsed' flag, so a cleanUsed() must follow. | |||
1361 | bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) { | |||
1362 | bool OpAPO = getData(OpIdx, Lane).APO; | |||
1363 | for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) { | |||
1364 | if (Ln == Lane) | |||
1365 | continue; | |||
1366 | // This is set to true if we found a candidate for broadcast at Lane. | |||
1367 | bool FoundCandidate = false; | |||
1368 | for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) { | |||
1369 | OperandData &Data = getData(OpI, Ln); | |||
1370 | if (Data.APO != OpAPO || Data.IsUsed) | |||
1371 | continue; | |||
1372 | if (Data.V == Op) { | |||
1373 | FoundCandidate = true; | |||
1374 | Data.IsUsed = true; | |||
1375 | break; | |||
1376 | } | |||
1377 | } | |||
1378 | if (!FoundCandidate) | |||
1379 | return false; | |||
1380 | } | |||
1381 | return true; | |||
1382 | } | |||
1383 | ||||
1384 | public: | |||
1385 | /// Initialize with all the operands of the instruction vector \p RootVL. | |||
1386 | VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL, | |||
1387 | ScalarEvolution &SE, const BoUpSLP &R) | |||
1388 | : DL(DL), SE(SE), R(R) { | |||
1389 | // Append all the operands of RootVL. | |||
1390 | appendOperandsOfVL(RootVL); | |||
1391 | } | |||
1392 | ||||
1393 | /// \Returns a value vector with the operands across all lanes for the | |||
1394 | /// operand at \p OpIdx. | |||
1395 | ValueList getVL(unsigned OpIdx) const { | |||
1396 | ValueList OpVL(OpsVec[OpIdx].size()); | |||
1397 | assert(OpsVec[OpIdx].size() == getNumLanes() && | |||
1398 | "Expected same num of lanes across all operands"); | |||
1399 | for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane) | |||
1400 | OpVL[Lane] = OpsVec[OpIdx][Lane].V; | |||
1401 | return OpVL; | |||
1402 | } | |||
1403 | ||||
1404 | // Performs operand reordering for 2 or more operands. | |||
1405 | // The original operands are in OrigOps[OpIdx][Lane]. | |||
1406 | // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'. | |||
1407 | void reorder() { | |||
1408 | unsigned NumOperands = getNumOperands(); | |||
1409 | unsigned NumLanes = getNumLanes(); | |||
1410 | // Each operand has its own mode. We are using this mode to help us select | |||
1411 | // the instructions for each lane, so that they match best with the ones | |||
1412 | // we have selected so far. | |||
1413 | SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands); | |||
1414 | ||||
1415 | // This is a greedy single-pass algorithm. We are going over each lane | |||
1416 | // once and deciding on the best order right away with no back-tracking. | |||
1417 | // However, in order to increase its effectiveness, we start with the lane | |||
1418 | // that has operands that can move the least. For example, given the | |||
1419 | // following lanes: | |||
1420 | // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd | |||
1421 | // Lane 1 : A[1] = C[1] - B[1] // Visited 1st | |||
1422 | // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd | |||
1423 | // Lane 3 : A[3] = C[3] - B[3] // Visited 4th | |||
1424 | // we will start at Lane 1, since the operands of the subtraction cannot | |||
1425 | // be reordered. Then we will visit the rest of the lanes in a circular | |||
1426 | // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3. | |||
1427 | ||||
1428 | // Find the first lane that we will start our search from. | |||
1429 | unsigned FirstLane = getBestLaneToStartReordering(); | |||
1430 | ||||
1431 | // Initialize the modes. | |||
1432 | for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { | |||
1433 | Value *OpLane0 = getValue(OpIdx, FirstLane); | |||
1434 | // Keep track if we have instructions with all the same opcode on one | |||
1435 | // side. | |||
1436 | if (isa<LoadInst>(OpLane0)) | |||
1437 | ReorderingModes[OpIdx] = ReorderingMode::Load; | |||
1438 | else if (isa<Instruction>(OpLane0)) { | |||
1439 | // Check if OpLane0 should be broadcast. | |||
1440 | if (shouldBroadcast(OpLane0, OpIdx, FirstLane)) | |||
1441 | ReorderingModes[OpIdx] = ReorderingMode::Splat; | |||
1442 | else | |||
1443 | ReorderingModes[OpIdx] = ReorderingMode::Opcode; | |||
1444 | } | |||
1445 | else if (isa<Constant>(OpLane0)) | |||
1446 | ReorderingModes[OpIdx] = ReorderingMode::Constant; | |||
1447 | else if (isa<Argument>(OpLane0)) | |||
1448 | // Our best hope is a Splat. It may save some cost in some cases. | |||
1449 | ReorderingModes[OpIdx] = ReorderingMode::Splat; | |||
1450 | else | |||
1451 | // NOTE: This should be unreachable. | |||
1452 | ReorderingModes[OpIdx] = ReorderingMode::Failed; | |||
1453 | } | |||
1454 | ||||
1455 | // If the initial strategy fails for any of the operand indexes, then we | |||
1456 | // perform reordering again in a second pass. This helps avoid assigning | |||
1457 | // high priority to the failed strategy, and should improve reordering for | |||
1458 | // the non-failed operand indexes. | |||
1459 | for (int Pass = 0; Pass != 2; ++Pass) { | |||
1460 | // Skip the second pass if the first pass did not fail. | |||
1461 | bool StrategyFailed = false; | |||
1462 | // Mark all operand data as free to use. | |||
1463 | clearUsed(); | |||
1464 | // We keep the original operand order for the FirstLane, so reorder the | |||
1465 | // rest of the lanes. We are visiting the nodes in a circular fashion, | |||
1466 | // using FirstLane as the center point and increasing the radius | |||
1467 | // distance. | |||
1468 | for (unsigned Distance = 1; Distance != NumLanes; ++Distance) { | |||
1469 | // Visit the lane on the right and then the lane on the left. | |||
1470 | for (int Direction : {+1, -1}) { | |||
1471 | int Lane = FirstLane + Direction * Distance; | |||
1472 | if (Lane < 0 || Lane >= (int)NumLanes) | |||
1473 | continue; | |||
1474 | int LastLane = Lane - Direction; | |||
1475 | assert(LastLane >= 0 && LastLane < (int)NumLanes && | |||
1476 | "Out of bounds"); | |||
1477 | // Look for a good match for each operand. | |||
1478 | for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { | |||
1479 | // Search for the operand that matches SortedOps[OpIdx][Lane-1]. | |||
1480 | Optional<unsigned> BestIdx = | |||
1481 | getBestOperand(OpIdx, Lane, LastLane, ReorderingModes); | |||
1482 | // By not selecting a value, we allow the operands that follow to | |||
1483 | // select a better matching value. We will get a non-null value in | |||
1484 | // the next run of getBestOperand(). | |||
1485 | if (BestIdx) { | |||
1486 | // Swap the current operand with the one returned by | |||
1487 | // getBestOperand(). | |||
1488 | swap(OpIdx, BestIdx.getValue(), Lane); | |||
1489 | } else { | |||
1490 | // We failed to find a best operand, set mode to 'Failed'. | |||
1491 | ReorderingModes[OpIdx] = ReorderingMode::Failed; | |||
1492 | // Enable the second pass. | |||
1493 | StrategyFailed = true; | |||
1494 | } | |||
1495 | } | |||
1496 | } | |||
1497 | } | |||
1498 | // Skip second pass if the strategy did not fail. | |||
1499 | if (!StrategyFailed) | |||
1500 | break; | |||
1501 | } | |||
1502 | } | |||
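// A standalone sketch of the circular lane-visiting order used by reorder(),
// assuming plain integers for lanes (the function name is hypothetical):
// starting from FirstLane, lanes are visited at increasing distance, right
// side first, left side second.
#include <initializer_list>
#include <vector>

static std::vector<int> circularLaneOrderSketch(int FirstLane, int NumLanes) {
  std::vector<int> Order{FirstLane};
  for (int Distance = 1; Distance != NumLanes; ++Distance)
    for (int Direction : {+1, -1}) {
      int Lane = FirstLane + Direction * Distance;
      if (Lane >= 0 && Lane < NumLanes)
        Order.push_back(Lane);
    }
  return Order;
}
// circularLaneOrderSketch(1, 4) yields {1, 2, 0, 3}, matching the visiting
// order described in the comment at the top of reorder().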
1503 | ||||
1504 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
1505 | LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) { | |||
1506 | switch (RMode) { | |||
1507 | case ReorderingMode::Load: | |||
1508 | return "Load"; | |||
1509 | case ReorderingMode::Opcode: | |||
1510 | return "Opcode"; | |||
1511 | case ReorderingMode::Constant: | |||
1512 | return "Constant"; | |||
1513 | case ReorderingMode::Splat: | |||
1514 | return "Splat"; | |||
1515 | case ReorderingMode::Failed: | |||
1516 | return "Failed"; | |||
1517 | } | |||
1518 | llvm_unreachable("Unimplemented Reordering Type"); | |||
1519 | } | |||
1520 | ||||
1521 | LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode, | |||
1522 | raw_ostream &OS) { | |||
1523 | return OS << getModeStr(RMode); | |||
1524 | } | |||
1525 | ||||
1526 | /// Debug print. | |||
1527 | LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) { | |||
1528 | printMode(RMode, dbgs()); | |||
1529 | } | |||
1530 | ||||
1531 | friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) { | |||
1532 | return printMode(RMode, OS); | |||
1533 | } | |||
1534 | ||||
1535 | LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const { | |||
1536 | const unsigned Indent = 2; | |||
1537 | unsigned Cnt = 0; | |||
1538 | for (const OperandDataVec &OpDataVec : OpsVec) { | |||
1539 | OS << "Operand " << Cnt++ << "\n"; | |||
1540 | for (const OperandData &OpData : OpDataVec) { | |||
1541 | OS.indent(Indent) << "{"; | |||
1542 | if (Value *V = OpData.V) | |||
1543 | OS << *V; | |||
1544 | else | |||
1545 | OS << "null"; | |||
1546 | OS << ", APO:" << OpData.APO << "}\n"; | |||
1547 | } | |||
1548 | OS << "\n"; | |||
1549 | } | |||
1550 | return OS; | |||
1551 | } | |||
1552 | ||||
1553 | /// Debug print. | |||
1554 | LLVM_DUMP_METHOD void dump() const { print(dbgs()); } | |||
1555 | #endif | |||
1556 | }; | |||
1557 | ||||
1558 | /// Checks if the instruction is marked for deletion. | |||
1559 | bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); } | |||
1560 | ||||
1561 | /// Marks the operands of the given values for later deletion by replacing them with Undefs. | |||
1562 | void eraseInstructions(ArrayRef<Value *> AV); | |||
1563 | ||||
1564 | ~BoUpSLP(); | |||
1565 | ||||
1566 | private: | |||
1567 | /// Checks if all users of \p I are the part of the vectorization tree. | |||
1568 | bool areAllUsersVectorized(Instruction *I, | |||
1569 | ArrayRef<Value *> VectorizedVals) const; | |||
1570 | ||||
1571 | /// \returns the cost of the vectorizable entry. | |||
1572 | InstructionCost getEntryCost(const TreeEntry *E, | |||
1573 | ArrayRef<Value *> VectorizedVals); | |||
1574 | ||||
1575 | /// This is the recursive part of buildTree. | |||
1576 | void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, | |||
1577 | const EdgeInfo &EI); | |||
1578 | ||||
1579 | /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can | |||
1580 | /// be vectorized to use the original vector (or aggregate "bitcast" to a | |||
1581 | /// vector) and sets \p CurrentOrder to the identity permutation; otherwise | |||
1582 | /// returns false, setting \p CurrentOrder to either an empty vector or a | |||
1583 | /// non-identity permutation that allows to reuse extract instructions. | |||
1584 | bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, | |||
1585 | SmallVectorImpl<unsigned> &CurrentOrder) const; | |||
1586 | ||||
1587 | /// Vectorize a single entry in the tree. | |||
1588 | Value *vectorizeTree(TreeEntry *E); | |||
1589 | ||||
1590 | /// Vectorize a single entry in the tree, starting in \p VL. | |||
1591 | Value *vectorizeTree(ArrayRef<Value *> VL); | |||
1592 | ||||
1593 | /// \returns the scalarization cost for this type. Scalarization in this | |||
1594 | /// context means the creation of vectors from a group of scalars. | |||
1595 | InstructionCost | |||
1596 | getGatherCost(FixedVectorType *Ty, | |||
1597 | const DenseSet<unsigned> &ShuffledIndices) const; | |||
1598 | ||||
1599 | /// Checks if the gathered \p VL can be represented as shuffle(s) of previous | |||
1600 | /// tree entries. | |||
1601 | /// \returns ShuffleKind, if gathered values can be represented as shuffles of | |||
1602 | /// previous tree entries. \p Mask is filled with the shuffle mask. | |||
1603 | Optional<TargetTransformInfo::ShuffleKind> | |||
1604 | isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask, | |||
1605 | SmallVectorImpl<const TreeEntry *> &Entries); | |||
1606 | ||||
1607 | /// \returns the scalarization cost for this list of values. Assuming that | |||
1608 | /// this subtree gets vectorized, we may need to extract the values from the | |||
1609 | /// roots. This method calculates the cost of extracting the values. | |||
1610 | InstructionCost getGatherCost(ArrayRef<Value *> VL) const; | |||
1611 | ||||
1612 | /// Set the Builder insert point to one after the last instruction in | |||
1613 | /// the bundle | |||
1614 | void setInsertPointAfterBundle(const TreeEntry *E); | |||
1615 | ||||
1616 | /// \returns a vector from a collection of scalars in \p VL. | |||
1617 | Value *gather(ArrayRef<Value *> VL); | |||
1618 | ||||
1619 | /// \returns whether the VectorizableTree is fully vectorizable and will | |||
1620 | /// be beneficial even if the tree height is tiny. | |||
1621 | bool isFullyVectorizableTinyTree() const; | |||
1622 | ||||
1623 | /// Reorder commutative or alt operands to get better probability of | |||
1624 | /// generating vectorized code. | |||
1625 | static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, | |||
1626 | SmallVectorImpl<Value *> &Left, | |||
1627 | SmallVectorImpl<Value *> &Right, | |||
1628 | const DataLayout &DL, | |||
1629 | ScalarEvolution &SE, | |||
1630 | const BoUpSLP &R); | |||
1631 | struct TreeEntry { | |||
1632 | using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>; | |||
1633 | TreeEntry(VecTreeTy &Container) : Container(Container) {} | |||
1634 | ||||
1635 | /// \returns true if the scalars in VL are equal to this entry. | |||
1636 | bool isSame(ArrayRef<Value *> VL) const { | |||
1637 | auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) { | |||
1638 | if (Mask.size() != VL.size() && VL.size() == Scalars.size()) | |||
1639 | return std::equal(VL.begin(), VL.end(), Scalars.begin()); | |||
1640 | return VL.size() == Mask.size() && std::equal( | |||
1641 | VL.begin(), VL.end(), Mask.begin(), | |||
1642 | [Scalars](Value *V, int Idx) { return V == Scalars[Idx]; }); | |||
1643 | }; | |||
1644 | if (!ReorderIndices.empty()) { | |||
1645 | // TODO: implement matching if the nodes are just reordered, still can | |||
1646 | // treat the vector as the same if the list of scalars matches VL | |||
1647 | // directly, without reordering. | |||
1648 | SmallVector<int> Mask; | |||
1649 | inversePermutation(ReorderIndices, Mask); | |||
1650 | if (VL.size() == Scalars.size()) | |||
1651 | return IsSame(Scalars, Mask); | |||
1652 | if (VL.size() == ReuseShuffleIndices.size()) { | |||
1653 | ::addMask(Mask, ReuseShuffleIndices); | |||
1654 | return IsSame(Scalars, Mask); | |||
1655 | } | |||
1656 | return false; | |||
1657 | } | |||
1658 | return IsSame(Scalars, ReuseShuffleIndices); | |||
1659 | } | |||
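// A sketch of the mask-based comparison performed by isSame(), with plain
// ints standing in for the scalars (names are illustrative): VL matches the
// entry when VL[i] equals Scalars[Mask[i]] for every position i.
#include <vector>

static bool isSameSketch(const std::vector<int> &VL,
                         const std::vector<int> &Scalars,
                         const std::vector<int> &Mask) {
  if (VL.size() != Mask.size())
    return false;
  for (unsigned I = 0; I != VL.size(); ++I)
    if (VL[I] != Scalars[Mask[I]]) // compare through the permutation mask
      return false;
  return true;
}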
1660 | ||||
1661 | /// A vector of scalars. | |||
1662 | ValueList Scalars; | |||
1663 | ||||
1664 | /// The Scalars are vectorized into this value. It is initialized to Null. | |||
1665 | Value *VectorizedValue = nullptr; | |||
1666 | ||||
1667 | /// Do we need to gather this sequence or vectorize it | |||
1668 | /// (either with vector instruction or with scatter/gather | |||
1669 | /// intrinsics for store/load)? | |||
1670 | enum EntryState { Vectorize, ScatterVectorize, NeedToGather }; | |||
1671 | EntryState State; | |||
1672 | ||||
1673 | /// Does this sequence require some shuffling? | |||
1674 | SmallVector<int, 4> ReuseShuffleIndices; | |||
1675 | ||||
1676 | /// Does this entry require reordering? | |||
1677 | SmallVector<unsigned, 4> ReorderIndices; | |||
1678 | ||||
1679 | /// Points back to the VectorizableTree. | |||
1680 | /// | |||
1681 | /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has | |||
1682 | /// to be a pointer and needs to be able to initialize the child iterator. | |||
1683 | /// Thus we need a reference back to the container to translate the indices | |||
1684 | /// to entries. | |||
1685 | VecTreeTy &Container; | |||
1686 | ||||
1687 | /// The TreeEntry index containing the user of this entry. We can actually | |||
1688 | /// have multiple users so the data structure is not truly a tree. | |||
1689 | SmallVector<EdgeInfo, 1> UserTreeIndices; | |||
1690 | ||||
1691 | /// The index of this treeEntry in VectorizableTree. | |||
1692 | int Idx = -1; | |||
1693 | ||||
1694 | private: | |||
1695 | /// The operands of each instruction in each lane Operands[op_index][lane]. | |||
1696 | /// Note: This helps avoid the replication of the code that performs the | |||
1697 | /// reordering of operands during buildTree_rec() and vectorizeTree(). | |||
1698 | SmallVector<ValueList, 2> Operands; | |||
1699 | ||||
1700 | /// The main/alternate instruction. | |||
1701 | Instruction *MainOp = nullptr; | |||
1702 | Instruction *AltOp = nullptr; | |||
1703 | ||||
1704 | public: | |||
1705 | /// Set this bundle's \p OpIdx'th operand to \p OpVL. | |||
1706 | void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) { | |||
1707 | if (Operands.size() < OpIdx + 1) | |||
1708 | Operands.resize(OpIdx + 1); | |||
1709 | assert(Operands[OpIdx].empty() && "Already resized?"); | |||
1710 | Operands[OpIdx].resize(Scalars.size()); | |||
1711 | for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane) | |||
1712 | Operands[OpIdx][Lane] = OpVL[Lane]; | |||
1713 | } | |||
1714 | ||||
1715 | /// Set the operands of this bundle in their original order. | |||
1716 | void setOperandsInOrder() { | |||
1717 | assert(Operands.empty() && "Already initialized?"); | |||
1718 | auto *I0 = cast<Instruction>(Scalars[0]); | |||
1719 | Operands.resize(I0->getNumOperands()); | |||
1720 | unsigned NumLanes = Scalars.size(); | |||
1721 | for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands(); | |||
1722 | OpIdx != NumOperands; ++OpIdx) { | |||
1723 | Operands[OpIdx].resize(NumLanes); | |||
1724 | for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { | |||
1725 | auto *I = cast<Instruction>(Scalars[Lane]); | |||
1726 | assert(I->getNumOperands() == NumOperands && | |||
1727 | "Expected same number of operands"); | |||
1728 | Operands[OpIdx][Lane] = I->getOperand(OpIdx); | |||
1729 | } | |||
1730 | } | |||
1731 | } | |||
1732 | ||||
1733 | /// Reorders operands of the node to the given mask \p Mask. | |||
1734 | void reorderOperands(ArrayRef<int> Mask) { | |||
1735 | for (ValueList &Operand : Operands) | |||
1736 | reorderScalars(Operand, Mask); | |||
1737 | } | |||
1738 | ||||
1739 | /// \returns the \p OpIdx operand of this TreeEntry. | |||
1740 | ValueList &getOperand(unsigned OpIdx) { | |||
1741 | assert(OpIdx < Operands.size() && "Off bounds"); | |||
1742 | return Operands[OpIdx]; | |||
1743 | } | |||
1744 | ||||
1745 | /// \returns the number of operands. | |||
1746 | unsigned getNumOperands() const { return Operands.size(); } | |||
1747 | ||||
1748 | /// \return the single \p OpIdx operand. | |||
1749 | Value *getSingleOperand(unsigned OpIdx) const { | |||
1750 | assert(OpIdx < Operands.size() && "Off bounds"); | |||
1751 | assert(!Operands[OpIdx].empty() && "No operand available"); | |||
1752 | return Operands[OpIdx][0]; | |||
1753 | } | |||
1754 | ||||
1755 | /// Some of the instructions in the list have alternate opcodes. | |||
1756 | bool isAltShuffle() const { | |||
1757 | return getOpcode() != getAltOpcode(); | |||
1758 | } | |||
1759 | ||||
1760 | bool isOpcodeOrAlt(Instruction *I) const { | |||
1761 | unsigned CheckedOpcode = I->getOpcode(); | |||
1762 | return (getOpcode() == CheckedOpcode || | |||
1763 | getAltOpcode() == CheckedOpcode); | |||
1764 | } | |||
1765 | ||||
1766 | /// Chooses the correct key for scheduling data. If \p Op has the same (or | |||
1767 | /// alternate) opcode as the main instruction, the key is \p Op. Otherwise | |||
1768 | /// the key is the main instruction (MainOp). | |||
1769 | Value *isOneOf(Value *Op) const { | |||
1770 | auto *I = dyn_cast<Instruction>(Op); | |||
1771 | if (I && isOpcodeOrAlt(I)) | |||
1772 | return Op; | |||
1773 | return MainOp; | |||
1774 | } | |||
1775 | ||||
1776 | void setOperations(const InstructionsState &S) { | |||
1777 | MainOp = S.MainOp; | |||
1778 | AltOp = S.AltOp; | |||
1779 | } | |||
1780 | ||||
1781 | Instruction *getMainOp() const { | |||
1782 | return MainOp; | |||
1783 | } | |||
1784 | ||||
1785 | Instruction *getAltOp() const { | |||
1786 | return AltOp; | |||
1787 | } | |||
1788 | ||||
1789 | /// The main/alternate opcodes for the list of instructions. | |||
1790 | unsigned getOpcode() const { | |||
1791 | return MainOp ? MainOp->getOpcode() : 0; | |||
1792 | } | |||
1793 | ||||
1794 | unsigned getAltOpcode() const { | |||
1795 | return AltOp ? AltOp->getOpcode() : 0; | |||
1796 | } | |||
1797 | ||||
1798 | /// When ReuseShuffleIndices and ReorderIndices are empty, this returns the | |||
1799 | /// position of \p V within Scalars. Otherwise the lane is remapped through them. | |||
1800 | int findLaneForValue(Value *V) const { | |||
1801 | unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V)); | |||
1802 | assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); | |||
1803 | if (!ReorderIndices.empty()) | |||
1804 | FoundLane = ReorderIndices[FoundLane]; | |||
1805 | assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); | |||
1806 | if (!ReuseShuffleIndices.empty()) { | |||
1807 | FoundLane = std::distance(ReuseShuffleIndices.begin(), | |||
1808 | find(ReuseShuffleIndices, FoundLane)); | |||
1809 | } | |||
1810 | return FoundLane; | |||
1811 | } | |||
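// A sketch of the lane remapping done by findLaneForValue(), using ints in
// place of Values (names are illustrative): the position within Scalars is
// first translated through ReorderIndices (if any) and then located inside
// ReuseShuffleIndices (if any).
#include <algorithm>
#include <vector>

static int findLaneSketch(int V, const std::vector<int> &Scalars,
                          const std::vector<int> &ReorderIndices,
                          const std::vector<int> &ReuseShuffleIndices) {
  int Lane = std::find(Scalars.begin(), Scalars.end(), V) - Scalars.begin();
  if (!ReorderIndices.empty())
    Lane = ReorderIndices[Lane];            // remap through the reorder mask
  if (!ReuseShuffleIndices.empty())
    Lane = std::find(ReuseShuffleIndices.begin(), ReuseShuffleIndices.end(),
                     Lane) -
           ReuseShuffleIndices.begin();     // locate inside the reuse mask
  return Lane;
}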
1812 | ||||
1813 | #ifndef NDEBUG | |||
1814 | /// Debug printer. | |||
1815 | LLVM_DUMP_METHOD void dump() const { | |||
1816 | dbgs() << Idx << ".\n"; | |||
1817 | for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) { | |||
1818 | dbgs() << "Operand " << OpI << ":\n"; | |||
1819 | for (const Value *V : Operands[OpI]) | |||
1820 | dbgs().indent(2) << *V << "\n"; | |||
1821 | } | |||
1822 | dbgs() << "Scalars: \n"; | |||
1823 | for (Value *V : Scalars) | |||
1824 | dbgs().indent(2) << *V << "\n"; | |||
1825 | dbgs() << "State: "; | |||
1826 | switch (State) { | |||
1827 | case Vectorize: | |||
1828 | dbgs() << "Vectorize\n"; | |||
1829 | break; | |||
1830 | case ScatterVectorize: | |||
1831 | dbgs() << "ScatterVectorize\n"; | |||
1832 | break; | |||
1833 | case NeedToGather: | |||
1834 | dbgs() << "NeedToGather\n"; | |||
1835 | break; | |||
1836 | } | |||
1837 | dbgs() << "MainOp: "; | |||
1838 | if (MainOp) | |||
1839 | dbgs() << *MainOp << "\n"; | |||
1840 | else | |||
1841 | dbgs() << "NULL\n"; | |||
1842 | dbgs() << "AltOp: "; | |||
1843 | if (AltOp) | |||
1844 | dbgs() << *AltOp << "\n"; | |||
1845 | else | |||
1846 | dbgs() << "NULL\n"; | |||
1847 | dbgs() << "VectorizedValue: "; | |||
1848 | if (VectorizedValue) | |||
1849 | dbgs() << *VectorizedValue << "\n"; | |||
1850 | else | |||
1851 | dbgs() << "NULL\n"; | |||
1852 | dbgs() << "ReuseShuffleIndices: "; | |||
1853 | if (ReuseShuffleIndices.empty()) | |||
1854 | dbgs() << "Empty"; | |||
1855 | else | |||
1856 | for (unsigned ReuseIdx : ReuseShuffleIndices) | |||
1857 | dbgs() << ReuseIdx << ", "; | |||
1858 | dbgs() << "\n"; | |||
1859 | dbgs() << "ReorderIndices: "; | |||
1860 | for (unsigned ReorderIdx : ReorderIndices) | |||
1861 | dbgs() << ReorderIdx << ", "; | |||
1862 | dbgs() << "\n"; | |||
1863 | dbgs() << "UserTreeIndices: "; | |||
1864 | for (const auto &EInfo : UserTreeIndices) | |||
1865 | dbgs() << EInfo << ", "; | |||
1866 | dbgs() << "\n"; | |||
1867 | } | |||
1868 | #endif | |||
1869 | }; | |||
1870 | ||||
1871 | #ifndef NDEBUG | |||
1872 | void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost, | |||
1873 | InstructionCost VecCost, | |||
1874 | InstructionCost ScalarCost) const { | |||
1875 | dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump(); | |||
1876 | dbgs() << "SLP: Costs:\n"; | |||
1877 | dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n"; | |||
1878 | dbgs() << "SLP: VectorCost = " << VecCost << "\n"; | |||
1879 | dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n"; | |||
1880 | dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " << | |||
1881 | ReuseShuffleCost + VecCost - ScalarCost << "\n"; | |||
1882 | } | |||
1883 | #endif | |||
1884 | ||||
1885 | /// Create a new VectorizableTree entry. | |||
1886 | TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle, | |||
1887 | const InstructionsState &S, | |||
1888 | const EdgeInfo &UserTreeIdx, | |||
1889 | ArrayRef<int> ReuseShuffleIndices = None, | |||
1890 | ArrayRef<unsigned> ReorderIndices = None) { | |||
1891 | TreeEntry::EntryState EntryState = | |||
1892 | Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather; | |||
1893 | return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx, | |||
1894 | ReuseShuffleIndices, ReorderIndices); | |||
1895 | } | |||
1896 | ||||
1897 | TreeEntry *newTreeEntry(ArrayRef<Value *> VL, | |||
1898 | TreeEntry::EntryState EntryState, | |||
1899 | Optional<ScheduleData *> Bundle, | |||
1900 | const InstructionsState &S, | |||
1901 | const EdgeInfo &UserTreeIdx, | |||
1902 | ArrayRef<int> ReuseShuffleIndices = None, | |||
1903 | ArrayRef<unsigned> ReorderIndices = None) { | |||
1904 | assert(((!Bundle && EntryState == TreeEntry::NeedToGather) || | |||
1905 | (Bundle && EntryState != TreeEntry::NeedToGather)) && | |||
1906 | "Need to vectorize gather entry?"); | |||
1907 | VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree)); | |||
1908 | TreeEntry *Last = VectorizableTree.back().get(); | |||
1909 | Last->Idx = VectorizableTree.size() - 1; | |||
1910 | Last->State = EntryState; | |||
1911 | Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(), | |||
1912 | ReuseShuffleIndices.end()); | |||
1913 | if (ReorderIndices.empty()) { | |||
1914 | Last->Scalars.assign(VL.begin(), VL.end()); | |||
1915 | Last->setOperations(S); | |||
1916 | } else { | |||
1917 | // Reorder scalars and build final mask. | |||
1918 | Last->Scalars.assign(VL.size(), nullptr); | |||
1919 | transform(ReorderIndices, Last->Scalars.begin(), | |||
1920 | [VL](unsigned Idx) -> Value * { | |||
1921 | if (Idx >= VL.size()) | |||
1922 | return UndefValue::get(VL.front()->getType()); | |||
1923 | return VL[Idx]; | |||
1924 | }); | |||
1925 | InstructionsState S = getSameOpcode(Last->Scalars); | |||
1926 | Last->setOperations(S); | |||
1927 | Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end()); | |||
1928 | } | |||
1929 | if (Last->State != TreeEntry::NeedToGather) { | |||
1930 | for (Value *V : VL) { | |||
1931 | assert(!getTreeEntry(V) && "Scalar already in tree!"); | |||
1932 | ScalarToTreeEntry[V] = Last; | |||
1933 | } | |||
1934 | // Update the scheduler bundle to point to this TreeEntry. | |||
1935 | unsigned Lane = 0; | |||
1936 | for (ScheduleData *BundleMember = Bundle.getValue(); BundleMember; | |||
1937 | BundleMember = BundleMember->NextInBundle) { | |||
1938 | BundleMember->TE = Last; | |||
1939 | BundleMember->Lane = Lane; | |||
1940 | ++Lane; | |||
1941 | } | |||
1942 | assert((!Bundle.getValue() || Lane == VL.size()) && | |||
1943 | "Bundle and VL out of sync"); | |||
1944 | } else { | |||
1945 | MustGather.insert(VL.begin(), VL.end()); | |||
1946 | } | |||
1947 | ||||
1948 | if (UserTreeIdx.UserTE) | |||
1949 | Last->UserTreeIndices.push_back(UserTreeIdx); | |||
1950 | ||||
1951 | return Last; | |||
1952 | } | |||
1953 | ||||
1954 | /// -- Vectorization State -- | |||
1955 | /// Holds all of the tree entries. | |||
1956 | TreeEntry::VecTreeTy VectorizableTree; | |||
1957 | ||||
1958 | #ifndef NDEBUG | |||
1959 | /// Debug printer. | |||
1960 | LLVM_DUMP_METHOD void dumpVectorizableTree() const { | |||
1961 | for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) { | |||
1962 | VectorizableTree[Id]->dump(); | |||
1963 | dbgs() << "\n"; | |||
1964 | } | |||
1965 | } | |||
1966 | #endif | |||
1967 | ||||
1968 | TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); } | |||
1969 | ||||
1970 | const TreeEntry *getTreeEntry(Value *V) const { | |||
1971 | return ScalarToTreeEntry.lookup(V); | |||
1972 | } | |||
1973 | ||||
1974 | /// Maps a specific scalar to its tree entry. | |||
1975 | SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry; | |||
1976 | ||||
1977 | /// Maps a value to the proposed vectorizable size. | |||
1978 | SmallDenseMap<Value *, unsigned> InstrElementSize; | |||
1979 | ||||
1980 | /// A list of scalars that we found that we need to keep as scalars. | |||
1981 | ValueSet MustGather; | |||
1982 | ||||
1983 | /// This POD struct describes one external user in the vectorized tree. | |||
1984 | struct ExternalUser { | |||
1985 | ExternalUser(Value *S, llvm::User *U, int L) | |||
1986 | : Scalar(S), User(U), Lane(L) {} | |||
1987 | ||||
1988 | // Which scalar in our function. | |||
1989 | Value *Scalar; | |||
1990 | ||||
1991 | // Which user that uses the scalar. | |||
1992 | llvm::User *User; | |||
1993 | ||||
1994 | // Which lane does the scalar belong to. | |||
1995 | int Lane; | |||
1996 | }; | |||
1997 | using UserList = SmallVector<ExternalUser, 16>; | |||
1998 | ||||
1999 | /// Checks if two instructions may access the same memory. | |||
2000 | /// | |||
2001 | /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it | |||
2002 | /// is invariant in the calling loop. | |||
2003 | bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1, | |||
2004 | Instruction *Inst2) { | |||
2005 | // First check if the result is already in the cache. | |||
2006 | AliasCacheKey key = std::make_pair(Inst1, Inst2); | |||
2007 | Optional<bool> &result = AliasCache[key]; | |||
2008 | if (result.hasValue()) { | |||
2009 | return result.getValue(); | |||
2010 | } | |||
2011 | MemoryLocation Loc2 = getLocation(Inst2, AA); | |||
2012 | bool aliased = true; | |||
2013 | if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) { | |||
2014 | // Do the alias check. | |||
2015 | aliased = !AA->isNoAlias(Loc1, Loc2); | |||
2016 | } | |||
2017 | // Store the result in the cache. | |||
2018 | result = aliased; | |||
2019 | return aliased; | |||
2020 | } | |||
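// A sketch of the alias-result memoisation above, with a stand-in predicate
// and hypothetical names: the expensive pairwise query is evaluated once per
// (A, B) key and the cached answer is reused on subsequent calls.
#include <functional>
#include <map>
#include <optional>
#include <utility>

struct AliasCacheSketch {
  std::map<std::pair<const void *, const void *>, std::optional<bool>> Cache;

  bool query(const void *A, const void *B,
             const std::function<bool(const void *, const void *)> &MayAlias) {
    std::optional<bool> &Result = Cache[std::make_pair(A, B)];
    if (Result)
      return *Result;          // cache hit: reuse the previous answer
    Result = MayAlias(A, B);   // compute once, remember the answer
    return *Result;
  }
};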
2021 | ||||
2022 | using AliasCacheKey = std::pair<Instruction *, Instruction *>; | |||
2023 | ||||
2024 | /// Cache for alias results. | |||
2025 | /// TODO: consider moving this to the AliasAnalysis itself. | |||
2026 | DenseMap<AliasCacheKey, Optional<bool>> AliasCache; | |||
2027 | ||||
2028 | /// Removes an instruction from its block and eventually deletes it. | |||
2029 | /// It's like Instruction::eraseFromParent() except that the actual deletion | |||
2030 | /// is delayed until BoUpSLP is destructed. | |||
2031 | /// This is required to ensure that there are no incorrect collisions in the | |||
2032 | /// AliasCache, which can happen if a new instruction is allocated at the | |||
2033 | /// same address as a previously deleted instruction. | |||
2034 | void eraseInstruction(Instruction *I, bool ReplaceOpsWithUndef = false) { | |||
2035 | auto It = DeletedInstructions.try_emplace(I, ReplaceOpsWithUndef).first; | |||
2036 | It->getSecond() = It->getSecond() && ReplaceOpsWithUndef; | |||
2037 | } | |||
2038 | ||||
2039 | /// Temporary store for deleted instructions. Instructions will be deleted | |||
2040 | /// eventually when the BoUpSLP is destructed. | |||
2041 | DenseMap<Instruction *, bool> DeletedInstructions; | |||
2042 | ||||
2043 | /// A list of values that need to be extracted out of the tree. | |||
2044 | /// This list holds pairs of (Internal Scalar : External User). External User | |||
2045 | /// can be nullptr, it means that this Internal Scalar will be used later, | |||
2046 | /// after vectorization. | |||
2047 | UserList ExternalUses; | |||
2048 | ||||
2049 | /// Values used only by @llvm.assume calls. | |||
2050 | SmallPtrSet<const Value *, 32> EphValues; | |||
2051 | ||||
2052 | /// Holds all of the instructions that we gathered. | |||
2053 | SetVector<Instruction *> GatherSeq; | |||
2054 | ||||
2055 | /// A list of blocks that we are going to CSE. | |||
2056 | SetVector<BasicBlock *> CSEBlocks; | |||
2057 | ||||
2058 | /// Contains all scheduling relevant data for an instruction. | |||
2059 | /// A ScheduleData either represents a single instruction or a member of an | |||
2060 | /// instruction bundle (= a group of instructions which is combined into a | |||
2061 | /// vector instruction). | |||
2062 | struct ScheduleData { | |||
2063 | // The initial value for the dependency counters. It means that the | |||
2064 | // dependencies are not calculated yet. | |||
2065 | enum { InvalidDeps = -1 }; | |||
2066 | ||||
2067 | ScheduleData() = default; | |||
2068 | ||||
2069 | void init(int BlockSchedulingRegionID, Value *OpVal) { | |||
2070 | FirstInBundle = this; | |||
2071 | NextInBundle = nullptr; | |||
2072 | NextLoadStore = nullptr; | |||
2073 | IsScheduled = false; | |||
2074 | SchedulingRegionID = BlockSchedulingRegionID; | |||
2075 | UnscheduledDepsInBundle = UnscheduledDeps; | |||
2076 | clearDependencies(); | |||
2077 | OpValue = OpVal; | |||
2078 | TE = nullptr; | |||
2079 | Lane = -1; | |||
2080 | } | |||
2081 | ||||
2082 | /// Returns true if the dependency information has been calculated. | |||
2083 | bool hasValidDependencies() const { return Dependencies != InvalidDeps; } | |||
2084 | ||||
2085 | /// Returns true for single instructions and for bundle representatives | |||
2086 | /// (= the head of a bundle). | |||
2087 | bool isSchedulingEntity() const { return FirstInBundle == this; } | |||
2088 | ||||
2089 | /// Returns true if it represents an instruction bundle and not only a | |||
2090 | /// single instruction. | |||
2091 | bool isPartOfBundle() const { | |||
2092 | return NextInBundle != nullptr || FirstInBundle != this; | |||
2093 | } | |||
2094 | ||||
2095 | /// Returns true if it is ready for scheduling, i.e. it has no more | |||
2096 | /// unscheduled depending instructions/bundles. | |||
2097 | bool isReady() const { | |||
2098 | assert(isSchedulingEntity() && | |||
2099 | "can't consider non-scheduling entity for ready list"); | |||
2100 | return UnscheduledDepsInBundle == 0 && !IsScheduled; | |||
2101 | } | |||
2102 | ||||
2103 | /// Modifies the number of unscheduled dependencies, also updating it for | |||
2104 | /// the whole bundle. | |||
2105 | int incrementUnscheduledDeps(int Incr) { | |||
2106 | UnscheduledDeps += Incr; | |||
2107 | return FirstInBundle->UnscheduledDepsInBundle += Incr; | |||
2108 | } | |||
2109 | ||||
2110 | /// Sets the number of unscheduled dependencies to the number of | |||
2111 | /// dependencies. | |||
2112 | void resetUnscheduledDeps() { | |||
2113 | incrementUnscheduledDeps(Dependencies - UnscheduledDeps); | |||
2114 | } | |||
2115 | ||||
2116 | /// Clears all dependency information. | |||
2117 | void clearDependencies() { | |||
2118 | Dependencies = InvalidDeps; | |||
2119 | resetUnscheduledDeps(); | |||
2120 | MemoryDependencies.clear(); | |||
2121 | } | |||
2122 | ||||
2123 | void dump(raw_ostream &os) const { | |||
2124 | if (!isSchedulingEntity()) { | |||
2125 | os << "/ " << *Inst; | |||
2126 | } else if (NextInBundle) { | |||
2127 | os << '[' << *Inst; | |||
2128 | ScheduleData *SD = NextInBundle; | |||
2129 | while (SD) { | |||
2130 | os << ';' << *SD->Inst; | |||
2131 | SD = SD->NextInBundle; | |||
2132 | } | |||
2133 | os << ']'; | |||
2134 | } else { | |||
2135 | os << *Inst; | |||
2136 | } | |||
2137 | } | |||
2138 | ||||
2139 | Instruction *Inst = nullptr; | |||
2140 | ||||
2141 | /// Points to the head in an instruction bundle (and always to this for | |||
2142 | /// single instructions). | |||
2143 | ScheduleData *FirstInBundle = nullptr; | |||
2144 | ||||
2145 | /// Singly linked list of all instructions in a bundle. Null if it is a | |||
2146 | /// single instruction. | |||
2147 | ScheduleData *NextInBundle = nullptr; | |||
2148 | ||||
2149 | /// Singly linked list of all memory instructions (e.g. load, store, call) | |||
2150 | /// in the block - until the end of the scheduling region. | |||
2151 | ScheduleData *NextLoadStore = nullptr; | |||
2152 | ||||
2153 | /// The dependent memory instructions. | |||
2154 | /// This list is derived on demand in calculateDependencies(). | |||
2155 | SmallVector<ScheduleData *, 4> MemoryDependencies; | |||
2156 | ||||
2157 | /// This ScheduleData is in the current scheduling region if this matches | |||
2158 | /// the current SchedulingRegionID of BlockScheduling. | |||
2159 | int SchedulingRegionID = 0; | |||
2160 | ||||
2161 | /// Used for getting a "good" final ordering of instructions. | |||
2162 | int SchedulingPriority = 0; | |||
2163 | ||||
2164 | /// The number of dependencies. Consists of the number of users of the | |||
2165 | /// instruction plus the number of dependent memory instructions (if any). | |||
2166 | /// This value is calculated on demand. | |||
2167 | /// If InvalidDeps, the number of dependencies is not calculated yet. | |||
2168 | int Dependencies = InvalidDeps; | |||
2169 | ||||
2170 | /// The number of dependencies minus the number of dependencies of scheduled | |||
2171 | /// instructions. As soon as this is zero, the instruction/bundle gets ready | |||
2172 | /// for scheduling. | |||
2173 | /// Note that this is negative as long as Dependencies is not calculated. | |||
2174 | int UnscheduledDeps = InvalidDeps; | |||
2175 | ||||
2176 | /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for | |||
2177 | /// single instructions. | |||
2178 | int UnscheduledDepsInBundle = InvalidDeps; | |||
2179 | ||||
2180 | /// True if this instruction is scheduled (or considered as scheduled in the | |||
2181 | /// dry-run). | |||
2182 | bool IsScheduled = false; | |||
2183 | ||||
2184 | /// Opcode of the current instruction in the schedule data. | |||
2185 | Value *OpValue = nullptr; | |||
2186 | ||||
2187 | /// The TreeEntry that this instruction corresponds to. | |||
2188 | TreeEntry *TE = nullptr; | |||
2189 | ||||
2190 | /// The lane of this node in the TreeEntry. | |||
2191 | int Lane = -1; | |||
2192 | }; | |||
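The counters above work per bundle: incrementUnscheduledDeps() updates the member's own UnscheduledDeps and forwards the delta to the bundle head's UnscheduledDepsInBundle, and isReady() fires on the head once that aggregate reaches zero. A minimal stand-alone sketch of that interaction (a hypothetical two-node bundle, not the real ScheduleData):

#include <cassert>

// Stripped-down version of the counters above: two nodes forming one bundle,
// headed by N0.
struct Node {
  Node *FirstInBundle = this;
  int UnscheduledDeps = 0;
  int UnscheduledDepsInBundle = 0;
  bool IsScheduled = false;

  int incrementUnscheduledDeps(int Incr) {
    UnscheduledDeps += Incr;
    return FirstInBundle->UnscheduledDepsInBundle += Incr;
  }
  bool isReady() const { return UnscheduledDepsInBundle == 0 && !IsScheduled; }
};

int main() {
  Node N0, N1;
  N1.FirstInBundle = &N0;          // N0 is the bundle head.
  N0.incrementUnscheduledDeps(1);  // N0 waits on one operand.
  N1.incrementUnscheduledDeps(1);  // N1 waits on one operand.
  assert(!N0.isReady());           // Bundle total is 2, not ready yet.
  N0.incrementUnscheduledDeps(-1);
  N1.incrementUnscheduledDeps(-1); // Last dependency scheduled...
  assert(N0.isReady());            // ...so the bundle head becomes ready.
  return 0;
}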
2193 | ||||
2194 | #ifndef NDEBUG | |||
2195 | friend inline raw_ostream &operator<<(raw_ostream &os, | |||
2196 | const BoUpSLP::ScheduleData &SD) { | |||
2197 | SD.dump(os); | |||
2198 | return os; | |||
2199 | } | |||
2200 | #endif | |||
2201 | ||||
2202 | friend struct GraphTraits<BoUpSLP *>; | |||
2203 | friend struct DOTGraphTraits<BoUpSLP *>; | |||
2204 | ||||
2205 | /// Contains all scheduling data for a basic block. | |||
2206 | struct BlockScheduling { | |||
2207 | BlockScheduling(BasicBlock *BB) | |||
2208 | : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {} | |||
2209 | ||||
2210 | void clear() { | |||
2211 | ReadyInsts.clear(); | |||
2212 | ScheduleStart = nullptr; | |||
2213 | ScheduleEnd = nullptr; | |||
2214 | FirstLoadStoreInRegion = nullptr; | |||
2215 | LastLoadStoreInRegion = nullptr; | |||
2216 | ||||
2217 | // Reduce the maximum schedule region size by the size of the | |||
2218 | // previous scheduling run. | |||
2219 | ScheduleRegionSizeLimit -= ScheduleRegionSize; | |||
2220 | if (ScheduleRegionSizeLimit < MinScheduleRegionSize) | |||
2221 | ScheduleRegionSizeLimit = MinScheduleRegionSize; | |||
2222 | ScheduleRegionSize = 0; | |||
2223 | ||||
2224 | // Make a new scheduling region, i.e. all existing ScheduleData is not | |||
2225 | // in the new region yet. | |||
2226 | ++SchedulingRegionID; | |||
2227 | } | |||
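Note that clear() never walks the ScheduleData map; bumping SchedulingRegionID is enough, because getScheduleData() treats any entry tagged with an older region ID as absent. A stand-alone sketch of this generation-counter invalidation idiom (names are illustrative only):

#include <cassert>
#include <string>
#include <unordered_map>

// Generation-counter invalidation, as used by SchedulingRegionID above:
// instead of erasing stale entries, bump the current generation and ignore
// entries tagged with an older one.
struct Entry {
  int Generation = 0;
  int Data = 0;
};

struct Cache {
  std::unordered_map<std::string, Entry> Map;
  int CurrentGeneration = 1;

  Entry *lookup(const std::string &Key) {
    auto It = Map.find(Key);
    if (It != Map.end() && It->second.Generation == CurrentGeneration)
      return &It->second;
    return nullptr; // Physically present, but logically stale (or missing).
  }
  void insert(const std::string &Key, int Data) {
    Map[Key] = {CurrentGeneration, Data};
  }
  void clear() { ++CurrentGeneration; } // O(1) "clear" of the whole cache.
};

int main() {
  Cache C;
  C.insert("a", 42);
  assert(C.lookup("a"));
  C.clear();              // Equivalent of ++SchedulingRegionID.
  assert(!C.lookup("a")); // Old entry is still allocated but invisible.
  return 0;
}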
2228 | ||||
2229 | ScheduleData *getScheduleData(Value *V) { | |||
2230 | ScheduleData *SD = ScheduleDataMap[V]; | |||
2231 | if (SD && SD->SchedulingRegionID == SchedulingRegionID) | |||
2232 | return SD; | |||
2233 | return nullptr; | |||
2234 | } | |||
2235 | ||||
2236 | ScheduleData *getScheduleData(Value *V, Value *Key) { | |||
2237 | if (V == Key) | |||
2238 | return getScheduleData(V); | |||
2239 | auto I = ExtraScheduleDataMap.find(V); | |||
2240 | if (I != ExtraScheduleDataMap.end()) { | |||
2241 | ScheduleData *SD = I->second[Key]; | |||
2242 | if (SD && SD->SchedulingRegionID == SchedulingRegionID) | |||
2243 | return SD; | |||
2244 | } | |||
2245 | return nullptr; | |||
2246 | } | |||
2247 | ||||
2248 | bool isInSchedulingRegion(ScheduleData *SD) const { | |||
2249 | return SD->SchedulingRegionID == SchedulingRegionID; | |||
2250 | } | |||
2251 | ||||
2252 | /// Marks an instruction as scheduled and puts all dependent ready | |||
2253 | /// instructions into the ready-list. | |||
2254 | template <typename ReadyListType> | |||
2255 | void schedule(ScheduleData *SD, ReadyListType &ReadyList) { | |||
2256 | SD->IsScheduled = true; | |||
| ||||
2257 | LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); | |||
2258 | ||||
2259 | ScheduleData *BundleMember = SD; | |||
2260 | while (BundleMember) { | |||
2261 | if (BundleMember->Inst != BundleMember->OpValue) { | |||
2262 | BundleMember = BundleMember->NextInBundle; | |||
2263 | continue; | |||
2264 | } | |||
2265 | // Handle the def-use chain dependencies. | |||
2266 | ||||
2267 | // Decrement the unscheduled counter and insert into the ready list if ready. | |||
2268 | auto &&DecrUnsched = [this, &ReadyList](Instruction *I) { | |||
2269 | doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) { | |||
2270 | if (OpDef && OpDef->hasValidDependencies() && | |||
2271 | OpDef->incrementUnscheduledDeps(-1) == 0) { | |||
2272 | // There are no more unscheduled dependencies after | |||
2273 | // decrementing, so we can put the dependent instruction | |||
2274 | // into the ready list. | |||
2275 | ScheduleData *DepBundle = OpDef->FirstInBundle; | |||
2276 | assert(!DepBundle->IsScheduled && | |||
2277 | "already scheduled bundle gets ready"); | |||
2278 | ReadyList.insert(DepBundle); | |||
2279 | LLVM_DEBUG(dbgs() | |||
2280 | << "SLP: gets ready (def): " << *DepBundle << "\n"); | |||
2281 | } | |||
2282 | }); | |||
2283 | }; | |||
2284 | ||||
2285 | // If BundleMember is a vector bundle, its operands may have been | |||
2286 | // reordered during buildTree(). We therefore need to get its operands | |||
2287 | // through the TreeEntry. | |||
2288 | if (TreeEntry *TE = BundleMember->TE) { | |||
2289 | int Lane = BundleMember->Lane; | |||
2290 | assert(Lane >= 0 && "Lane not set"); | |||
2291 | ||||
2292 | // Since the vectorization tree is built recursively, this assertion | |||
2293 | // ensures that the tree entry has all operands set before reaching | |||
2294 | // this code. A couple of exceptions known at the moment are extracts, | |||
2295 | // whose second (immediate) operand is not added. Since immediates | |||
2296 | // do not affect scheduler behavior, this is considered | |||
2297 | // okay. | |||
2298 | auto *In = TE->getMainOp(); | |||
2299 | assert(In && | |||
2300 | (isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) || | |||
2301 | In->getNumOperands() == TE->getNumOperands()) && | |||
2302 | "Missed TreeEntry operands?"); | |||
2303 | (void)In; // fake use to avoid build failure when assertions disabled | |||
2304 | ||||
2305 | for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands(); | |||
2306 | OpIdx != NumOperands; ++OpIdx) | |||
2307 | if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane])) | |||
2308 | DecrUnsched(I); | |||
2309 | } else { | |||
2310 | // If BundleMember is a stand-alone instruction, no operand reordering | |||
2311 | // has taken place, so we directly access its operands. | |||
2312 | for (Use &U : BundleMember->Inst->operands()) | |||
2313 | if (auto *I = dyn_cast<Instruction>(U.get())) | |||
2314 | DecrUnsched(I); | |||
2315 | } | |||
2316 | // Handle the memory dependencies. | |||
2317 | for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) { | |||
2318 | if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) { | |||
2319 | // There are no more unscheduled dependencies after decrementing, | |||
2320 | // so we can put the dependent instruction into the ready list. | |||
2321 | ScheduleData *DepBundle = MemoryDepSD->FirstInBundle; | |||
2322 | assert(!DepBundle->IsScheduled && | |||
2323 | "already scheduled bundle gets ready"); | |||
2324 | ReadyList.insert(DepBundle); | |||
2325 | LLVM_DEBUG(dbgs() | |||
2326 | << "SLP: gets ready (mem): " << *DepBundle << "\n"); | |||
2327 | } | |||
2328 | } | |||
2329 | BundleMember = BundleMember->NextInBundle; | |||
2330 | } | |||
2331 | } | |||
2332 | ||||
2333 | void doForAllOpcodes(Value *V, | |||
2334 | function_ref<void(ScheduleData *SD)> Action) { | |||
2335 | if (ScheduleData *SD = getScheduleData(V)) | |||
2336 | Action(SD); | |||
2337 | auto I = ExtraScheduleDataMap.find(V); | |||
2338 | if (I != ExtraScheduleDataMap.end()) | |||
2339 | for (auto &P : I->second) | |||
2340 | if (P.second->SchedulingRegionID == SchedulingRegionID) | |||
2341 | Action(P.second); | |||
2342 | } | |||
2343 | ||||
2344 | /// Put all instructions into the ReadyList which are ready for scheduling. | |||
2345 | template <typename ReadyListType> | |||
2346 | void initialFillReadyList(ReadyListType &ReadyList) { | |||
2347 | for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { | |||
2348 | doForAllOpcodes(I, [&](ScheduleData *SD) { | |||
2349 | if (SD->isSchedulingEntity() && SD->isReady()) { | |||
2350 | ReadyList.insert(SD); | |||
2351 | LLVM_DEBUG(dbgs() | |||
2352 | << "SLP: initially in ready list: " << *I << "\n"); | |||
2353 | } | |||
2354 | }); | |||
2355 | } | |||
2356 | } | |||
2357 | ||||
2358 | /// Checks if a bundle of instructions can be scheduled, i.e. has no | |||
2359 | /// cyclic dependencies. This is only a dry-run, no instructions are | |||
2360 | /// actually moved at this stage. | |||
2361 | /// \returns the scheduling bundle. The returned Optional value is non-None | |||
2362 | /// if \p VL is allowed to be scheduled. | |||
2363 | Optional<ScheduleData *> | |||
2364 | tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, | |||
2365 | const InstructionsState &S); | |||
2366 | ||||
2367 | /// Un-bundles a group of instructions. | |||
2368 | void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue); | |||
2369 | ||||
2370 | /// Allocates schedule data chunk. | |||
2371 | ScheduleData *allocateScheduleDataChunks(); | |||
2372 | ||||
2373 | /// Extends the scheduling region so that V is inside the region. | |||
2374 | /// \returns true if the region size is within the limit. | |||
2375 | bool extendSchedulingRegion(Value *V, const InstructionsState &S); | |||
2376 | ||||
2377 | /// Initialize the ScheduleData structures for new instructions in the | |||
2378 | /// scheduling region. | |||
2379 | void initScheduleData(Instruction *FromI, Instruction *ToI, | |||
2380 | ScheduleData *PrevLoadStore, | |||
2381 | ScheduleData *NextLoadStore); | |||
2382 | ||||
2383 | /// Updates the dependency information of a bundle and of all instructions/ | |||
2384 | /// bundles which depend on the original bundle. | |||
2385 | void calculateDependencies(ScheduleData *SD, bool InsertInReadyList, | |||
2386 | BoUpSLP *SLP); | |||
2387 | ||||
2388 | /// Sets all instructions in the scheduling region to un-scheduled. | |||
2389 | void resetSchedule(); | |||
2390 | ||||
2391 | BasicBlock *BB; | |||
2392 | ||||
2393 | /// Simple memory allocation for ScheduleData. | |||
2394 | std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks; | |||
2395 | ||||
2396 | /// The size of a ScheduleData array in ScheduleDataChunks. | |||
2397 | int ChunkSize; | |||
2398 | ||||
2399 | /// The allocator position in the current chunk, which is the last entry | |||
2400 | /// of ScheduleDataChunks. | |||
2401 | int ChunkPos; | |||
2402 | ||||
2403 | /// Attaches ScheduleData to Instruction. | |||
2404 | /// Note that the mapping survives during all vectorization iterations, i.e. | |||
2405 | /// ScheduleData structures are recycled. | |||
2406 | DenseMap<Value *, ScheduleData *> ScheduleDataMap; | |||
2407 | ||||
2408 | /// Attaches ScheduleData to Instruction with the leading key. | |||
2409 | DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>> | |||
2410 | ExtraScheduleDataMap; | |||
2411 | ||||
2412 | struct ReadyList : SmallVector<ScheduleData *, 8> { | |||
2413 | void insert(ScheduleData *SD) { push_back(SD); } | |||
2414 | }; | |||
2415 | ||||
2416 | /// The ready-list for scheduling (only used for the dry-run). | |||
2417 | ReadyList ReadyInsts; | |||
2418 | ||||
2419 | /// The first instruction of the scheduling region. | |||
2420 | Instruction *ScheduleStart = nullptr; | |||
2421 | ||||
2422 | /// The first instruction _after_ the scheduling region. | |||
2423 | Instruction *ScheduleEnd = nullptr; | |||
2424 | ||||
2425 | /// The first memory accessing instruction in the scheduling region | |||
2426 | /// (can be null). | |||
2427 | ScheduleData *FirstLoadStoreInRegion = nullptr; | |||
2428 | ||||
2429 | /// The last memory accessing instruction in the scheduling region | |||
2430 | /// (can be null). | |||
2431 | ScheduleData *LastLoadStoreInRegion = nullptr; | |||
2432 | ||||
2433 | /// The current size of the scheduling region. | |||
2434 | int ScheduleRegionSize = 0; | |||
2435 | ||||
2436 | /// The maximum size allowed for the scheduling region. | |||
2437 | int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; | |||
2438 | ||||
2439 | /// The ID of the scheduling region. For a new vectorization iteration this | |||
2440 | /// is incremented, which "removes" all ScheduleData from the region. | |||
2441 | // Make sure that the initial SchedulingRegionID is greater than the | |||
2442 | // initial SchedulingRegionID in ScheduleData (which is 0). | |||
2443 | int SchedulingRegionID = 1; | |||
2444 | }; | |||
2445 | ||||
2446 | /// Attaches the BlockScheduling structures to basic blocks. | |||
2447 | MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; | |||
2448 | ||||
2449 | /// Performs the "real" scheduling. Done before vectorization is actually | |||
2450 | /// performed in a basic block. | |||
2451 | void scheduleBlock(BlockScheduling *BS); | |||
2452 | ||||
2453 | /// List of users to ignore during scheduling and that don't need extracting. | |||
2454 | ArrayRef<Value *> UserIgnoreList; | |||
2455 | ||||
2456 | /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of | |||
2457 | /// sorted SmallVectors of unsigned. | |||
2458 | struct OrdersTypeDenseMapInfo { | |||
2459 | static OrdersType getEmptyKey() { | |||
2460 | OrdersType V; | |||
2461 | V.push_back(~1U); | |||
2462 | return V; | |||
2463 | } | |||
2464 | ||||
2465 | static OrdersType getTombstoneKey() { | |||
2466 | OrdersType V; | |||
2467 | V.push_back(~2U); | |||
2468 | return V; | |||
2469 | } | |||
2470 | ||||
2471 | static unsigned getHashValue(const OrdersType &V) { | |||
2472 | return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); | |||
2473 | } | |||
2474 | ||||
2475 | static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { | |||
2476 | return LHS == RHS; | |||
2477 | } | |||
2478 | }; | |||
2479 | ||||
2480 | // Analysis and block reference. | |||
2481 | Function *F; | |||
2482 | ScalarEvolution *SE; | |||
2483 | TargetTransformInfo *TTI; | |||
2484 | TargetLibraryInfo *TLI; | |||
2485 | AAResults *AA; | |||
2486 | LoopInfo *LI; | |||
2487 | DominatorTree *DT; | |||
2488 | AssumptionCache *AC; | |||
2489 | DemandedBits *DB; | |||
2490 | const DataLayout *DL; | |||
2491 | OptimizationRemarkEmitter *ORE; | |||
2492 | ||||
2493 | unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. | |||
2494 | unsigned MinVecRegSize; // Set by cl::opt (default: 128). | |||
2495 | ||||
2496 | /// Instruction builder to construct the vectorized tree. | |||
2497 | IRBuilder<> Builder; | |||
2498 | ||||
2499 | /// A map of scalar integer values to the smallest bit width with which they | |||
2500 | /// can legally be represented. The values map to (width, signed) pairs, | |||
2501 | /// where "width" indicates the minimum bit width and "signed" is True if the | |||
2502 | /// value must be sign-extended, rather than zero-extended, back to its | |||
2503 | /// original width. | |||
2504 | MapVector<Value *, std::pair<uint64_t, bool>> MinBWs; | |||
2505 | }; | |||
2506 | ||||
2507 | } // end namespace slpvectorizer | |||
2508 | ||||
2509 | template <> struct GraphTraits<BoUpSLP *> { | |||
2510 | using TreeEntry = BoUpSLP::TreeEntry; | |||
2511 | ||||
2512 | /// NodeRef has to be a pointer per the GraphWriter. | |||
2513 | using NodeRef = TreeEntry *; | |||
2514 | ||||
2515 | using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy; | |||
2516 | ||||
2517 | /// Add the VectorizableTree to the index iterator to be able to return | |||
2518 | /// TreeEntry pointers. | |||
2519 | struct ChildIteratorType | |||
2520 | : public iterator_adaptor_base< | |||
2521 | ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> { | |||
2522 | ContainerTy &VectorizableTree; | |||
2523 | ||||
2524 | ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W, | |||
2525 | ContainerTy &VT) | |||
2526 | : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} | |||
2527 | ||||
2528 | NodeRef operator*() { return I->UserTE; } | |||
2529 | }; | |||
2530 | ||||
2531 | static NodeRef getEntryNode(BoUpSLP &R) { | |||
2532 | return R.VectorizableTree[0].get(); | |||
2533 | } | |||
2534 | ||||
2535 | static ChildIteratorType child_begin(NodeRef N) { | |||
2536 | return {N->UserTreeIndices.begin(), N->Container}; | |||
2537 | } | |||
2538 | ||||
2539 | static ChildIteratorType child_end(NodeRef N) { | |||
2540 | return {N->UserTreeIndices.end(), N->Container}; | |||
2541 | } | |||
2542 | ||||
2543 | /// For the node iterator we just need to turn the TreeEntry iterator into a | |||
2544 | /// TreeEntry* iterator so that it dereferences to NodeRef. | |||
2545 | class nodes_iterator { | |||
2546 | using ItTy = ContainerTy::iterator; | |||
2547 | ItTy It; | |||
2548 | ||||
2549 | public: | |||
2550 | nodes_iterator(const ItTy &It2) : It(It2) {} | |||
2551 | NodeRef operator*() { return It->get(); } | |||
2552 | nodes_iterator operator++() { | |||
2553 | ++It; | |||
2554 | return *this; | |||
2555 | } | |||
2556 | bool operator!=(const nodes_iterator &N2) const { return N2.It != It; } | |||
2557 | }; | |||
2558 | ||||
2559 | static nodes_iterator nodes_begin(BoUpSLP *R) { | |||
2560 | return nodes_iterator(R->VectorizableTree.begin()); | |||
2561 | } | |||
2562 | ||||
2563 | static nodes_iterator nodes_end(BoUpSLP *R) { | |||
2564 | return nodes_iterator(R->VectorizableTree.end()); | |||
2565 | } | |||
2566 | ||||
2567 | static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); } | |||
2568 | }; | |||
2569 | ||||
2570 | template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits { | |||
2571 | using TreeEntry = BoUpSLP::TreeEntry; | |||
2572 | ||||
2573 | DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} | |||
2574 | ||||
2575 | std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) { | |||
2576 | std::string Str; | |||
2577 | raw_string_ostream OS(Str); | |||
2578 | if (isSplat(Entry->Scalars)) { | |||
2579 | OS << "<splat> " << *Entry->Scalars[0]; | |||
2580 | return Str; | |||
2581 | } | |||
2582 | for (auto V : Entry->Scalars) { | |||
2583 | OS << *V; | |||
2584 | if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) { | |||
2585 | return EU.Scalar == V; | |||
2586 | })) | |||
2587 | OS << " <extract>"; | |||
2588 | OS << "\n"; | |||
2589 | } | |||
2590 | return Str; | |||
2591 | } | |||
2592 | ||||
2593 | static std::string getNodeAttributes(const TreeEntry *Entry, | |||
2594 | const BoUpSLP *) { | |||
2595 | if (Entry->State == TreeEntry::NeedToGather) | |||
2596 | return "color=red"; | |||
2597 | return ""; | |||
2598 | } | |||
2599 | }; | |||
2600 | ||||
2601 | } // end namespace llvm | |||
2602 | ||||
2603 | BoUpSLP::~BoUpSLP() { | |||
2604 | for (const auto &Pair : DeletedInstructions) { | |||
2605 | // Replace operands of ignored instructions with Undefs in case they were | |||
2606 | // marked for deletion. | |||
2607 | if (Pair.getSecond()) { | |||
2608 | Value *Undef = UndefValue::get(Pair.getFirst()->getType()); | |||
2609 | Pair.getFirst()->replaceAllUsesWith(Undef); | |||
2610 | } | |||
2611 | Pair.getFirst()->dropAllReferences(); | |||
2612 | } | |||
2613 | for (const auto &Pair : DeletedInstructions) { | |||
2614 | assert(Pair.getFirst()->use_empty() && | |||
2615 | "trying to erase instruction with users."); | |||
2616 | Pair.getFirst()->eraseFromParent(); | |||
2617 | } | |||
2618 | #ifdef EXPENSIVE_CHECKS | |||
2619 | // If we could guarantee that this call is not extremely slow, we could | |||
2620 | // remove the ifdef limitation (see PR47712). | |||
2621 | assert(!verifyFunction(*F, &dbgs())); | |||
2622 | #endif | |||
2623 | } | |||
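The destructor above erases in two passes: it first breaks every reference into and out of the deferred-deletion set (replaceAllUsesWith/dropAllReferences) and only then calls eraseFromParent(), since the dead instructions may still use one another. A small stand-alone sketch of the same two-pass pattern against the LLVM C++ API (assuming a recent LLVM build; the function and value names here are made up, this is not the pass's code):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  auto *I32 = Type::getInt32Ty(Ctx);
  Function *F = Function::Create(FunctionType::get(I32, {I32}, false),
                                 Function::ExternalLinkage, "f", M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> B(BB);
  Value *Arg = F->getArg(0);
  Instruction *Add = cast<Instruction>(B.CreateAdd(Arg, Arg, "a"));
  Instruction *Mul = cast<Instruction>(B.CreateMul(Add, Arg, "m")); // uses Add
  B.CreateRet(Mul);

  // Both Add and Mul are considered dead, but Mul still uses Add and the ret
  // still uses Mul, so erasing them naively would hit "use_empty" assertions.
  Instruction *Dead[] = {Add, Mul};

  // Pass 1: break every reference into and out of the dead set.
  for (Instruction *I : Dead) {
    I->replaceAllUsesWith(UndefValue::get(I->getType()));
    I->dropAllReferences();
  }
  // Pass 2: now each dead instruction is use-free and can be erased safely.
  for (Instruction *I : Dead)
    I->eraseFromParent();

  verifyFunction(*F, &errs());
  M.print(outs(), nullptr);
  return 0;
}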
2624 | ||||
2625 | void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) { | |||
2626 | for (auto *V : AV) { | |||
2627 | if (auto *I = dyn_cast<Instruction>(V)) | |||
2628 | eraseInstruction(I, /*ReplaceOpsWithUndef=*/true); | |||
2629 | }; | |||
2630 | } | |||
2631 | ||||
2632 | /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses | |||
2633 | /// contains the original mask for the scalars reused in the node. The procedure | |||
2634 | /// transforms this mask in accordance with the given \p Mask. | |||
2635 | static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) { | |||
2636 | assert(!Mask.empty() && Reuses.size() == Mask.size() && | |||
2637 | "Expected non-empty mask."); | |||
2638 | SmallVector<int> Prev(Reuses.begin(), Reuses.end()); | |||
2639 | Prev.swap(Reuses); | |||
2640 | for (unsigned I = 0, E = Prev.size(); I < E; ++I) | |||
2641 | if (Mask[I] != UndefMaskElem) | |||
2642 | Reuses[Mask[I]] = Prev[I]; | |||
2643 | } | |||
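In other words, the element that previously sat at position I ends up at position Mask[I]. A runnable stand-alone sketch of the same mask application, using std::vector and made-up values:

#include <cassert>
#include <vector>

constexpr int UndefMaskElem = -1;

// Mirror of reorderReuses above, using std::vector so it can run standalone.
void reorderReuses(std::vector<int> &Reuses, const std::vector<int> &Mask) {
  assert(!Mask.empty() && Reuses.size() == Mask.size() && "Expected non-empty mask.");
  std::vector<int> Prev = Reuses;
  for (unsigned I = 0, E = Prev.size(); I < E; ++I)
    if (Mask[I] != UndefMaskElem)
      Reuses[Mask[I]] = Prev[I];
}

int main() {
  // The element previously at position I is moved to position Mask[I].
  std::vector<int> Reuses = {10, 11, 12, 13};
  std::vector<int> Mask   = {2, 0, 3, 1};
  reorderReuses(Reuses, Mask);
  assert((Reuses == std::vector<int>{11, 13, 10, 12}));
  return 0;
}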
2644 | ||||
2645 | /// Reorders the given \p Order according to the given \p Mask. \p Order is | |||
2646 | /// the original order of the scalars. The procedure transforms the provided order | |||
2647 | /// in accordance with the given \p Mask. If the resulting \p Order is just an | |||
2648 | /// identity order, \p Order is cleared. | |||
2649 | static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) { | |||
2650 | assert(!Mask.empty() && "Expected non-empty mask."); | |||
2651 | SmallVector<int> MaskOrder; | |||
2652 | if (Order.empty()) { | |||
2653 | MaskOrder.resize(Mask.size()); | |||
2654 | std::iota(MaskOrder.begin(), MaskOrder.end(), 0); | |||
2655 | } else { | |||
2656 | inversePermutation(Order, MaskOrder); | |||
2657 | } | |||
2658 | reorderReuses(MaskOrder, Mask); | |||
2659 | if (ShuffleVectorInst::isIdentityMask(MaskOrder)) { | |||
2660 | Order.clear(); | |||
2661 | return; | |||
2662 | } | |||
2663 | Order.assign(Mask.size(), Mask.size()); | |||
2664 | for (unsigned I = 0, E = Mask.size(); I < E; ++I) | |||
2665 | if (MaskOrder[I] != UndefMaskElem) | |||
2666 | Order[MaskOrder[I]] = I; | |||
2667 | fixupOrderingIndices(Order); | |||
2668 | } | |||
2669 | ||||
2670 | void BoUpSLP::reorderTopToBottom() { | |||
2671 | // Maps VF to the graph nodes. | |||
2672 | DenseMap<unsigned, SmallPtrSet<TreeEntry *, 4>> VFToOrderedEntries; | |||
2673 | // ExtractElement gather nodes which can be vectorized and need to handle | |||
2674 | // their ordering. | |||
2675 | DenseMap<const TreeEntry *, OrdersType> GathersToOrders; | |||
2676 | // Find all reorderable nodes with the given VF. | |||
2677 | // Currently these are vectorized loads, extracts + some gathering of extracts. | |||
2678 | for_each(VectorizableTree, [this, &VFToOrderedEntries, &GathersToOrders]( | |||
2679 | const std::unique_ptr<TreeEntry> &TE) { | |||
2680 | // No need to reorder if need to shuffle reuses, still need to shuffle the | |||
2681 | // node. | |||
2682 | if (!TE->ReuseShuffleIndices.empty()) | |||
2683 | return; | |||
2684 | if (TE->State == TreeEntry::Vectorize && | |||
2685 | isa<LoadInst, ExtractElementInst, ExtractValueInst, StoreInst, | |||
2686 | InsertElementInst>(TE->getMainOp()) && | |||
2687 | !TE->isAltShuffle()) { | |||
2688 | VFToOrderedEntries[TE->Scalars.size()].insert(TE.get()); | |||
2689 | } else if (TE->State == TreeEntry::NeedToGather && | |||
2690 | TE->getOpcode() == Instruction::ExtractElement && | |||
2691 | !TE->isAltShuffle() && | |||
2692 | isa<FixedVectorType>(cast<ExtractElementInst>(TE->getMainOp()) | |||
2693 | ->getVectorOperandType()) && | |||
2694 | allSameType(TE->Scalars) && allSameBlock(TE->Scalars)) { | |||
2695 | // Check that gather of extractelements can be represented as | |||
2696 | // just a shuffle of a single vector. | |||
2697 | OrdersType CurrentOrder; | |||
2698 | bool Reuse = canReuseExtract(TE->Scalars, TE->getMainOp(), CurrentOrder); | |||
2699 | if (Reuse || !CurrentOrder.empty()) { | |||
2700 | VFToOrderedEntries[TE->Scalars.size()].insert(TE.get()); | |||
2701 | GathersToOrders.try_emplace(TE.get(), CurrentOrder); | |||
2702 | } | |||
2703 | } | |||
2704 | }); | |||
2705 | ||||
2706 | // Reorder the graph nodes according to their vectorization factor. | |||
2707 | for (unsigned VF = VectorizableTree.front()->Scalars.size(); VF > 1; | |||
2708 | VF /= 2) { | |||
2709 | auto It = VFToOrderedEntries.find(VF); | |||
2710 | if (It == VFToOrderedEntries.end()) | |||
2711 | continue; | |||
2712 | // Try to find the most profitable order. We are just looking for the most | |||
2713 | // used order and reorder the scalar elements in the nodes according to this | |||
2714 | // most used order. | |||
2715 | const SmallPtrSetImpl<TreeEntry *> &OrderedEntries = It->getSecond(); | |||
2716 | // All operands are reordered and used only in this node - propagate the | |||
2717 | // most used order to the user node. | |||
2718 | DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> OrdersUses; | |||
2719 | SmallPtrSet<const TreeEntry *, 4> VisitedOps; | |||
2720 | for (const TreeEntry *OpTE : OrderedEntries) { | |||
2721 | // No need to reorder these nodes; we still need to extend and to use a shuffle, | |||
2722 | // just need to merge the reordering shuffle and the reuse shuffle. | |||
2723 | if (!OpTE->ReuseShuffleIndices.empty()) | |||
2724 | continue; | |||
2725 | // Count number of orders uses. | |||
2726 | const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & { | |||
2727 | if (OpTE->State == TreeEntry::NeedToGather) | |||
2728 | return GathersToOrders.find(OpTE)->second; | |||
2729 | return OpTE->ReorderIndices; | |||
2730 | }(); | |||
2731 | // Stores actually store the mask, not the order; we need to invert it. | |||
2732 | if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && | |||
2733 | OpTE->getOpcode() == Instruction::Store && !Order.empty()) { | |||
2734 | SmallVector<int> Mask; | |||
2735 | inversePermutation(Order, Mask); | |||
2736 | unsigned E = Order.size(); | |||
2737 | OrdersType CurrentOrder(E, E); | |||
2738 | transform(Mask, CurrentOrder.begin(), [E](int Idx) { | |||
2739 | return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx); | |||
2740 | }); | |||
2741 | fixupOrderingIndices(CurrentOrder); | |||
2742 | ++OrdersUses.try_emplace(CurrentOrder).first->getSecond(); | |||
2743 | } else { | |||
2744 | ++OrdersUses.try_emplace(Order).first->getSecond(); | |||
2745 | } | |||
2746 | } | |||
2747 | // Set order of the user node. | |||
2748 | if (OrdersUses.empty()) | |||
2749 | continue; | |||
2750 | // Choose the most used order. | |||
2751 | ArrayRef<unsigned> BestOrder = OrdersUses.begin()->first; | |||
2752 | unsigned Cnt = OrdersUses.begin()->second; | |||
2753 | for (const auto &Pair : llvm::drop_begin(OrdersUses)) { | |||
2754 | if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { | |||
2755 | BestOrder = Pair.first; | |||
2756 | Cnt = Pair.second; | |||
2757 | } | |||
2758 | } | |||
2759 | // Set order of the user node. | |||
2760 | if (BestOrder.empty()) | |||
2761 | continue; | |||
2762 | SmallVector<int> Mask; | |||
2763 | inversePermutation(BestOrder, Mask); | |||
2764 | SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem); | |||
2765 | unsigned E = BestOrder.size(); | |||
2766 | transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { | |||
2767 | return I < E ? static_cast<int>(I) : UndefMaskElem; | |||
2768 | }); | |||
2769 | // Do an actual reordering, if profitable. | |||
2770 | for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) { | |||
2771 | // Just do the reordering for the nodes with the given VF. | |||
2772 | if (TE->Scalars.size() != VF) { | |||
2773 | if (TE->ReuseShuffleIndices.size() == VF) { | |||
2774 | // Need to reorder the reuses masks of the operands with smaller VF to | |||
2775 | // be able to find the match between the graph nodes and scalar | |||
2776 | // operands of the given node during vectorization/cost estimation. | |||
2777 | assert(all_of(TE->UserTreeIndices, | |||
2778 | [VF, &TE](const EdgeInfo &EI) { | |||
2779 | return EI.UserTE->Scalars.size() == VF || | |||
2780 | EI.UserTE->Scalars.size() == | |||
2781 | TE->Scalars.size(); | |||
2782 | }) && | |||
2783 | "All users must be of VF size."); | |||
2784 | // Update ordering of the operands with the smaller VF than the given | |||
2785 | // one. | |||
2786 | reorderReuses(TE->ReuseShuffleIndices, Mask); | |||
2787 | } | |||
2788 | continue; | |||
2789 | } | |||
2790 | if (TE->State == TreeEntry::Vectorize && | |||
2791 | isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst, | |||
2792 | InsertElementInst>(TE->getMainOp()) && | |||
2793 | !TE->isAltShuffle()) { | |||
2794 | // Build correct orders for extract{element,value}, loads and | |||
2795 | // stores. | |||
2796 | reorderOrder(TE->ReorderIndices, Mask); | |||
2797 | if (isa<InsertElementInst, StoreInst>(TE->getMainOp())) | |||
2798 | TE->reorderOperands(Mask); | |||
2799 | } else { | |||
2800 | // Reorder the node and its operands. | |||
2801 | TE->reorderOperands(Mask); | |||
2802 | assert(TE->ReorderIndices.empty() && | |||
2803 | "Expected empty reorder sequence."); | |||
2804 | reorderScalars(TE->Scalars, Mask); | |||
2805 | } | |||
2806 | if (!TE->ReuseShuffleIndices.empty()) { | |||
2807 | // Apply reversed order to keep the original ordering of the reused | |||
2808 | // elements to avoid extra reorder indices shuffling. | |||
2809 | OrdersType CurrentOrder; | |||
2810 | reorderOrder(CurrentOrder, MaskOrder); | |||
2811 | SmallVector<int> NewReuses; | |||
2812 | inversePermutation(CurrentOrder, NewReuses); | |||
2813 | addMask(NewReuses, TE->ReuseShuffleIndices); | |||
2814 | TE->ReuseShuffleIndices.swap(NewReuses); | |||
2815 | } | |||
2816 | } | |||
2817 | } | |||
2818 | } | |||
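The "most used order" vote above counts how many operands ask for each order and, on a tie, prefers the empty (identity) order so that no reordering is done. A stand-alone sketch of just that selection step (std::map instead of DenseMap, hypothetical counts, illustrative only):

#include <cassert>
#include <iterator>
#include <map>
#include <vector>

using OrdersType = std::vector<unsigned>;

// Count-based vote mirroring the selection loop above: pick the most frequent
// order, preferring the empty (identity) order on a tie.
OrdersType pickBestOrder(const std::map<OrdersType, unsigned> &OrdersUses) {
  assert(!OrdersUses.empty() && "Expected at least one candidate order.");
  OrdersType BestOrder = OrdersUses.begin()->first;
  unsigned Cnt = OrdersUses.begin()->second;
  for (auto It = std::next(OrdersUses.begin()); It != OrdersUses.end(); ++It) {
    if (Cnt < It->second || (Cnt == It->second && It->first.empty())) {
      BestOrder = It->first;
      Cnt = It->second;
    }
  }
  return BestOrder;
}

int main() {
  // Two operands voted for {1,0,3,2}, two voted for the identity order {}.
  std::map<OrdersType, unsigned> OrdersUses = {{{1, 0, 3, 2}, 2}, {{}, 2}};
  // On a tie the identity order wins, so no reordering is performed.
  assert(pickBestOrder(OrdersUses).empty());
  return 0;
}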
2819 | ||||
2820 | void BoUpSLP::reorderBottomToTop() { | |||
2821 | SetVector<TreeEntry *> OrderedEntries; | |||
2822 | DenseMap<const TreeEntry *, OrdersType> GathersToOrders; | |||
2823 | // Find all reorderable leaf nodes with the given VF. | |||
2824 | // Currently these are vectorized loads, extracts without alternate operands + | |||
2825 | // some gathering of extracts. | |||
2826 | SmallVector<TreeEntry *> NonVectorized; | |||
2827 | for_each(VectorizableTree, [this, &OrderedEntries, &GathersToOrders, | |||
2828 | &NonVectorized]( | |||
2829 | const std::unique_ptr<TreeEntry> &TE) { | |||
2830 | // No need to reorder if need to shuffle reuses, still need to shuffle the | |||
2831 | // node. | |||
2832 | if (!TE->ReuseShuffleIndices.empty()) | |||
2833 | return; | |||
2834 | if (TE->State == TreeEntry::Vectorize && | |||
2835 | isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE->getMainOp()) && | |||
2836 | !TE->isAltShuffle()) { | |||
2837 | OrderedEntries.insert(TE.get()); | |||
2838 | } else if (TE->State == TreeEntry::NeedToGather && | |||
2839 | TE->getOpcode() == Instruction::ExtractElement && | |||
2840 | !TE->isAltShuffle() && | |||
2841 | isa<FixedVectorType>(cast<ExtractElementInst>(TE->getMainOp()) | |||
2842 | ->getVectorOperandType()) && | |||
2843 | allSameType(TE->Scalars) && allSameBlock(TE->Scalars)) { | |||
2844 | // Check that gather of extractelements can be represented as | |||
2845 | // just a shuffle of a single vector with a single user only. | |||
2846 | OrdersType CurrentOrder; | |||
2847 | bool Reuse = canReuseExtract(TE->Scalars, TE->getMainOp(), CurrentOrder); | |||
2848 | if ((Reuse || !CurrentOrder.empty()) && | |||
2849 | !any_of( | |||
2850 | VectorizableTree, [&TE](const std::unique_ptr<TreeEntry> &Entry) { | |||
2851 | return Entry->State == TreeEntry::NeedToGather && | |||
2852 | Entry.get() != TE.get() && Entry->isSame(TE->Scalars); | |||
2853 | })) { | |||
2854 | OrderedEntries.insert(TE.get()); | |||
2855 | GathersToOrders.try_emplace(TE.get(), CurrentOrder); | |||
2856 | } | |||
2857 | } | |||
2858 | if (TE->State != TreeEntry::Vectorize) | |||
2859 | NonVectorized.push_back(TE.get()); | |||
2860 | }); | |||
2861 | ||||
2862 | // Checks if the operands of the users are reorderable and have only a single | |||
2863 | // use. | |||
2864 | auto &&CheckOperands = | |||
2865 | [this, &NonVectorized](const auto &Data, | |||
2866 | SmallVectorImpl<TreeEntry *> &GatherOps) { | |||
2867 | for (unsigned I = 0, E = Data.first->getNumOperands(); I < E; ++I) { | |||
2868 | if (any_of(Data.second, | |||
2869 | [I](const std::pair<unsigned, TreeEntry *> &OpData) { | |||
2870 | return OpData.first == I && | |||
2871 | OpData.second->State == TreeEntry::Vectorize; | |||
2872 | })) | |||
2873 | continue; | |||
2874 | ArrayRef<Value *> VL = Data.first->getOperand(I); | |||
2875 | const TreeEntry *TE = nullptr; | |||
2876 | const auto *It = find_if(VL, [this, &TE](Value *V) { | |||
2877 | TE = getTreeEntry(V); | |||
2878 | return TE; | |||
2879 | }); | |||
2880 | if (It != VL.end() && TE->isSame(VL)) | |||
2881 | return false; | |||
2882 | TreeEntry *Gather = nullptr; | |||
2883 | if (count_if(NonVectorized, [VL, &Gather](TreeEntry *TE) { | |||
2884 | assert(TE->State != TreeEntry::Vectorize && | |||
2885 | "Only non-vectorized nodes are expected."); | |||
2886 | if (TE->isSame(VL)) { | |||
2887 | Gather = TE; | |||
2888 | return true; | |||
2889 | } | |||
2890 | return false; | |||
2891 | }) > 1) | |||
2892 | return false; | |||
2893 | if (Gather) | |||
2894 | GatherOps.push_back(Gather); | |||
2895 | } | |||
2896 | return true; | |||
2897 | }; | |||
2898 | // 1. Propagate the order to the graph nodes that use only reordered nodes. | |||
2899 | // I.e., if the node has operands that are reordered, try to keep at least | |||
2900 | // one operand in the natural order and reorder the others + reorder the | |||
2901 | // user node itself. | |||
2902 | SmallPtrSet<const TreeEntry *, 4> Visited; | |||
2903 | while (!OrderedEntries.empty()) { | |||
2904 | // 1. Filter out only reordered nodes. | |||
2905 | // 2. If the entry has multiple uses - skip it and jump to the next node. | |||
2906 | MapVector<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users; | |||
2907 | SmallVector<TreeEntry *> Filtered; | |||
2908 | for (TreeEntry *TE : OrderedEntries) { | |||
2909 | if (!(TE->State == TreeEntry::Vectorize || | |||
2910 | (TE->State == TreeEntry::NeedToGather && | |||
2911 | TE->getOpcode() == Instruction::ExtractElement)) || | |||
2912 | TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() || | |||
2913 | !all_of(drop_begin(TE->UserTreeIndices), | |||
2914 | [TE](const EdgeInfo &EI) { | |||
2915 | return EI.UserTE == TE->UserTreeIndices.front().UserTE; | |||
2916 | }) || | |||
2917 | !Visited.insert(TE).second) { | |||
2918 | Filtered.push_back(TE); | |||
2919 | continue; | |||
2920 | } | |||
2921 | // Build a map between user nodes and their operand order to speed up the | |||
2922 | // search. The graph currently does not provide this dependency directly. | |||
2923 | for (EdgeInfo &EI : TE->UserTreeIndices) { | |||
2924 | TreeEntry *UserTE = EI.UserTE; | |||
2925 | auto It = Users.find(UserTE); | |||
2926 | if (It == Users.end()) | |||
2927 | It = Users.insert({UserTE, {}}).first; | |||
2928 | It->second.emplace_back(EI.EdgeIdx, TE); | |||
2929 | } | |||
2930 | } | |||
2931 | // Erase filtered entries. | |||
2932 | for_each(Filtered, | |||
2933 | [&OrderedEntries](TreeEntry *TE) { OrderedEntries.remove(TE); }); | |||
2934 | for (const auto &Data : Users) { | |||
2935 | // Check that operands are used only in the User node. | |||
2936 | SmallVector<TreeEntry *> GatherOps; | |||
2937 | if (!CheckOperands(Data, GatherOps)) { | |||
2938 | for_each(Data.second, | |||
2939 | [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { | |||
2940 | OrderedEntries.remove(Op.second); | |||
2941 | }); | |||
2942 | continue; | |||
2943 | } | |||
2944 | // All operands are reordered and used only in this node - propagate the | |||
2945 | // most used order to the user node. | |||
2946 | DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> OrdersUses; | |||
2947 | SmallPtrSet<const TreeEntry *, 4> VisitedOps; | |||
2948 | for (const auto &Op : Data.second) { | |||
2949 | TreeEntry *OpTE = Op.second; | |||
2950 | if (!OpTE->ReuseShuffleIndices.empty()) | |||
2951 | continue; | |||
2952 | const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & { | |||
2953 | if (OpTE->State == TreeEntry::NeedToGather) | |||
2954 | return GathersToOrders.find(OpTE)->second; | |||
2955 | return OpTE->ReorderIndices; | |||
2956 | }(); | |||
2957 | // Stores actually store the mask, not the order; we need to invert it. | |||
2958 | if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && | |||
2959 | OpTE->getOpcode() == Instruction::Store && !Order.empty()) { | |||
2960 | SmallVector<int> Mask; | |||
2961 | inversePermutation(Order, Mask); | |||
2962 | unsigned E = Order.size(); | |||
2963 | OrdersType CurrentOrder(E, E); | |||
2964 | transform(Mask, CurrentOrder.begin(), [E](int Idx) { | |||
2965 | return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx); | |||
2966 | }); | |||
2967 | fixupOrderingIndices(CurrentOrder); | |||
2968 | ++OrdersUses.try_emplace(CurrentOrder).first->getSecond(); | |||
2969 | } else { | |||
2970 | ++OrdersUses.try_emplace(Order).first->getSecond(); | |||
2971 | } | |||
2972 | if (VisitedOps.insert(OpTE).second) | |||
2973 | OrdersUses.try_emplace({}, 0).first->getSecond() += | |||
2974 | OpTE->UserTreeIndices.size(); | |||
2975 | --OrdersUses[{}]; | |||
2976 | } | |||
2977 | // If no orders - skip current nodes and jump to the next one, if any. | |||
2978 | if (OrdersUses.empty()) { | |||
2979 | for_each(Data.second, | |||
2980 | [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { | |||
2981 | OrderedEntries.remove(Op.second); | |||
2982 | }); | |||
2983 | continue; | |||
2984 | } | |||
2985 | // Choose the best order. | |||
2986 | ArrayRef<unsigned> BestOrder = OrdersUses.begin()->first; | |||
2987 | unsigned Cnt = OrdersUses.begin()->second; | |||
2988 | for (const auto &Pair : llvm::drop_begin(OrdersUses)) { | |||
2989 | if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { | |||
2990 | BestOrder = Pair.first; | |||
2991 | Cnt = Pair.second; | |||
2992 | } | |||
2993 | } | |||
2994 | // Set order of the user node (reordering of operands and user nodes). | |||
2995 | if (BestOrder.empty()) { | |||
2996 | for_each(Data.second, | |||
2997 | [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { | |||
2998 | OrderedEntries.remove(Op.second); | |||
2999 | }); | |||
3000 | continue; | |||
3001 | } | |||
3002 | // Erase operands from OrderedEntries list and adjust their orders. | |||
3003 | VisitedOps.clear(); | |||
3004 | SmallVector<int> Mask; | |||
3005 | inversePermutation(BestOrder, Mask); | |||
3006 | SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem); | |||
3007 | unsigned E = BestOrder.size(); | |||
3008 | transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { | |||
3009 | return I < E ? static_cast<int>(I) : UndefMaskElem; | |||
3010 | }); | |||
3011 | for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) { | |||
3012 | TreeEntry *TE = Op.second; | |||
3013 | OrderedEntries.remove(TE); | |||
3014 | if (!VisitedOps.insert(TE).second) | |||
3015 | continue; | |||
3016 | if (!TE->ReuseShuffleIndices.empty() && TE->ReorderIndices.empty()) { | |||
3017 | // Just reorder reuses indices. | |||
3018 | reorderReuses(TE->ReuseShuffleIndices, Mask); | |||
3019 | continue; | |||
3020 | } | |||
3021 | // Gathers are processed separately. | |||
3022 | if (TE->State != TreeEntry::Vectorize) | |||
3023 | continue; | |||
3024 | assert((BestOrder.size() == TE->ReorderIndices.size() || | |||
3025 | TE->ReorderIndices.empty()) && | |||
3026 | "Non-matching sizes of user/operand entries."); | |||
3027 | reorderOrder(TE->ReorderIndices, Mask); | |||
3028 | } | |||
3029 | // For gathers we just need to reorder their scalars. | |||
3030 | for (TreeEntry *Gather : GatherOps) { | |||
3031 | if (!Gather->ReuseShuffleIndices.empty()) | |||
3032 | continue; | |||
3033 | assert(Gather->ReorderIndices.empty() && | |||
3034 | "Unexpected reordering of gathers."); | |||
3035 | reorderScalars(Gather->Scalars, Mask); | |||
3036 | OrderedEntries.remove(Gather); | |||
3037 | } | |||
3038 | // Reorder operands of the user node and set the ordering for the user | |||
3039 | // node itself. | |||
3040 | if (Data.first->State != TreeEntry::Vectorize || | |||
3041 | !isa<ExtractElementInst, ExtractValueInst, LoadInst>( | |||
3042 | Data.first->getMainOp()) || | |||
3043 | Data.first->isAltShuffle()) | |||
3044 | Data.first->reorderOperands(Mask); | |||
3045 | if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) || | |||
3046 | Data.first->isAltShuffle()) { | |||
3047 | reorderScalars(Data.first->Scalars, Mask); | |||
3048 | reorderOrder(Data.first->ReorderIndices, MaskOrder); | |||
3049 | if (Data.first->ReuseShuffleIndices.empty() && | |||
3050 | !Data.first->ReorderIndices.empty()) { | |||
3051 | // Insert user node to the list to try to sink reordering deeper in | |||
3052 | // the graph. | |||
3053 | OrderedEntries.insert(Data.first); | |||
3054 | } | |||
3055 | } else { | |||
3056 | reorderOrder(Data.first->ReorderIndices, Mask); | |||
3057 | } | |||
3058 | } | |||
3059 | } | |||
3060 | } | |||
3061 | ||||
3062 | void BoUpSLP::buildExternalUses( | |||
3063 | const ExtraValueToDebugLocsMap &ExternallyUsedValues) { | |||
3064 | // Collect the values that we need to extract from the tree. | |||
3065 | for (auto &TEPtr : VectorizableTree) { | |||
3066 | TreeEntry *Entry = TEPtr.get(); | |||
3067 | ||||
3068 | // No need to handle users of gathered values. | |||
3069 | if (Entry->State == TreeEntry::NeedToGather) | |||
3070 | continue; | |||
3071 | ||||
3072 | // For each lane: | |||
3073 | for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { | |||
3074 | Value *Scalar = Entry->Scalars[Lane]; | |||
3075 | int FoundLane = Entry->findLaneForValue(Scalar); | |||
3076 | ||||
3077 | // Check if the scalar is externally used as an extra arg. | |||
3078 | auto ExtI = ExternallyUsedValues.find(Scalar); | |||
3079 | if (ExtI != ExternallyUsedValues.end()) { | |||
3080 |         LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " | |||
3081 |                           << Lane << " from " << *Scalar << ".\n"); | |||
3082 | ExternalUses.emplace_back(Scalar, nullptr, FoundLane); | |||
3083 | } | |||
3084 | for (User *U : Scalar->users()) { | |||
3085 |         LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); | |||
3086 | ||||
3087 | Instruction *UserInst = dyn_cast<Instruction>(U); | |||
3088 | if (!UserInst) | |||
3089 | continue; | |||
3090 | ||||
3091 | if (isDeleted(UserInst)) | |||
3092 | continue; | |||
3093 | ||||
3094 | // Skip in-tree scalars that become vectors | |||
3095 | if (TreeEntry *UseEntry = getTreeEntry(U)) { | |||
3096 | Value *UseScalar = UseEntry->Scalars[0]; | |||
3097 | // Some in-tree scalars will remain as scalar in vectorized | |||
3098 | // instructions. If that is the case, the one in Lane 0 will | |||
3099 | // be used. | |||
3100 | if (UseScalar != U || | |||
3101 | UseEntry->State == TreeEntry::ScatterVectorize || | |||
3102 | !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) { | |||
3103 |             LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U | |||
3104 |                               << ".\n"); | |||
3105 |             assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state"); | |||
3106 | continue; | |||
3107 | } | |||
3108 | } | |||
3109 | ||||
3110 | // Ignore users in the user ignore list. | |||
3111 | if (is_contained(UserIgnoreList, UserInst)) | |||
3112 | continue; | |||
3113 | ||||
3114 |         LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " | |||
3115 |                           << Lane << " from " << *Scalar << ".\n"); | |||
3116 | ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane)); | |||
3117 | } | |||
3118 | } | |||
3119 | } | |||
3120 | } | |||
3121 | ||||
3122 | void BoUpSLP::buildTree(ArrayRef<Value *> Roots, | |||
3123 | ArrayRef<Value *> UserIgnoreLst) { | |||
3124 | deleteTree(); | |||
3125 | UserIgnoreList = UserIgnoreLst; | |||
3126 | if (!allSameType(Roots)) | |||
3127 | return; | |||
3128 | buildTree_rec(Roots, 0, EdgeInfo()); | |||
3129 | } | |||
3130 | ||||
3131 | namespace { | |||
3132 | /// Tracks the state we can represent the loads in the given sequence. | |||
3133 | enum class LoadsState { Gather, Vectorize, ScatterVectorize }; | |||
3134 | } // anonymous namespace | |||
3135 | ||||
3136 | /// Checks if the given array of loads can be represented as a vectorized, | |||
3137 | /// scatter or just simple gather. | |||
3138 | static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0, | |||
3139 | const TargetTransformInfo &TTI, | |||
3140 | const DataLayout &DL, ScalarEvolution &SE, | |||
3141 | SmallVectorImpl<unsigned> &Order, | |||
3142 | SmallVectorImpl<Value *> &PointerOps) { | |||
3143 | // Check that a vectorized load would load the same memory as a scalar | |||
3144 | // load. For example, we don't want to vectorize loads that are smaller | |||
3145 |   // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>}, LLVM | |||
3146 | // treats loading/storing it as an i8 struct. If we vectorize loads/stores | |||
3147 | // from such a struct, we read/write packed bits disagreeing with the | |||
3148 | // unvectorized version. | |||
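  // For example (assuming a typical DataLayout), an i2 element has a type size
  // of 2 bits but an alloc size of 8 bits, so the check below falls back to a
  // gather for such loads.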
3149 | Type *ScalarTy = VL0->getType(); | |||
3150 | ||||
3151 | if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy)) | |||
3152 | return LoadsState::Gather; | |||
3153 | ||||
3154 | // Make sure all loads in the bundle are simple - we can't vectorize | |||
3155 | // atomic or volatile loads. | |||
3156 | PointerOps.clear(); | |||
3157 | PointerOps.resize(VL.size()); | |||
3158 | auto *POIter = PointerOps.begin(); | |||
3159 | for (Value *V : VL) { | |||
3160 | auto *L = cast<LoadInst>(V); | |||
3161 | if (!L->isSimple()) | |||
3162 | return LoadsState::Gather; | |||
3163 | *POIter = L->getPointerOperand(); | |||
3164 | ++POIter; | |||
3165 | } | |||
3166 | ||||
3167 | Order.clear(); | |||
3168 | // Check the order of pointer operands. | |||
3169 | if (llvm::sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order)) { | |||
3170 | Value *Ptr0; | |||
3171 | Value *PtrN; | |||
3172 | if (Order.empty()) { | |||
3173 | Ptr0 = PointerOps.front(); | |||
3174 | PtrN = PointerOps.back(); | |||
3175 | } else { | |||
3176 | Ptr0 = PointerOps[Order.front()]; | |||
3177 | PtrN = PointerOps[Order.back()]; | |||
3178 | } | |||
3179 | Optional<int> Diff = | |||
3180 | getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE); | |||
3181 | // Check that the sorted loads are consecutive. | |||
3182 | if (static_cast<unsigned>(*Diff) == VL.size() - 1) | |||
3183 | return LoadsState::Vectorize; | |||
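    // E.g. (illustrative): loads of a[0], a[1], a[3], a[2] give a sorted
    // pointer difference of 3 == VL.size() - 1 and are classified as
    // Vectorize above; loads with gaps may still become ScatterVectorize if
    // the target supports a masked gather of this element type (checked
    // below), otherwise they are gathered.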
3184 | Align CommonAlignment = cast<LoadInst>(VL0)->getAlign(); | |||
3185 | for (Value *V : VL) | |||
3186 | CommonAlignment = | |||
3187 | commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); | |||
3188 | if (TTI.isLegalMaskedGather(FixedVectorType::get(ScalarTy, VL.size()), | |||
3189 | CommonAlignment)) | |||
3190 | return LoadsState::ScatterVectorize; | |||
3191 | } | |||
3192 | ||||
3193 | return LoadsState::Gather; | |||
3194 | } | |||
3195 | ||||
3196 | void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, | |||
3197 | const EdgeInfo &UserTreeIdx) { | |||
3198 |   assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); | |||
3199 | ||||
3200 | InstructionsState S = getSameOpcode(VL); | |||
3201 | if (Depth == RecursionMaxDepth) { | |||
3202 |     LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); | |||
3203 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); | |||
3204 | return; | |||
3205 | } | |||
3206 | ||||
3207 | // Don't handle scalable vectors | |||
3208 | if (S.getOpcode() == Instruction::ExtractElement && | |||
3209 | isa<ScalableVectorType>( | |||
3210 | cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) { | |||
3211 |     LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n"); | |||
3212 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); | |||
3213 | return; | |||
3214 | } | |||
3215 | ||||
3216 | // Don't handle vectors. | |||
3217 | if (S.OpValue->getType()->isVectorTy() && | |||
3218 | !isa<InsertElementInst>(S.OpValue)) { | |||
3219 |     LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); | |||
3220 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); | |||
3221 | return; | |||
3222 | } | |||
3223 | ||||
3224 | if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) | |||
3225 | if (SI->getValueOperand()->getType()->isVectorTy()) { | |||
3226 |       LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); | |||
3227 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); | |||
3228 | return; | |||
3229 | } | |||
3230 | ||||
3231 | // If all of the operands are identical or constant we have a simple solution. | |||
3232 | if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode()) { | |||
3233 |     LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n"); | |||
3234 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); | |||
3235 | return; | |||
3236 | } | |||
3237 | ||||
3238 | // We now know that this is a vector of instructions of the same type from | |||
3239 | // the same block. | |||
3240 | ||||
3241 | // Don't vectorize ephemeral values. | |||
3242 | for (Value *V : VL) { | |||
3243 | if (EphValues.count(V)) { | |||
3244 |       LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V | |||
3245 |                         << ") is ephemeral.\n"); | |||
3246 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); | |||
3247 | return; | |||
3248 | } | |||
3249 | } | |||
3250 | ||||
3251 | // Check if this is a duplicate of another entry. | |||
3252 | if (TreeEntry *E = getTreeEntry(S.OpValue)) { | |||
3253 |     LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); | |||
3254 | if (!E->isSame(VL)) { | |||
3255 |       LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); | |||
3256 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); | |||
3257 | return; | |||
3258 | } | |||
3259 | // Record the reuse of the tree node. FIXME, currently this is only used to | |||
3260 | // properly draw the graph rather than for the actual vectorization. | |||
3261 | E->UserTreeIndices.push_back(UserTreeIdx); | |||
3262 |     LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue | |||
3263 |                       << ".\n"); | |||
3264 | return; | |||
3265 | } | |||
3266 | ||||
3267 | // Check that none of the instructions in the bundle are already in the tree. | |||
3268 | for (Value *V : VL) { | |||
3269 | auto *I = dyn_cast<Instruction>(V); | |||
3270 | if (!I) | |||
3271 | continue; | |||
3272 | if (getTreeEntry(I)) { | |||
3273 |       LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V | |||
3274 |                         << ") is already in tree.\n"); | |||
3275 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); | |||
3276 | return; | |||
3277 | } | |||
3278 | } | |||
3279 | ||||
3280 | // If any of the scalars is marked as a value that needs to stay scalar, then | |||
3281 | // we need to gather the scalars. | |||
3282 | // The reduction nodes (stored in UserIgnoreList) also should stay scalar. | |||
3283 | for (Value *V : VL) { | |||
3284 | if (MustGather.count(V) || is_contained(UserIgnoreList, V)) { | |||
3285 |       LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); | |||
3286 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); | |||
3287 | return; | |||
3288 | } | |||
3289 | } | |||
3290 | ||||
3291 | // Check that all of the users of the scalars that we want to vectorize are | |||
3292 | // schedulable. | |||
3293 | auto *VL0 = cast<Instruction>(S.OpValue); | |||
3294 | BasicBlock *BB = VL0->getParent(); | |||
3295 | ||||
3296 | if (!DT->isReachableFromEntry(BB)) { | |||
3297 | // Don't go into unreachable blocks. They may contain instructions with | |||
3298 | // dependency cycles which confuse the final scheduling. | |||
3299 |     LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); | |||
3300 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); | |||
3301 | return; | |||
3302 | } | |||
3303 | ||||
3304 | // Check that every instruction appears once in this bundle. | |||
3305 | SmallVector<int> ReuseShuffleIndicies; | |||
3306 | SmallVector<Value *, 4> UniqueValues; | |||
3307 | DenseMap<Value *, unsigned> UniquePositions; | |||
3308 | for (Value *V : VL) { | |||
3309 | auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); | |||
3310 | ReuseShuffleIndicies.emplace_back(Res.first->second); | |||
3311 | if (Res.second) | |||
3312 | UniqueValues.emplace_back(V); | |||
3313 | } | |||
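  // For example (illustrative): VL = {A, B, A, B} yields UniqueValues = {A, B}
  // and ReuseShuffleIndicies = {0, 1, 0, 1}; only the unique scalars are
  // vectorized and the full-width bundle is recreated via a reuse shuffle.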
3314 | size_t NumUniqueScalarValues = UniqueValues.size(); | |||
3315 | if (NumUniqueScalarValues == VL.size()) { | |||
3316 | ReuseShuffleIndicies.clear(); | |||
3317 | } else { | |||
3318 |     LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); | |||
3319 | if (NumUniqueScalarValues <= 1 || | |||
3320 | !llvm::isPowerOf2_32(NumUniqueScalarValues)) { | |||
3321 |       LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); | |||
3322 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); | |||
3323 | return; | |||
3324 | } | |||
3325 | VL = UniqueValues; | |||
3326 | } | |||
3327 | ||||
3328 | auto &BSRef = BlocksSchedules[BB]; | |||
3329 | if (!BSRef) | |||
3330 | BSRef = std::make_unique<BlockScheduling>(BB); | |||
3331 | ||||
3332 | BlockScheduling &BS = *BSRef.get(); | |||
3333 | ||||
3334 | Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S); | |||
3335 | if (!Bundle) { | |||
3336 |     LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); | |||
3337 |     assert((!BS.getScheduleData(VL0) || | |||
3338 |             !BS.getScheduleData(VL0)->isPartOfBundle()) && | |||
3339 |            "tryScheduleBundle should cancelScheduling on failure"); | |||
3340 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3341 | ReuseShuffleIndicies); | |||
3342 | return; | |||
3343 | } | |||
3344 |   LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); | |||
3345 | ||||
3346 | unsigned ShuffleOrOp = S.isAltShuffle() ? | |||
3347 | (unsigned) Instruction::ShuffleVector : S.getOpcode(); | |||
3348 | switch (ShuffleOrOp) { | |||
3349 | case Instruction::PHI: { | |||
3350 | auto *PH = cast<PHINode>(VL0); | |||
3351 | ||||
3352 | // Check for terminator values (e.g. invoke). | |||
3353 | for (Value *V : VL) | |||
3354 | for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { | |||
3355 | Instruction *Term = dyn_cast<Instruction>( | |||
3356 | cast<PHINode>(V)->getIncomingValueForBlock( | |||
3357 | PH->getIncomingBlock(I))); | |||
3358 | if (Term && Term->isTerminator()) { | |||
3359 |             LLVM_DEBUG(dbgs() | |||
3360 |                        << "SLP: Need to swizzle PHINodes (terminator use).\n"); | |||
3361 | BS.cancelScheduling(VL, VL0); | |||
3362 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3363 | ReuseShuffleIndicies); | |||
3364 | return; | |||
3365 | } | |||
3366 | } | |||
3367 | ||||
3368 | TreeEntry *TE = | |||
3369 | newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies); | |||
3370 |       LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); | |||
3371 | ||||
3372 | // Keeps the reordered operands to avoid code duplication. | |||
3373 | SmallVector<ValueList, 2> OperandsVec; | |||
3374 | for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { | |||
3375 | if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) { | |||
3376 | ValueList Operands(VL.size(), PoisonValue::get(PH->getType())); | |||
3377 | TE->setOperand(I, Operands); | |||
3378 | OperandsVec.push_back(Operands); | |||
3379 | continue; | |||
3380 | } | |||
3381 | ValueList Operands; | |||
3382 | // Prepare the operand vector. | |||
3383 | for (Value *V : VL) | |||
3384 | Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock( | |||
3385 | PH->getIncomingBlock(I))); | |||
3386 | TE->setOperand(I, Operands); | |||
3387 | OperandsVec.push_back(Operands); | |||
3388 | } | |||
3389 | for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx) | |||
3390 | buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx}); | |||
3391 | return; | |||
3392 | } | |||
3393 | case Instruction::ExtractValue: | |||
3394 | case Instruction::ExtractElement: { | |||
3395 | OrdersType CurrentOrder; | |||
3396 | bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); | |||
3397 | if (Reuse) { | |||
3398 |         LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); | |||
3399 | newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, | |||
3400 | ReuseShuffleIndicies); | |||
3401 | // This is a special case, as it does not gather, but at the same time | |||
3402 | // we are not extending buildTree_rec() towards the operands. | |||
3403 | ValueList Op0; | |||
3404 | Op0.assign(VL.size(), VL0->getOperand(0)); | |||
3405 | VectorizableTree.back()->setOperand(0, Op0); | |||
3406 | return; | |||
3407 | } | |||
3408 | if (!CurrentOrder.empty()) { | |||
3409 |         LLVM_DEBUG({ | |||
3410 |           dbgs() << "SLP: Reusing or shuffling of reordered extract sequence " | |||
3411 |                     "with order"; | |||
3412 |           for (unsigned Idx : CurrentOrder) | |||
3413 |             dbgs() << " " << Idx; | |||
3414 |           dbgs() << "\n"; | |||
3415 |         }); | |||
3416 | fixupOrderingIndices(CurrentOrder); | |||
3417 | // Insert new order with initial value 0, if it does not exist, | |||
3418 | // otherwise return the iterator to the existing one. | |||
3419 | newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, | |||
3420 | ReuseShuffleIndicies, CurrentOrder); | |||
3421 | // This is a special case, as it does not gather, but at the same time | |||
3422 | // we are not extending buildTree_rec() towards the operands. | |||
3423 | ValueList Op0; | |||
3424 | Op0.assign(VL.size(), VL0->getOperand(0)); | |||
3425 | VectorizableTree.back()->setOperand(0, Op0); | |||
3426 | return; | |||
3427 | } | |||
3428 |       LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); | |||
3429 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3430 | ReuseShuffleIndicies); | |||
3431 | BS.cancelScheduling(VL, VL0); | |||
3432 | return; | |||
3433 | } | |||
3434 | case Instruction::InsertElement: { | |||
3435 |       assert(ReuseShuffleIndicies.empty() && "All inserts should be unique"); | |||
3436 | ||||
3437 | // Check that we have a buildvector and not a shuffle of 2 or more | |||
3438 | // different vectors. | |||
3439 | ValueSet SourceVectors; | |||
3440 | int MinIdx = std::numeric_limits<int>::max(); | |||
3441 | for (Value *V : VL) { | |||
3442 | SourceVectors.insert(cast<Instruction>(V)->getOperand(0)); | |||
3443 | Optional<int> Idx = *getInsertIndex(V, 0); | |||
3444 | if (!Idx || *Idx == UndefMaskElem) | |||
3445 | continue; | |||
3446 | MinIdx = std::min(MinIdx, *Idx); | |||
3447 | } | |||
3448 | ||||
3449 | if (count_if(VL, [&SourceVectors](Value *V) { | |||
3450 | return !SourceVectors.contains(V); | |||
3451 | }) >= 2) { | |||
3452 | // Found 2nd source vector - cancel. | |||
3453 |         LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with " | |||
3454 |                              "different source vectors.\n"); | |||
3455 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); | |||
3456 | BS.cancelScheduling(VL, VL0); | |||
3457 | return; | |||
3458 | } | |||
3459 | ||||
3460 | auto OrdCompare = [](const std::pair<int, int> &P1, | |||
3461 | const std::pair<int, int> &P2) { | |||
3462 | return P1.first > P2.first; | |||
3463 | }; | |||
3464 | PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>, | |||
3465 | decltype(OrdCompare)> | |||
3466 | Indices(OrdCompare); | |||
3467 | for (int I = 0, E = VL.size(); I < E; ++I) { | |||
3468 | Optional<int> Idx = *getInsertIndex(VL[I], 0); | |||
3469 | if (!Idx || *Idx == UndefMaskElem) | |||
3470 | continue; | |||
3471 | Indices.emplace(*Idx, I); | |||
3472 | } | |||
3473 | OrdersType CurrentOrder(VL.size(), VL.size()); | |||
3474 | bool IsIdentity = true; | |||
3475 | for (int I = 0, E = VL.size(); I < E; ++I) { | |||
3476 | CurrentOrder[Indices.top().second] = I; | |||
3477 | IsIdentity &= Indices.top().second == I; | |||
3478 | Indices.pop(); | |||
3479 | } | |||
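      // Illustrative example: inserts with indices {2, 0, 1} in lanes 0..2 pop
      // from the min-heap as (0, lane1), (1, lane2), (2, lane0), producing
      // CurrentOrder = {2, 0, 1} and IsIdentity == false.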
3480 | if (IsIdentity) | |||
3481 | CurrentOrder.clear(); | |||
3482 | TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, | |||
3483 | None, CurrentOrder); | |||
3484 |       LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n"); | |||
3485 | ||||
3486 | constexpr int NumOps = 2; | |||
3487 | ValueList VectorOperands[NumOps]; | |||
3488 | for (int I = 0; I < NumOps; ++I) { | |||
3489 | for (Value *V : VL) | |||
3490 | VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I)); | |||
3491 | ||||
3492 | TE->setOperand(I, VectorOperands[I]); | |||
3493 | } | |||
3494 | buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1}); | |||
3495 | return; | |||
3496 | } | |||
3497 | case Instruction::Load: { | |||
3498 | // Check that a vectorized load would load the same memory as a scalar | |||
3499 | // load. For example, we don't want to vectorize loads that are smaller | |||
3500 |       // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>}, LLVM | |||
3501 | // treats loading/storing it as an i8 struct. If we vectorize loads/stores | |||
3502 | // from such a struct, we read/write packed bits disagreeing with the | |||
3503 | // unvectorized version. | |||
3504 | SmallVector<Value *> PointerOps; | |||
3505 | OrdersType CurrentOrder; | |||
3506 | TreeEntry *TE = nullptr; | |||
3507 | switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, CurrentOrder, | |||
3508 | PointerOps)) { | |||
3509 | case LoadsState::Vectorize: | |||
3510 | if (CurrentOrder.empty()) { | |||
3511 |           // Original loads are consecutive and do not require reordering. | |||
3512 | TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, | |||
3513 | ReuseShuffleIndicies); | |||
3514 |           LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n"); | |||
3515 | } else { | |||
3516 | fixupOrderingIndices(CurrentOrder); | |||
3517 | // Need to reorder. | |||
3518 | TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, | |||
3519 | ReuseShuffleIndicies, CurrentOrder); | |||
3520 |           LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n"); | |||
3521 | } | |||
3522 | TE->setOperandsInOrder(); | |||
3523 | break; | |||
3524 | case LoadsState::ScatterVectorize: | |||
3525 | // Vectorizing non-consecutive loads with `llvm.masked.gather`. | |||
3526 | TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S, | |||
3527 | UserTreeIdx, ReuseShuffleIndicies); | |||
3528 | TE->setOperandsInOrder(); | |||
3529 | buildTree_rec(PointerOps, Depth + 1, {TE, 0}); | |||
3530 |         LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); | |||
3531 | break; | |||
3532 | case LoadsState::Gather: | |||
3533 | BS.cancelScheduling(VL, VL0); | |||
3534 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3535 | ReuseShuffleIndicies); | |||
3536 | #ifndef NDEBUG | |||
3537 | Type *ScalarTy = VL0->getType(); | |||
3538 | if (DL->getTypeSizeInBits(ScalarTy) != | |||
3539 | DL->getTypeAllocSizeInBits(ScalarTy)) | |||
3540 |           LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); | |||
3541 | else if (any_of(VL, [](Value *V) { | |||
3542 | return !cast<LoadInst>(V)->isSimple(); | |||
3543 | })) | |||
3544 |           LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); | |||
3545 | else | |||
3546 |           LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); | |||
3547 | #endif // NDEBUG | |||
3548 | break; | |||
3549 | } | |||
3550 | return; | |||
3551 | } | |||
3552 | case Instruction::ZExt: | |||
3553 | case Instruction::SExt: | |||
3554 | case Instruction::FPToUI: | |||
3555 | case Instruction::FPToSI: | |||
3556 | case Instruction::FPExt: | |||
3557 | case Instruction::PtrToInt: | |||
3558 | case Instruction::IntToPtr: | |||
3559 | case Instruction::SIToFP: | |||
3560 | case Instruction::UIToFP: | |||
3561 | case Instruction::Trunc: | |||
3562 | case Instruction::FPTrunc: | |||
3563 | case Instruction::BitCast: { | |||
3564 | Type *SrcTy = VL0->getOperand(0)->getType(); | |||
3565 | for (Value *V : VL) { | |||
3566 | Type *Ty = cast<Instruction>(V)->getOperand(0)->getType(); | |||
3567 | if (Ty != SrcTy || !isValidElementType(Ty)) { | |||
3568 | BS.cancelScheduling(VL, VL0); | |||
3569 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3570 | ReuseShuffleIndicies); | |||
3571 |           LLVM_DEBUG(dbgs() | |||
3572 |                      << "SLP: Gathering casts with different src types.\n"); | |||
3573 | return; | |||
3574 | } | |||
3575 | } | |||
3576 | TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, | |||
3577 | ReuseShuffleIndicies); | |||
3578 |       LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); | |||
3579 | ||||
3580 | TE->setOperandsInOrder(); | |||
3581 | for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { | |||
3582 | ValueList Operands; | |||
3583 | // Prepare the operand vector. | |||
3584 | for (Value *V : VL) | |||
3585 | Operands.push_back(cast<Instruction>(V)->getOperand(i)); | |||
3586 | ||||
3587 | buildTree_rec(Operands, Depth + 1, {TE, i}); | |||
3588 | } | |||
3589 | return; | |||
3590 | } | |||
3591 | case Instruction::ICmp: | |||
3592 | case Instruction::FCmp: { | |||
3593 | // Check that all of the compares have the same predicate. | |||
3594 | CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); | |||
3595 | CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); | |||
3596 | Type *ComparedTy = VL0->getOperand(0)->getType(); | |||
3597 | for (Value *V : VL) { | |||
3598 | CmpInst *Cmp = cast<CmpInst>(V); | |||
3599 | if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || | |||
3600 | Cmp->getOperand(0)->getType() != ComparedTy) { | |||
3601 | BS.cancelScheduling(VL, VL0); | |||
3602 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3603 | ReuseShuffleIndicies); | |||
3604 |           LLVM_DEBUG(dbgs() | |||
3605 |                      << "SLP: Gathering cmp with different predicate.\n"); | |||
3606 | return; | |||
3607 | } | |||
3608 | } | |||
3609 | ||||
3610 | TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, | |||
3611 | ReuseShuffleIndicies); | |||
3612 |       LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); | |||
3613 | ||||
3614 | ValueList Left, Right; | |||
3615 | if (cast<CmpInst>(VL0)->isCommutative()) { | |||
3616 | // Commutative predicate - collect + sort operands of the instructions | |||
3617 | // so that each side is more likely to have the same opcode. | |||
3618 |         assert(P0 == SwapP0 && "Commutative Predicate mismatch"); | |||
3619 | reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); | |||
3620 | } else { | |||
3621 | // Collect operands - commute if it uses the swapped predicate. | |||
3622 | for (Value *V : VL) { | |||
3623 | auto *Cmp = cast<CmpInst>(V); | |||
3624 | Value *LHS = Cmp->getOperand(0); | |||
3625 | Value *RHS = Cmp->getOperand(1); | |||
3626 | if (Cmp->getPredicate() != P0) | |||
3627 | std::swap(LHS, RHS); | |||
3628 | Left.push_back(LHS); | |||
3629 | Right.push_back(RHS); | |||
3630 | } | |||
3631 | } | |||
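      // E.g. (illustrative): with P0 == icmp sgt, a lane that uses icmp slt
      // has its operands swapped above so that every lane can be emitted with
      // the sgt predicate.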
3632 | TE->setOperand(0, Left); | |||
3633 | TE->setOperand(1, Right); | |||
3634 | buildTree_rec(Left, Depth + 1, {TE, 0}); | |||
3635 | buildTree_rec(Right, Depth + 1, {TE, 1}); | |||
3636 | return; | |||
3637 | } | |||
3638 | case Instruction::Select: | |||
3639 | case Instruction::FNeg: | |||
3640 | case Instruction::Add: | |||
3641 | case Instruction::FAdd: | |||
3642 | case Instruction::Sub: | |||
3643 | case Instruction::FSub: | |||
3644 | case Instruction::Mul: | |||
3645 | case Instruction::FMul: | |||
3646 | case Instruction::UDiv: | |||
3647 | case Instruction::SDiv: | |||
3648 | case Instruction::FDiv: | |||
3649 | case Instruction::URem: | |||
3650 | case Instruction::SRem: | |||
3651 | case Instruction::FRem: | |||
3652 | case Instruction::Shl: | |||
3653 | case Instruction::LShr: | |||
3654 | case Instruction::AShr: | |||
3655 | case Instruction::And: | |||
3656 | case Instruction::Or: | |||
3657 | case Instruction::Xor: { | |||
3658 | TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, | |||
3659 | ReuseShuffleIndicies); | |||
3660 |       LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n"); | |||
3661 | ||||
3662 | // Sort operands of the instructions so that each side is more likely to | |||
3663 | // have the same opcode. | |||
3664 | if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { | |||
3665 | ValueList Left, Right; | |||
3666 | reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); | |||
3667 | TE->setOperand(0, Left); | |||
3668 | TE->setOperand(1, Right); | |||
3669 | buildTree_rec(Left, Depth + 1, {TE, 0}); | |||
3670 | buildTree_rec(Right, Depth + 1, {TE, 1}); | |||
3671 | return; | |||
3672 | } | |||
3673 | ||||
3674 | TE->setOperandsInOrder(); | |||
3675 | for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { | |||
3676 | ValueList Operands; | |||
3677 | // Prepare the operand vector. | |||
3678 | for (Value *V : VL) | |||
3679 | Operands.push_back(cast<Instruction>(V)->getOperand(i)); | |||
3680 | ||||
3681 | buildTree_rec(Operands, Depth + 1, {TE, i}); | |||
3682 | } | |||
3683 | return; | |||
3684 | } | |||
3685 | case Instruction::GetElementPtr: { | |||
3686 | // We don't combine GEPs with complicated (nested) indexing. | |||
3687 | for (Value *V : VL) { | |||
3688 | if (cast<Instruction>(V)->getNumOperands() != 2) { | |||
3689 |           LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); | |||
3690 | BS.cancelScheduling(VL, VL0); | |||
3691 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3692 | ReuseShuffleIndicies); | |||
3693 | return; | |||
3694 | } | |||
3695 | } | |||
3696 | ||||
3697 | // We can't combine several GEPs into one vector if they operate on | |||
3698 | // different types. | |||
3699 | Type *Ty0 = VL0->getOperand(0)->getType(); | |||
3700 | for (Value *V : VL) { | |||
3701 | Type *CurTy = cast<Instruction>(V)->getOperand(0)->getType(); | |||
3702 | if (Ty0 != CurTy) { | |||
3703 |           LLVM_DEBUG(dbgs() | |||
3704 |                      << "SLP: not-vectorizable GEP (different types).\n"); | |||
3705 | BS.cancelScheduling(VL, VL0); | |||
3706 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3707 | ReuseShuffleIndicies); | |||
3708 | return; | |||
3709 | } | |||
3710 | } | |||
3711 | ||||
3712 | // We don't combine GEPs with non-constant indexes. | |||
3713 | Type *Ty1 = VL0->getOperand(1)->getType(); | |||
3714 | for (Value *V : VL) { | |||
3715 | auto Op = cast<Instruction>(V)->getOperand(1); | |||
3716 | if (!isa<ConstantInt>(Op) || | |||
3717 | (Op->getType() != Ty1 && | |||
3718 | Op->getType()->getScalarSizeInBits() > | |||
3719 | DL->getIndexSizeInBits( | |||
3720 | V->getType()->getPointerAddressSpace()))) { | |||
3721 |           LLVM_DEBUG(dbgs() | |||
3722 |                      << "SLP: not-vectorizable GEP (non-constant indexes).\n"); | |||
3723 | BS.cancelScheduling(VL, VL0); | |||
3724 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3725 | ReuseShuffleIndicies); | |||
3726 | return; | |||
3727 | } | |||
3728 | } | |||
3729 | ||||
3730 | TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, | |||
3731 | ReuseShuffleIndicies); | |||
3732 |       LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); | |||
3733 | TE->setOperandsInOrder(); | |||
3734 | for (unsigned i = 0, e = 2; i < e; ++i) { | |||
3735 | ValueList Operands; | |||
3736 | // Prepare the operand vector. | |||
3737 | for (Value *V : VL) | |||
3738 | Operands.push_back(cast<Instruction>(V)->getOperand(i)); | |||
3739 | ||||
3740 | buildTree_rec(Operands, Depth + 1, {TE, i}); | |||
3741 | } | |||
3742 | return; | |||
3743 | } | |||
3744 | case Instruction::Store: { | |||
3745 | // Check if the stores are consecutive or if we need to swizzle them. | |||
3746 | llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType(); | |||
3747 | // Avoid types that are padded when being allocated as scalars, while | |||
3748 | // being packed together in a vector (such as i1). | |||
3749 | if (DL->getTypeSizeInBits(ScalarTy) != | |||
3750 | DL->getTypeAllocSizeInBits(ScalarTy)) { | |||
3751 | BS.cancelScheduling(VL, VL0); | |||
3752 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3753 | ReuseShuffleIndicies); | |||
3754 |         LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n"); | |||
3755 | return; | |||
3756 | } | |||
3757 | // Make sure all stores in the bundle are simple - we can't vectorize | |||
3758 | // atomic or volatile stores. | |||
3759 | SmallVector<Value *, 4> PointerOps(VL.size()); | |||
3760 | ValueList Operands(VL.size()); | |||
3761 | auto POIter = PointerOps.begin(); | |||
3762 | auto OIter = Operands.begin(); | |||
3763 | for (Value *V : VL) { | |||
3764 | auto *SI = cast<StoreInst>(V); | |||
3765 | if (!SI->isSimple()) { | |||
3766 | BS.cancelScheduling(VL, VL0); | |||
3767 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3768 | ReuseShuffleIndicies); | |||
3769 |           LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n"); | |||
3770 | return; | |||
3771 | } | |||
3772 | *POIter = SI->getPointerOperand(); | |||
3773 | *OIter = SI->getValueOperand(); | |||
3774 | ++POIter; | |||
3775 | ++OIter; | |||
3776 | } | |||
3777 | ||||
3778 | OrdersType CurrentOrder; | |||
3779 | // Check the order of pointer operands. | |||
3780 | if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) { | |||
3781 | Value *Ptr0; | |||
3782 | Value *PtrN; | |||
3783 | if (CurrentOrder.empty()) { | |||
3784 | Ptr0 = PointerOps.front(); | |||
3785 | PtrN = PointerOps.back(); | |||
3786 | } else { | |||
3787 | Ptr0 = PointerOps[CurrentOrder.front()]; | |||
3788 | PtrN = PointerOps[CurrentOrder.back()]; | |||
3789 | } | |||
3790 | Optional<int> Dist = | |||
3791 | getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE); | |||
3792 | // Check that the sorted pointer operands are consecutive. | |||
3793 | if (static_cast<unsigned>(*Dist) == VL.size() - 1) { | |||
3794 | if (CurrentOrder.empty()) { | |||
3795 |             // Original stores are consecutive and do not require reordering. | |||
3796 | TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, | |||
3797 | UserTreeIdx, ReuseShuffleIndicies); | |||
3798 | TE->setOperandsInOrder(); | |||
3799 | buildTree_rec(Operands, Depth + 1, {TE, 0}); | |||
3800 |             LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n"); | |||
3801 | } else { | |||
3802 | fixupOrderingIndices(CurrentOrder); | |||
3803 | TreeEntry *TE = | |||
3804 | newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, | |||
3805 | ReuseShuffleIndicies, CurrentOrder); | |||
3806 | TE->setOperandsInOrder(); | |||
3807 | buildTree_rec(Operands, Depth + 1, {TE, 0}); | |||
3808 |             LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n"); | |||
3809 | } | |||
3810 | return; | |||
3811 | } | |||
3812 | } | |||
3813 | ||||
3814 | BS.cancelScheduling(VL, VL0); | |||
3815 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3816 | ReuseShuffleIndicies); | |||
3817 |       LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); | |||
3818 | return; | |||
3819 | } | |||
3820 | case Instruction::Call: { | |||
3821 | // Check if the calls are all to the same vectorizable intrinsic or | |||
3822 | // library function. | |||
3823 | CallInst *CI = cast<CallInst>(VL0); | |||
3824 | Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); | |||
3825 | ||||
3826 | VFShape Shape = VFShape::get( | |||
3827 | *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())), | |||
3828 | false /*HasGlobalPred*/); | |||
3829 | Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); | |||
3830 | ||||
3831 | if (!VecFunc && !isTriviallyVectorizable(ID)) { | |||
3832 | BS.cancelScheduling(VL, VL0); | |||
3833 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3834 | ReuseShuffleIndicies); | |||
3835 |         LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); | |||
3836 | return; | |||
3837 | } | |||
3838 | Function *F = CI->getCalledFunction(); | |||
3839 | unsigned NumArgs = CI->getNumArgOperands(); | |||
3840 | SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr); | |||
3841 | for (unsigned j = 0; j != NumArgs; ++j) | |||
3842 | if (hasVectorInstrinsicScalarOpd(ID, j)) | |||
3843 | ScalarArgs[j] = CI->getArgOperand(j); | |||
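      // E.g. (illustrative): for @llvm.powi the exponent operand stays scalar
      // in the vectorized call, so it must be the same value in every lane;
      // the per-lane check below rejects bundles where it differs.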
3844 | for (Value *V : VL) { | |||
3845 | CallInst *CI2 = dyn_cast<CallInst>(V); | |||
3846 | if (!CI2 || CI2->getCalledFunction() != F || | |||
3847 | getVectorIntrinsicIDForCall(CI2, TLI) != ID || | |||
3848 | (VecFunc && | |||
3849 | VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) || | |||
3850 | !CI->hasIdenticalOperandBundleSchema(*CI2)) { | |||
3851 | BS.cancelScheduling(VL, VL0); | |||
3852 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3853 | ReuseShuffleIndicies); | |||
3854 |           LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V | |||
3855 |                             << "\n"); | |||
3856 | return; | |||
3857 | } | |||
3858 |         // Some intrinsics have scalar arguments, and these must be the same | |||
3859 |         // across the bundle for it to be vectorized. | |||
3860 | for (unsigned j = 0; j != NumArgs; ++j) { | |||
3861 | if (hasVectorInstrinsicScalarOpd(ID, j)) { | |||
3862 | Value *A1J = CI2->getArgOperand(j); | |||
3863 | if (ScalarArgs[j] != A1J) { | |||
3864 | BS.cancelScheduling(VL, VL0); | |||
3865 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3866 | ReuseShuffleIndicies); | |||
3867 |             LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI | |||
3868 |                               << " argument " << ScalarArgs[j] << "!=" << A1J | |||
3869 |                               << "\n"); | |||
3870 | return; | |||
3871 | } | |||
3872 | } | |||
3873 | } | |||
3874 | // Verify that the bundle operands are identical between the two calls. | |||
3875 | if (CI->hasOperandBundles() && | |||
3876 | !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(), | |||
3877 | CI->op_begin() + CI->getBundleOperandsEndIndex(), | |||
3878 | CI2->op_begin() + CI2->getBundleOperandsStartIndex())) { | |||
3879 | BS.cancelScheduling(VL, VL0); | |||
3880 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3881 | ReuseShuffleIndicies); | |||
3882 |           LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" | |||
3883 |                             << *CI << "!=" << *V << '\n'); | |||
3884 | return; | |||
3885 | } | |||
3886 | } | |||
3887 | ||||
3888 | TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, | |||
3889 | ReuseShuffleIndicies); | |||
3890 | TE->setOperandsInOrder(); | |||
3891 | for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) { | |||
3892 | ValueList Operands; | |||
3893 | // Prepare the operand vector. | |||
3894 | for (Value *V : VL) { | |||
3895 | auto *CI2 = cast<CallInst>(V); | |||
3896 | Operands.push_back(CI2->getArgOperand(i)); | |||
3897 | } | |||
3898 | buildTree_rec(Operands, Depth + 1, {TE, i}); | |||
3899 | } | |||
3900 | return; | |||
3901 | } | |||
3902 | case Instruction::ShuffleVector: { | |||
3903 |       // If this is not an alternate sequence of opcodes like add-sub, | |||
3904 | // then do not vectorize this instruction. | |||
3905 | if (!S.isAltShuffle()) { | |||
3906 | BS.cancelScheduling(VL, VL0); | |||
3907 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3908 | ReuseShuffleIndicies); | |||
3909 |         LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); | |||
3910 | return; | |||
3911 | } | |||
3912 | TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, | |||
3913 | ReuseShuffleIndicies); | |||
3914 |       LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); | |||
3915 | ||||
3916 | // Reorder operands if reordering would enable vectorization. | |||
3917 | if (isa<BinaryOperator>(VL0)) { | |||
3918 | ValueList Left, Right; | |||
3919 | reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); | |||
3920 | TE->setOperand(0, Left); | |||
3921 | TE->setOperand(1, Right); | |||
3922 | buildTree_rec(Left, Depth + 1, {TE, 0}); | |||
3923 | buildTree_rec(Right, Depth + 1, {TE, 1}); | |||
3924 | return; | |||
3925 | } | |||
3926 | ||||
3927 | TE->setOperandsInOrder(); | |||
3928 | for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { | |||
3929 | ValueList Operands; | |||
3930 | // Prepare the operand vector. | |||
3931 | for (Value *V : VL) | |||
3932 | Operands.push_back(cast<Instruction>(V)->getOperand(i)); | |||
3933 | ||||
3934 | buildTree_rec(Operands, Depth + 1, {TE, i}); | |||
3935 | } | |||
3936 | return; | |||
3937 | } | |||
3938 | default: | |||
3939 | BS.cancelScheduling(VL, VL0); | |||
3940 | newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, | |||
3941 | ReuseShuffleIndicies); | |||
3942 |       LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); | |||
3943 | return; | |||
3944 | } | |||
3945 | } | |||
3946 | ||||
3947 | unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const { | |||
3948 | unsigned N = 1; | |||
3949 | Type *EltTy = T; | |||
3950 | ||||
3951 | while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) || | |||
3952 | isa<VectorType>(EltTy)) { | |||
3953 | if (auto *ST = dyn_cast<StructType>(EltTy)) { | |||
3954 | // Check that struct is homogeneous. | |||
3955 | for (const auto *Ty : ST->elements()) | |||
3956 | if (Ty != *ST->element_begin()) | |||
3957 | return 0; | |||
3958 | N *= ST->getNumElements(); | |||
3959 | EltTy = *ST->element_begin(); | |||
3960 | } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) { | |||
3961 | N *= AT->getNumElements(); | |||
3962 | EltTy = AT->getElementType(); | |||
3963 | } else { | |||
3964 | auto *VT = cast<FixedVectorType>(EltTy); | |||
3965 | N *= VT->getNumElements(); | |||
3966 | EltTy = VT->getElementType(); | |||
3967 | } | |||
3968 | } | |||
3969 | ||||
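  // For example (illustrative): T = [2 x <2 x float>] unwraps to N = 4 and
  // EltTy = float, so it can be mapped to <4 x float>, provided that type also
  // passes the register-size checks below.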
3970 | if (!isValidElementType(EltTy)) | |||
3971 | return 0; | |||
3972 | uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N)); | |||
3973 | if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T)) | |||
3974 | return 0; | |||
3975 | return N; | |||
3976 | } | |||
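
As a side note on the flattening above: a minimal, self-contained sketch (plain C++ with a hypothetical ToyType model, not the LLVM type API) of how canMapToVector multiplies out nested homogeneous aggregates into a scalar element count. For example, [4 x <2 x float>] flattens to 8 floats; a non-homogeneous struct would be rejected in the real code.

#include <cassert>
#include <cstdio>

// Toy model: a type is either a scalar leaf or a homogeneous container of
// `Count` copies of one inner type. Mirrors the flattening loop above.
struct ToyType {
  unsigned Count;          // 0 means scalar leaf
  const ToyType *Element;  // inner type when Count > 0
};

static unsigned flattenToVectorWidth(const ToyType *T) {
  unsigned N = 1;
  while (T->Count != 0) {  // struct/array/vector layer
    N *= T->Count;
    T = T->Element;        // descend into the element type
  }
  return N;                // number of scalar leaves
}

int main() {
  ToyType F32 = {0, nullptr};     // float
  ToyType V2F32 = {2, &F32};      // <2 x float>
  ToyType A4V2F32 = {4, &V2F32};  // [4 x <2 x float>]
  assert(flattenToVectorWidth(&A4V2F32) == 8);
  std::printf("flattened width: %u\n", flattenToVectorWidth(&A4V2F32));
  return 0;
}
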
3977 | ||||
3978 | bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, | |||
3979 | SmallVectorImpl<unsigned> &CurrentOrder) const { | |||
3980 | Instruction *E0 = cast<Instruction>(OpValue); | |||
3981 | assert(E0->getOpcode() == Instruction::ExtractElement || | |||
3982 | E0->getOpcode() == Instruction::ExtractValue); | |||
3983 | assert(E0->getOpcode() == getSameOpcode(VL).getOpcode() && "Invalid opcode"); | |||
3984 | // Check if all of the extracts come from the same vector and from the | |||
3985 | // correct offset. | |||
3986 | Value *Vec = E0->getOperand(0); | |||
3987 | ||||
3988 | CurrentOrder.clear(); | |||
3989 | ||||
3990 | // We have to extract from a vector/aggregate with the same number of elements. | |||
3991 | unsigned NElts; | |||
3992 | if (E0->getOpcode() == Instruction::ExtractValue) { | |||
3993 | const DataLayout &DL = E0->getModule()->getDataLayout(); | |||
3994 | NElts = canMapToVector(Vec->getType(), DL); | |||
3995 | if (!NElts) | |||
3996 | return false; | |||
3997 | // Check if load can be rewritten as load of vector. | |||
3998 | LoadInst *LI = dyn_cast<LoadInst>(Vec); | |||
3999 | if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) | |||
4000 | return false; | |||
4001 | } else { | |||
4002 | NElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); | |||
4003 | } | |||
4004 | ||||
4005 | if (NElts != VL.size()) | |||
4006 | return false; | |||
4007 | ||||
4008 | // Check that all of the indices extract from the correct offset. | |||
4009 | bool ShouldKeepOrder = true; | |||
4010 | unsigned E = VL.size(); | |||
4011 | // Assign to all items the initial value E + 1 so we can check if the extract | |||
4012 | // instruction index was used already. | |||
4013 | // Also, later we can check that all the indices are used and we have a | |||
4014 | // consecutive access in the extract instructions, by checking that no | |||
4015 | // element of CurrentOrder still has value E + 1. | |||
4016 | CurrentOrder.assign(E, E + 1); | |||
4017 | unsigned I = 0; | |||
4018 | for (; I < E; ++I) { | |||
4019 | auto *Inst = cast<Instruction>(VL[I]); | |||
4020 | if (Inst->getOperand(0) != Vec) | |||
4021 | break; | |||
4022 | Optional<unsigned> Idx = getExtractIndex(Inst); | |||
4023 | if (!Idx) | |||
4024 | break; | |||
4025 | const unsigned ExtIdx = *Idx; | |||
4026 | if (ExtIdx != I) { | |||
4027 | if (ExtIdx >= E || CurrentOrder[ExtIdx] != E + 1) | |||
4028 | break; | |||
4029 | ShouldKeepOrder = false; | |||
4030 | CurrentOrder[ExtIdx] = I; | |||
4031 | } else { | |||
4032 | if (CurrentOrder[I] != E + 1) | |||
4033 | break; | |||
4034 | CurrentOrder[I] = I; | |||
4035 | } | |||
4036 | } | |||
4037 | if (I < E) { | |||
4038 | CurrentOrder.clear(); | |||
4039 | return false; | |||
4040 | } | |||
4041 | ||||
4042 | return ShouldKeepOrder; | |||
4043 | } | |||
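
A hedged illustration of the CurrentOrder bookkeeping in canReuseExtract, reduced to plain indices (the helper name computeExtractOrder and the use of std::vector are illustrative, not from the pass): every slot starts at E + 1, each extract index may claim a slot only once, and the function reports whether the lanes were already in source order.

#include <cstdio>
#include <vector>

// Mirrors the index bookkeeping for a bundle whose extract indices are
// ExtIdx[0..E-1]. Returns true if the extracts are already consecutive;
// Order is filled with the permutation that must otherwise be applied.
static bool computeExtractOrder(const std::vector<unsigned> &ExtIdx,
                                std::vector<unsigned> &Order) {
  const unsigned E = ExtIdx.size();
  Order.assign(E, E + 1);  // E + 1 marks "slot not used yet"
  bool KeepOrder = true;
  for (unsigned I = 0; I < E; ++I) {
    unsigned Idx = ExtIdx[I];
    if (Idx >= E || Order[Idx] != E + 1)
      return false;        // out of range or index used twice
    if (Idx != I)
      KeepOrder = false;   // at least one lane is out of place
    Order[Idx] = I;
  }
  return KeepOrder;
}

int main() {
  std::vector<unsigned> Order;
  // Extracts pull lanes 2, 0, 1, 3 of a 4-wide source vector.
  bool InOrder = computeExtractOrder({2, 0, 1, 3}, Order);
  std::printf("in order: %d, permutation: %u %u %u %u\n", InOrder, Order[0],
              Order[1], Order[2], Order[3]);
  return 0;
}
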
4044 | ||||
4045 | bool BoUpSLP::areAllUsersVectorized(Instruction *I, | |||
4046 | ArrayRef<Value *> VectorizedVals) const { | |||
4047 | return (I->hasOneUse() && is_contained(VectorizedVals, I)) || | |||
4048 | llvm::all_of(I->users(), [this](User *U) { | |||
4049 | return ScalarToTreeEntry.count(U) > 0; | |||
4050 | }); | |||
4051 | } | |||
4052 | ||||
4053 | static std::pair<InstructionCost, InstructionCost> | |||
4054 | getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy, | |||
4055 | TargetTransformInfo *TTI, TargetLibraryInfo *TLI) { | |||
4056 | Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); | |||
4057 | ||||
4058 | // Calculate the cost of the scalar and vector calls. | |||
4059 | SmallVector<Type *, 4> VecTys; | |||
4060 | for (Use &Arg : CI->args()) | |||
4061 | VecTys.push_back( | |||
4062 | FixedVectorType::get(Arg->getType(), VecTy->getNumElements())); | |||
4063 | FastMathFlags FMF; | |||
4064 | if (auto *FPCI = dyn_cast<FPMathOperator>(CI)) | |||
4065 | FMF = FPCI->getFastMathFlags(); | |||
4066 | SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end()); | |||
4067 | IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF, | |||
4068 | dyn_cast<IntrinsicInst>(CI)); | |||
4069 | auto IntrinsicCost = | |||
4070 | TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput); | |||
4071 | ||||
4072 | auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>( | |||
4073 | VecTy->getNumElements())), | |||
4074 | false /*HasGlobalPred*/); | |||
4075 | Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); | |||
4076 | auto LibCost = IntrinsicCost; | |||
4077 | if (!CI->isNoBuiltin() && VecFunc) { | |||
4078 | // Calculate the cost of the vector library call. | |||
4079 | // If the corresponding vector call is cheaper, return its cost. | |||
4080 | LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys, | |||
4081 | TTI::TCK_RecipThroughput); | |||
4082 | } | |||
4083 | return {IntrinsicCost, LibCost}; | |||
4084 | } | |||
4085 | ||||
4086 | /// Compute the cost of creating a vector of type \p VecTy containing the | |||
4087 | /// extracted values from \p VL. | |||
4088 | static InstructionCost | |||
4089 | computeExtractCost(ArrayRef<Value *> VL, FixedVectorType *VecTy, | |||
4090 | TargetTransformInfo::ShuffleKind ShuffleKind, | |||
4091 | ArrayRef<int> Mask, TargetTransformInfo &TTI) { | |||
4092 | unsigned NumOfParts = TTI.getNumberOfParts(VecTy); | |||
4093 | ||||
4094 | if (ShuffleKind != TargetTransformInfo::SK_PermuteSingleSrc || !NumOfParts || | |||
4095 | VecTy->getNumElements() < NumOfParts) | |||
4096 | return TTI.getShuffleCost(ShuffleKind, VecTy, Mask); | |||
4097 | ||||
4098 | bool AllConsecutive = true; | |||
4099 | unsigned EltsPerVector = VecTy->getNumElements() / NumOfParts; | |||
4100 | unsigned Idx = -1; | |||
4101 | InstructionCost Cost = 0; | |||
4102 | ||||
4103 | // Process extracts in blocks of EltsPerVector to check if the source vector | |||
4104 | // operand can be re-used directly. If not, add the cost of creating a shuffle | |||
4105 | // to extract the values into a vector register. | |||
4106 | for (auto *V : VL) { | |||
4107 | ++Idx; | |||
4108 | ||||
4109 | // Reached the start of a new vector register. | |||
4110 | if (Idx % EltsPerVector == 0) { | |||
4111 | AllConsecutive = true; | |||
4112 | continue; | |||
4113 | } | |||
4114 | ||||
4115 | // Check whether all extracts for a vector register on the target directly | |||
4116 | // extract values in order. | |||
4117 | unsigned CurrentIdx = *getExtractIndex(cast<Instruction>(V)); | |||
4118 | unsigned PrevIdx = *getExtractIndex(cast<Instruction>(VL[Idx - 1])); | |||
4119 | AllConsecutive &= PrevIdx + 1 == CurrentIdx && | |||
4120 | CurrentIdx % EltsPerVector == Idx % EltsPerVector; | |||
4121 | ||||
4122 | if (AllConsecutive) | |||
4123 | continue; | |||
4124 | ||||
4125 | // Skip all indices, except for the last index per vector block. | |||
4126 | if ((Idx + 1) % EltsPerVector != 0 && Idx + 1 != VL.size()) | |||
4127 | continue; | |||
4128 | ||||
4129 | // If we have a series of extracts which are not consecutive and hence | |||
4130 | // cannot re-use the source vector register directly, compute the shuffle | |||
4131 | // cost to extract a vector with EltsPerVector elements. | |||
4132 | Cost += TTI.getShuffleCost( | |||
4133 | TargetTransformInfo::SK_PermuteSingleSrc, | |||
4134 | FixedVectorType::get(VecTy->getElementType(), EltsPerVector)); | |||
4135 | } | |||
4136 | return Cost; | |||
4137 | } | |||
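
A simplified, assumption-laden sketch of the block walk in computeExtractCost: extracts are scanned in groups of EltsPerVector, and every group that is not a consecutive, lane-aligned run is charged one extra single-source shuffle. Real costs come from TTI; this standalone snippet only counts how many extra shuffles would be charged.

#include <cstdio>
#include <vector>

// Counts the blocks of EltsPerVector extracts that are NOT a consecutive,
// lane-aligned run and therefore need one extra single-source shuffle each.
static unsigned countNonConsecutiveBlocks(const std::vector<unsigned> &ExtIdx,
                                          unsigned EltsPerVector) {
  unsigned Shuffles = 0;
  for (unsigned Base = 0; Base < ExtIdx.size(); Base += EltsPerVector) {
    bool Consecutive = true;
    for (unsigned I = 1; I < EltsPerVector && Base + I < ExtIdx.size(); ++I)
      Consecutive &= ExtIdx[Base + I] == ExtIdx[Base + I - 1] + 1 &&
                     ExtIdx[Base + I] % EltsPerVector ==
                         (Base + I) % EltsPerVector;
    if (!Consecutive)
      ++Shuffles;
  }
  return Shuffles;
}

int main() {
  // First block {0,1,2,3} reuses its source register; the scrambled second
  // block {7,5,6,4} needs one extra permute.
  std::printf("extra shuffles: %u\n",
              countNonConsecutiveBlocks({0, 1, 2, 3, 7, 5, 6, 4}, 4));
  return 0;
}
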
4138 | ||||
4139 | /// Build a shuffle mask for shuffle graph entries and lists of main and | |||
4140 | /// alternate operation operands. | |||
4141 | static void | |||
4142 | buildSuffleEntryMask(ArrayRef<Value *> VL, ArrayRef<unsigned> ReorderIndices, | |||
4143 | ArrayRef<int> ReusesIndices, | |||
4144 | const function_ref<bool(Instruction *)> IsAltOp, | |||
4145 | SmallVectorImpl<int> &Mask, | |||
4146 | SmallVectorImpl<Value *> *OpScalars = nullptr, | |||
4147 | SmallVectorImpl<Value *> *AltScalars = nullptr) { | |||
4148 | unsigned Sz = VL.size(); | |||
4149 | Mask.assign(Sz, UndefMaskElem); | |||
4150 | SmallVector<int> OrderMask; | |||
4151 | if (!ReorderIndices.empty()) | |||
4152 | inversePermutation(ReorderIndices, OrderMask); | |||
4153 | for (unsigned I = 0; I < Sz; ++I) { | |||
4154 | unsigned Idx = I; | |||
4155 | if (!ReorderIndices.empty()) | |||
4156 | Idx = OrderMask[I]; | |||
4157 | auto *OpInst = cast<Instruction>(VL[I]); | |||
4158 | if (IsAltOp(OpInst)) { | |||
4159 | Mask[Idx] = Sz + I; | |||
4160 | if (AltScalars) | |||
4161 | AltScalars->push_back(OpInst); | |||
4162 | } else { | |||
4163 | Mask[Idx] = I; | |||
4164 | if (OpScalars) | |||
4165 | OpScalars->push_back(OpInst); | |||
4166 | } | |||
4167 | } | |||
4168 | if (!ReusesIndices.empty()) { | |||
4169 | SmallVector<int> NewMask(ReusesIndices.size(), UndefMaskElem); | |||
4170 | transform(ReusesIndices, NewMask.begin(), [&Mask](int Idx) { | |||
4171 | return Idx != UndefMaskElem ? Mask[Idx] : UndefMaskElem; | |||
4172 | }); | |||
4173 | Mask.swap(NewMask); | |||
4174 | } | |||
4175 | } | |||
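
For intuition, a small standalone sketch of the mask buildSuffleEntryMask produces on its simple path (empty ReorderIndices and ReusesIndices are assumed): lanes executing the main opcode select element I from the first vector, lanes executing the alternate opcode select element Sz + I from the second.

#include <cstdio>
#include <vector>

// Builds the select mask for an alternating main/alt bundle, mirroring the
// simple path above where no reordering or reuse shuffling is needed.
static std::vector<int> buildAltMask(const std::vector<bool> &IsAltOp) {
  const int Sz = static_cast<int>(IsAltOp.size());
  std::vector<int> Mask(Sz);
  for (int I = 0; I < Sz; ++I)
    Mask[I] = IsAltOp[I] ? Sz + I : I;  // second source for alternate lanes
  return Mask;
}

int main() {
  // add, sub, add, sub  ->  {0, 5, 2, 7}
  std::vector<int> Mask = buildAltMask({false, true, false, true});
  for (int M : Mask)
    std::printf("%d ", M);
  std::printf("\n");
  return 0;
}
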
4176 | ||||
4177 | InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E, | |||
4178 | ArrayRef<Value *> VectorizedVals) { | |||
4179 | ArrayRef<Value*> VL = E->Scalars; | |||
4180 | ||||
4181 | Type *ScalarTy = VL[0]->getType(); | |||
4182 | if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) | |||
4183 | ScalarTy = SI->getValueOperand()->getType(); | |||
4184 | else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0])) | |||
4185 | ScalarTy = CI->getOperand(0)->getType(); | |||
4186 | else if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) | |||
4187 | ScalarTy = IE->getOperand(1)->getType(); | |||
4188 | auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); | |||
4189 | TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; | |||
4190 | ||||
4191 | // If we have computed a smaller type for the expression, update VecTy so | |||
4192 | // that the costs will be accurate. | |||
4193 | if (MinBWs.count(VL[0])) | |||
4194 | VecTy = FixedVectorType::get( | |||
4195 | IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size()); | |||
4196 | auto *FinalVecTy = VecTy; | |||
4197 | ||||
4198 | unsigned ReuseShuffleNumbers = E->ReuseShuffleIndices.size(); | |||
4199 | bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); | |||
4200 | if (NeedToShuffleReuses) | |||
4201 | FinalVecTy = | |||
4202 | FixedVectorType::get(VecTy->getElementType(), ReuseShuffleNumbers); | |||
4203 | // FIXME: it tries to fix a problem with MSVC buildbots. | |||
4204 | TargetTransformInfo &TTIRef = *TTI; | |||
4205 | auto &&AdjustExtractsCost = [this, &TTIRef, CostKind, VL, VecTy, | |||
4206 | VectorizedVals](InstructionCost &Cost, | |||
4207 | bool IsGather) { | |||
4208 | DenseMap<Value *, int> ExtractVectorsTys; | |||
4209 | for (auto *V : VL) { | |||
4210 | // If all users of instruction are going to be vectorized and this | |||
4211 | // instruction itself is not going to be vectorized, consider this | |||
4212 | // instruction as dead and remove its cost from the final cost of the | |||
4213 | // vectorized tree. | |||
4214 | if (!areAllUsersVectorized(cast<Instruction>(V), VectorizedVals) || | |||
4215 | (IsGather && ScalarToTreeEntry.count(V))) | |||
4216 | continue; | |||
4217 | auto *EE = cast<ExtractElementInst>(V); | |||
4218 | unsigned Idx = *getExtractIndex(EE); | |||
4219 | if (TTIRef.getNumberOfParts(VecTy) != | |||
4220 | TTIRef.getNumberOfParts(EE->getVectorOperandType())) { | |||
4221 | auto It = | |||
4222 | ExtractVectorsTys.try_emplace(EE->getVectorOperand(), Idx).first; | |||
4223 | It->getSecond() = std::min<int>(It->second, Idx); | |||
4224 | } | |||
4225 | // Take credit for instruction that will become dead. | |||
4226 | if (EE->hasOneUse()) { | |||
4227 | Instruction *Ext = EE->user_back(); | |||
4228 | if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && | |||
4229 | all_of(Ext->users(), | |||
4230 | [](User *U) { return isa<GetElementPtrInst>(U); })) { | |||
4231 | // Use getExtractWithExtendCost() to calculate the cost of | |||
4232 | // extractelement/ext pair. | |||
4233 | Cost -= | |||
4234 | TTIRef.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(), | |||
4235 | EE->getVectorOperandType(), Idx); | |||
4236 | // Add back the cost of s|zext which is subtracted separately. | |||
4237 | Cost += TTIRef.getCastInstrCost( | |||
4238 | Ext->getOpcode(), Ext->getType(), EE->getType(), | |||
4239 | TTI::getCastContextHint(Ext), CostKind, Ext); | |||
4240 | continue; | |||
4241 | } | |||
4242 | } | |||
4243 | Cost -= TTIRef.getVectorInstrCost(Instruction::ExtractElement, | |||
4244 | EE->getVectorOperandType(), Idx); | |||
4245 | } | |||
4246 | // Add a cost for subvector extracts/inserts if required. | |||
4247 | for (const auto &Data : ExtractVectorsTys) { | |||
4248 | auto *EEVTy = cast<FixedVectorType>(Data.first->getType()); | |||
4249 | unsigned NumElts = VecTy->getNumElements(); | |||
4250 | if (TTIRef.getNumberOfParts(EEVTy) > TTIRef.getNumberOfParts(VecTy)) { | |||
4251 | unsigned Idx = (Data.second / NumElts) * NumElts; | |||
4252 | unsigned EENumElts = EEVTy->getNumElements(); | |||
4253 | if (Idx + NumElts <= EENumElts) { | |||
4254 | Cost += | |||
4255 | TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, | |||
4256 | EEVTy, None, Idx, VecTy); | |||
4257 | } else { | |||
4258 | // Need to round up the subvector type vectorization factor to avoid a | |||
4259 | // crash in cost model functions. Make SubVT so that Idx + VF of SubVT | |||
4260 | // <= EENumElts. | |||
4261 | auto *SubVT = | |||
4262 | FixedVectorType::get(VecTy->getElementType(), EENumElts - Idx); | |||
4263 | Cost += | |||
4264 | TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, | |||
4265 | EEVTy, None, Idx, SubVT); | |||
4266 | } | |||
4267 | } else { | |||
4268 | Cost += TTIRef.getShuffleCost(TargetTransformInfo::SK_InsertSubvector, | |||
4269 | VecTy, None, 0, EEVTy); | |||
4270 | } | |||
4271 | } | |||
4272 | }; | |||
4273 | if (E->State == TreeEntry::NeedToGather) { | |||
4274 | if (allConstant(VL)) | |||
4275 | return 0; | |||
4276 | if (isa<InsertElementInst>(VL[0])) | |||
4277 | return InstructionCost::getInvalid(); | |||
4278 | SmallVector<int> Mask; | |||
4279 | SmallVector<const TreeEntry *> Entries; | |||
4280 | Optional<TargetTransformInfo::ShuffleKind> Shuffle = | |||
4281 | isGatherShuffledEntry(E, Mask, Entries); | |||
4282 | if (Shuffle.hasValue()) { | |||
4283 | InstructionCost GatherCost = 0; | |||
4284 | if (ShuffleVectorInst::isIdentityMask(Mask)) { | |||
4285 | // Perfect match in the graph, will reuse the previously vectorized | |||
4286 | // node. Cost is 0. | |||
4287 | LLVM_DEBUG( | |||
4288 | dbgs() | |||
4289 | << "SLP: perfect diamond match for gather bundle that starts with " | |||
4290 | << *VL.front() << ".\n"); | |||
4291 | if (NeedToShuffleReuses) | |||
4292 | GatherCost = | |||
4293 | TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, | |||
4294 | FinalVecTy, E->ReuseShuffleIndices); | |||
4295 | } else { | |||
4296 | LLVM_DEBUG(dbgs() << "SLP: shuffled " << Entries.size() | |||
4297 | << " entries for bundle that starts with " | |||
4298 | << *VL.front() << ".\n"); | |||
4299 | // Detected that instead of a gather we can emit a shuffle of one/two | |||
4300 | // previously vectorized nodes. Add the cost of the permutation rather | |||
4301 | // than the gather. | |||
4302 | ::addMask(Mask, E->ReuseShuffleIndices); | |||
4303 | GatherCost = TTI->getShuffleCost(*Shuffle, FinalVecTy, Mask); | |||
4304 | } | |||
4305 | return GatherCost; | |||
4306 | } | |||
4307 | if (isSplat(VL)) { | |||
4308 | // Found the broadcasting of the single scalar, calculate the cost as the | |||
4309 | // broadcast. | |||
4310 | return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy); | |||
4311 | } | |||
4312 | if (E->getOpcode() == Instruction::ExtractElement && allSameType(VL) && | |||
4313 | allSameBlock(VL) && | |||
4314 | !isa<ScalableVectorType>( | |||
4315 | cast<ExtractElementInst>(E->getMainOp())->getVectorOperandType())) { | |||
4316 | // Check that gather of extractelements can be represented as just a | |||
4317 | // shuffle of one or two vectors that the scalars are extracted from. | |||
4318 | SmallVector<int> Mask; | |||
4319 | Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = | |||
4320 | isShuffle(VL, Mask); | |||
4321 | if (ShuffleKind.hasValue()) { | |||
4322 | // Found a bunch of extractelement instructions that must be gathered | |||
4323 | // into a vector and can be represented as a permutation of elements in a | |||
4324 | // single input vector or of 2 input vectors. | |||
4325 | InstructionCost Cost = | |||
4326 | computeExtractCost(VL, VecTy, *ShuffleKind, Mask, *TTI); | |||
4327 | AdjustExtractsCost(Cost, /*IsGather=*/true); | |||
4328 | if (NeedToShuffleReuses) | |||
4329 | Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, | |||
4330 | FinalVecTy, E->ReuseShuffleIndices); | |||
4331 | return Cost; | |||
4332 | } | |||
4333 | } | |||
4334 | InstructionCost ReuseShuffleCost = 0; | |||
4335 | if (NeedToShuffleReuses) | |||
4336 | ReuseShuffleCost = TTI->getShuffleCost( | |||
4337 | TTI::SK_PermuteSingleSrc, FinalVecTy, E->ReuseShuffleIndices); | |||
4338 | // Improve gather cost for gather of loads, if we can group some of the | |||
4339 | // loads into vector loads. | |||
4340 | if (VL.size() > 2 && E->getOpcode() == Instruction::Load && | |||
4341 | !E->isAltShuffle()) { | |||
4342 | BoUpSLP::ValueSet VectorizedLoads; | |||
4343 | unsigned StartIdx = 0; | |||
4344 | unsigned VF = VL.size() / 2; | |||
4345 | unsigned VectorizedCnt = 0; | |||
4346 | unsigned ScatterVectorizeCnt = 0; | |||
4347 | const unsigned Sz = DL->getTypeSizeInBits(E->getMainOp()->getType()); | |||
4348 | for (unsigned MinVF = getMinVF(2 * Sz); VF >= MinVF; VF /= 2) { | |||
4349 | for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End; | |||
4350 | Cnt += VF) { | |||
4351 | ArrayRef<Value *> Slice = VL.slice(Cnt, VF); | |||
4352 | if (!VectorizedLoads.count(Slice.front()) && | |||
4353 | !VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) { | |||
4354 | SmallVector<Value *> PointerOps; | |||
4355 | OrdersType CurrentOrder; | |||
4356 | LoadsState LS = canVectorizeLoads(Slice, Slice.front(), *TTI, *DL, | |||
4357 | *SE, CurrentOrder, PointerOps); | |||
4358 | switch (LS) { | |||
4359 | case LoadsState::Vectorize: | |||
4360 | case LoadsState::ScatterVectorize: | |||
4361 | // Mark the vectorized loads so that we don't vectorize them | |||
4362 | // again. | |||
4363 | if (LS == LoadsState::Vectorize) | |||
4364 | ++VectorizedCnt; | |||
4365 | else | |||
4366 | ++ScatterVectorizeCnt; | |||
4367 | VectorizedLoads.insert(Slice.begin(), Slice.end()); | |||
4368 | // If we vectorized the initial block, no need to try to vectorize it | |||
4369 | // again. | |||
4370 | if (Cnt == StartIdx) | |||
4371 | StartIdx += VF; | |||
4372 | break; | |||
4373 | case LoadsState::Gather: | |||
4374 | break; | |||
4375 | } | |||
4376 | } | |||
4377 | } | |||
4378 | // Check if the whole array was vectorized already - exit. | |||
4379 | if (StartIdx >= VL.size()) | |||
4380 | break; | |||
4381 | // Found vectorizable parts - exit. | |||
4382 | if (!VectorizedLoads.empty()) | |||
4383 | break; | |||
4384 | } | |||
4385 | if (!VectorizedLoads.empty()) { | |||
4386 | InstructionCost GatherCost = 0; | |||
4387 | // Get the cost for gathered loads. | |||
4388 | for (unsigned I = 0, End = VL.size(); I < End; I += VF) { | |||
4389 | if (VectorizedLoads.contains(VL[I])) | |||
4390 | continue; | |||
4391 | GatherCost += getGatherCost(VL.slice(I, VF)); | |||
4392 | } | |||
4393 | // The cost for vectorized loads. | |||
4394 | InstructionCost ScalarsCost = 0; | |||
4395 | for (Value *V : VectorizedLoads) { | |||
4396 | auto *LI = cast<LoadInst>(V); | |||
4397 | ScalarsCost += TTI->getMemoryOpCost( | |||
4398 | Instruction::Load, LI->getType(), LI->getAlign(), | |||
4399 | LI->getPointerAddressSpace(), CostKind, LI); | |||
4400 | } | |||
4401 | auto *LI = cast<LoadInst>(E->getMainOp()); | |||
4402 | auto *LoadTy = FixedVectorType::get(LI->getType(), VF); | |||
4403 | Align Alignment = LI->getAlign(); | |||
4404 | GatherCost += | |||
4405 | VectorizedCnt * | |||
4406 | TTI->getMemoryOpCost(Instruction::Load, LoadTy, Alignment, | |||
4407 | LI->getPointerAddressSpace(), CostKind, LI); | |||
4408 | GatherCost += ScatterVectorizeCnt * | |||
4409 | TTI->getGatherScatterOpCost( | |||
4410 | Instruction::Load, LoadTy, LI->getPointerOperand(), | |||
4411 | /*VariableMask=*/false, Alignment, CostKind, LI); | |||
4412 | // Add the cost for the subvectors shuffling. | |||
4413 | GatherCost += ((VL.size() - VF) / VF) * | |||
4414 | TTI->getShuffleCost(TTI::SK_Select, VecTy); | |||
4415 | return ReuseShuffleCost + GatherCost - ScalarsCost; | |||
4416 | } | |||
4417 | } | |||
4418 | return ReuseShuffleCost + getGatherCost(VL); | |||
4419 | } | |||
4420 | InstructionCost CommonCost = 0; | |||
4421 | SmallVector<int> Mask; | |||
4422 | if (!E->ReorderIndices.empty()) { | |||
4423 | SmallVector<int> NewMask; | |||
4424 | if (E->getOpcode() == Instruction::Store) { | |||
4425 | // For stores the order is actually a mask. | |||
4426 | NewMask.resize(E->ReorderIndices.size()); | |||
4427 | copy(E->ReorderIndices, NewMask.begin()); | |||
4428 | } else { | |||
4429 | inversePermutation(E->ReorderIndices, NewMask); | |||
4430 | } | |||
4431 | ::addMask(Mask, NewMask); | |||
4432 | } | |||
4433 | if (NeedToShuffleReuses) | |||
4434 | ::addMask(Mask, E->ReuseShuffleIndices); | |||
4435 | if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask)) | |||
4436 | CommonCost = | |||
4437 | TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask); | |||
4438 | assert((E->State == TreeEntry::Vectorize || | |||
4439 | E->State == TreeEntry::ScatterVectorize) && | |||
4440 | "Unhandled state"); | |||
4441 | assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL"); | |||
4442 | Instruction *VL0 = E->getMainOp(); | |||
4443 | unsigned ShuffleOrOp = | |||
4444 | E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode(); | |||
4445 | switch (ShuffleOrOp) { | |||
4446 | case Instruction::PHI: | |||
4447 | return 0; | |||
4448 | ||||
4449 | case Instruction::ExtractValue: | |||
4450 | case Instruction::ExtractElement: { | |||
4451 | // The common cost of removing ExtractElement/ExtractValue instructions + | |||
4452 | // the cost of shuffles, if required to reshuffle the original vector. | |||
4453 | if (NeedToShuffleReuses) { | |||
4454 | unsigned Idx = 0; | |||
4455 | for (unsigned I : E->ReuseShuffleIndices) { | |||
4456 | if (ShuffleOrOp == Instruction::ExtractElement) { | |||
4457 | auto *EE = cast<ExtractElementInst>(VL[I]); | |||
4458 | CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement, | |||
4459 | EE->getVectorOperandType(), | |||
4460 | *getExtractIndex(EE)); | |||
4461 | } else { | |||
4462 | CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement, | |||
4463 | VecTy, Idx); | |||
4464 | ++Idx; | |||
4465 | } | |||
4466 | } | |||
4467 | Idx = ReuseShuffleNumbers; | |||
4468 | for (Value *V : VL) { | |||
4469 | if (ShuffleOrOp == Instruction::ExtractElement) { | |||
4470 | auto *EE = cast<ExtractElementInst>(V); | |||
4471 | CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement, | |||
4472 | EE->getVectorOperandType(), | |||
4473 | *getExtractIndex(EE)); | |||
4474 | } else { | |||
4475 | --Idx; | |||
4476 | CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement, | |||
4477 | VecTy, Idx); | |||
4478 | } | |||
4479 | } | |||
4480 | } | |||
4481 | if (ShuffleOrOp == Instruction::ExtractValue) { | |||
4482 | for (unsigned I = 0, E = VL.size(); I < E; ++I) { | |||
4483 | auto *EI = cast<Instruction>(VL[I]); | |||
4484 | // Take credit for instruction that will become dead. | |||
4485 | if (EI->hasOneUse()) { | |||
4486 | Instruction *Ext = EI->user_back(); | |||
4487 | if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && | |||
4488 | all_of(Ext->users(), | |||
4489 | [](User *U) { return isa<GetElementPtrInst>(U); })) { | |||
4490 | // Use getExtractWithExtendCost() to calculate the cost of | |||
4491 | // extractelement/ext pair. | |||
4492 | CommonCost -= TTI->getExtractWithExtendCost( | |||
4493 | Ext->getOpcode(), Ext->getType(), VecTy, I); | |||
4494 | // Add back the cost of s|zext which is subtracted separately. | |||
4495 | CommonCost += TTI->getCastInstrCost( | |||
4496 | Ext->getOpcode(), Ext->getType(), EI->getType(), | |||
4497 | TTI::getCastContextHint(Ext), CostKind, Ext); | |||
4498 | continue; | |||
4499 | } | |||
4500 | } | |||
4501 | CommonCost -= | |||
4502 | TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I); | |||
4503 | } | |||
4504 | } else { | |||
4505 | AdjustExtractsCost(CommonCost, /*IsGather=*/false); | |||
4506 | } | |||
4507 | return CommonCost; | |||
4508 | } | |||
4509 | case Instruction::InsertElement: { | |||
4510 | assert(E->ReuseShuffleIndices.empty() && | |||
4511 | "Unique insertelements only are expected."); | |||
4512 | auto *SrcVecTy = cast<FixedVectorType>(VL0->getType()); | |||
4513 | ||||
4514 | unsigned const NumElts = SrcVecTy->getNumElements(); | |||
4515 | unsigned const NumScalars = VL.size(); | |||
4516 | APInt DemandedElts = APInt::getNullValue(NumElts); | |||
4517 | // TODO: Add support for Instruction::InsertValue. | |||
4518 | SmallVector<int> Mask; | |||
4519 | if (!E->ReorderIndices.empty()) { | |||
4520 | inversePermutation(E->ReorderIndices, Mask); | |||
4521 | Mask.append(NumElts - NumScalars, UndefMaskElem); | |||
4522 | } else { | |||
4523 | Mask.assign(NumElts, UndefMaskElem); | |||
4524 | std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); | |||
4525 | } | |||
4526 | unsigned Offset = *getInsertIndex(VL0, 0); | |||
4527 | bool IsIdentity = true; | |||
4528 | SmallVector<int> PrevMask(NumElts, UndefMaskElem); | |||
4529 | Mask.swap(PrevMask); | |||
4530 | for (unsigned I = 0; I < NumScalars; ++I) { | |||
4531 | Optional<int> InsertIdx = getInsertIndex(VL[PrevMask[I]], 0); | |||
4532 | if (!InsertIdx || *InsertIdx == UndefMaskElem) | |||
4533 | continue; | |||
4534 | DemandedElts.setBit(*InsertIdx); | |||
4535 | IsIdentity &= *InsertIdx - Offset == I; | |||
4536 | Mask[*InsertIdx - Offset] = I; | |||
4537 | } | |||
4538 | assert(Offset < NumElts && "Failed to find vector index offset"); | |||
4539 | ||||
4540 | InstructionCost Cost = 0; | |||
4541 | Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts, | |||
4542 | /*Insert*/ true, /*Extract*/ false); | |||
4543 | ||||
4544 | if (IsIdentity && NumElts != NumScalars && Offset % NumScalars != 0) { | |||
4545 | // FIXME: Replace with SK_InsertSubvector once it is properly supported. | |||
4546 | unsigned Sz = PowerOf2Ceil(Offset + NumScalars); | |||
4547 | Cost += TTI->getShuffleCost( | |||
4548 | TargetTransformInfo::SK_PermuteSingleSrc, | |||
4549 | FixedVectorType::get(SrcVecTy->getElementType(), Sz)); | |||
4550 | } else if (!IsIdentity) { | |||
4551 | auto *FirstInsert = | |||
4552 | cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { | |||
4553 | return !is_contained(E->Scalars, | |||
4554 | cast<Instruction>(V)->getOperand(0)); | |||
4555 | })); | |||
4556 | if (isa<UndefValue>(FirstInsert->getOperand(0))) { | |||
4557 | Cost += TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, SrcVecTy, Mask); | |||
4558 | } else { | |||
4559 | SmallVector<int> InsertMask(NumElts); | |||
4560 | std::iota(InsertMask.begin(), InsertMask.end(), 0); | |||
4561 | for (unsigned I = 0; I < NumElts; I++) { | |||
4562 | if (Mask[I] != UndefMaskElem) | |||
4563 | InsertMask[Offset + I] = NumElts + I; | |||
4564 | } | |||
4565 | Cost += | |||
4566 | TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVecTy, InsertMask); | |||
4567 | } | |||
4568 | } | |||
4569 | ||||
4570 | return Cost; | |||
4571 | } | |||
4572 | case Instruction::ZExt: | |||
4573 | case Instruction::SExt: | |||
4574 | case Instruction::FPToUI: | |||
4575 | case Instruction::FPToSI: | |||
4576 | case Instruction::FPExt: | |||
4577 | case Instruction::PtrToInt: | |||
4578 | case Instruction::IntToPtr: | |||
4579 | case Instruction::SIToFP: | |||
4580 | case Instruction::UIToFP: | |||
4581 | case Instruction::Trunc: | |||
4582 | case Instruction::FPTrunc: | |||
4583 | case Instruction::BitCast: { | |||
4584 | Type *SrcTy = VL0->getOperand(0)->getType(); | |||
4585 | InstructionCost ScalarEltCost = | |||
4586 | TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy, | |||
4587 | TTI::getCastContextHint(VL0), CostKind, VL0); | |||
4588 | if (NeedToShuffleReuses) { | |||
4589 | CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; | |||
4590 | } | |||
4591 | ||||
4592 | // Calculate the cost of this instruction. | |||
4593 | InstructionCost ScalarCost = VL.size() * ScalarEltCost; | |||
4594 | ||||
4595 | auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size()); | |||
4596 | InstructionCost VecCost = 0; | |||
4597 | // Check if the values are candidates to demote. | |||
4598 | if (!MinBWs.count(VL0) || VecTy != SrcVecTy) { | |||
4599 | VecCost = CommonCost + TTI->getCastInstrCost( | |||
4600 | E->getOpcode(), VecTy, SrcVecTy, | |||
4601 | TTI::getCastContextHint(VL0), CostKind, VL0); | |||
4602 | } | |||
4603 | LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); | |||
4604 | return VecCost - ScalarCost; | |||
4605 | } | |||
4606 | case Instruction::FCmp: | |||
4607 | case Instruction::ICmp: | |||
4608 | case Instruction::Select: { | |||
4609 | // Calculate the cost of this instruction. | |||
4610 | InstructionCost ScalarEltCost = | |||
4611 | TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, Builder.getInt1Ty(), | |||
4612 | CmpInst::BAD_ICMP_PREDICATE, CostKind, VL0); | |||
4613 | if (NeedToShuffleReuses) { | |||
4614 | CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; | |||
4615 | } | |||
4616 | auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size()); | |||
4617 | InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; | |||
4618 | ||||
4619 | // Check if all entries in VL are either compares or selects with compares | |||
4620 | // as condition that have the same predicates. | |||
4621 | CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE; | |||
4622 | bool First = true; | |||
4623 | for (auto *V : VL) { | |||
4624 | CmpInst::Predicate CurrentPred; | |||
4625 | auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value()); | |||
4626 | if ((!match(V, m_Select(MatchCmp, m_Value(), m_Value())) && | |||
4627 | !match(V, MatchCmp)) || | |||
4628 | (!First && VecPred != CurrentPred)) { | |||
4629 | VecPred = CmpInst::BAD_ICMP_PREDICATE; | |||
4630 | break; | |||
4631 | } | |||
4632 | First = false; | |||
4633 | VecPred = CurrentPred; | |||
4634 | } | |||
4635 | ||||
4636 | InstructionCost VecCost = TTI->getCmpSelInstrCost( | |||
4637 | E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0); | |||
4638 | // Check if it is possible and profitable to use min/max for selects in | |||
4639 | // VL. | |||
4640 | // | |||
4641 | auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL); | |||
4642 | if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) { | |||
4643 | IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy, | |||
4644 | {VecTy, VecTy}); | |||
4645 | InstructionCost IntrinsicCost = | |||
4646 | TTI->getIntrinsicInstrCost(CostAttrs, CostKind); | |||
4647 | // If the selects are the only uses of the compares, they will be dead | |||
4648 | // and we can adjust the cost by removing their cost. | |||
4649 | if (IntrinsicAndUse.second) | |||
4650 | IntrinsicCost -= | |||
4651 | TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, MaskTy, | |||
4652 | CmpInst::BAD_ICMP_PREDICATE, CostKind); | |||
4653 | VecCost = std::min(VecCost, IntrinsicCost); | |||
4654 | } | |||
4655 | LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); | |||
4656 | return CommonCost + VecCost - ScalarCost; | |||
4657 | } | |||
4658 | case Instruction::FNeg: | |||
4659 | case Instruction::Add: | |||
4660 | case Instruction::FAdd: | |||
4661 | case Instruction::Sub: | |||
4662 | case Instruction::FSub: | |||
4663 | case Instruction::Mul: | |||
4664 | case Instruction::FMul: | |||
4665 | case Instruction::UDiv: | |||
4666 | case Instruction::SDiv: | |||
4667 | case Instruction::FDiv: | |||
4668 | case Instruction::URem: | |||
4669 | case Instruction::SRem: | |||
4670 | case Instruction::FRem: | |||
4671 | case Instruction::Shl: | |||
4672 | case Instruction::LShr: | |||
4673 | case Instruction::AShr: | |||
4674 | case Instruction::And: | |||
4675 | case Instruction::Or: | |||
4676 | case Instruction::Xor: { | |||
4677 | // Certain instructions can be cheaper to vectorize if they have a | |||
4678 | // constant second vector operand. | |||
4679 | TargetTransformInfo::OperandValueKind Op1VK = | |||
4680 | TargetTransformInfo::OK_AnyValue; | |||
4681 | TargetTransformInfo::OperandValueKind Op2VK = | |||
4682 | TargetTransformInfo::OK_UniformConstantValue; | |||
4683 | TargetTransformInfo::OperandValueProperties Op1VP = | |||
4684 | TargetTransformInfo::OP_None; | |||
4685 | TargetTransformInfo::OperandValueProperties Op2VP = | |||
4686 | TargetTransformInfo::OP_PowerOf2; | |||
4687 | ||||
4688 | // If all operands are exactly the same ConstantInt then set the | |||
4689 | // operand kind to OK_UniformConstantValue. | |||
4690 | // If instead not all operands are constants, then set the operand kind | |||
4691 | // to OK_AnyValue. If all operands are constants but not the same, | |||
4692 | // then set the operand kind to OK_NonUniformConstantValue. | |||
4693 | ConstantInt *CInt0 = nullptr; | |||
4694 | for (unsigned i = 0, e = VL.size(); i < e; ++i) { | |||
4695 | const Instruction *I = cast<Instruction>(VL[i]); | |||
4696 | unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0; | |||
4697 | ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx)); | |||
4698 | if (!CInt) { | |||
4699 | Op2VK = TargetTransformInfo::OK_AnyValue; | |||
4700 | Op2VP = TargetTransformInfo::OP_None; | |||
4701 | break; | |||
4702 | } | |||
4703 | if (Op2VP == TargetTransformInfo::OP_PowerOf2 && | |||
4704 | !CInt->getValue().isPowerOf2()) | |||
4705 | Op2VP = TargetTransformInfo::OP_None; | |||
4706 | if (i == 0) { | |||
4707 | CInt0 = CInt; | |||
4708 | continue; | |||
4709 | } | |||
4710 | if (CInt0 != CInt) | |||
4711 | Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; | |||
4712 | } | |||
4713 | ||||
4714 | SmallVector<const Value *, 4> Operands(VL0->operand_values()); | |||
4715 | InstructionCost ScalarEltCost = | |||
4716 | TTI->getArithmeticInstrCost(E->getOpcode(), ScalarTy, CostKind, Op1VK, | |||
4717 | Op2VK, Op1VP, Op2VP, Operands, VL0); | |||
4718 | if (NeedToShuffleReuses) { | |||
4719 | CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; | |||
4720 | } | |||
4721 | InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; | |||
4722 | InstructionCost VecCost = | |||
4723 | TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind, Op1VK, | |||
4724 | Op2VK, Op1VP, Op2VP, Operands, VL0); | |||
4725 | LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); | |||
4726 | return CommonCost + VecCost - ScalarCost; | |||
4727 | } | |||
4728 | case Instruction::GetElementPtr: { | |||
4729 | TargetTransformInfo::OperandValueKind Op1VK = | |||
4730 | TargetTransformInfo::OK_AnyValue; | |||
4731 | TargetTransformInfo::OperandValueKind Op2VK = | |||
4732 | TargetTransformInfo::OK_UniformConstantValue; | |||
4733 | ||||
4734 | InstructionCost ScalarEltCost = TTI->getArithmeticInstrCost( | |||
4735 | Instruction::Add, ScalarTy, CostKind, Op1VK, Op2VK); | |||
4736 | if (NeedToShuffleReuses) { | |||
4737 | CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; | |||
4738 | } | |||
4739 | InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; | |||
4740 | InstructionCost VecCost = TTI->getArithmeticInstrCost( | |||
4741 | Instruction::Add, VecTy, CostKind, Op1VK, Op2VK); | |||
4742 | LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); | |||
4743 | return CommonCost + VecCost - ScalarCost; | |||
4744 | } | |||
4745 | case Instruction::Load: { | |||
4746 | // Cost of wide load - cost of scalar loads. | |||
4747 | Align Alignment = cast<LoadInst>(VL0)->getAlign(); | |||
4748 | InstructionCost ScalarEltCost = TTI->getMemoryOpCost( | |||
4749 | Instruction::Load, ScalarTy, Alignment, 0, CostKind, VL0); | |||
4750 | if (NeedToShuffleReuses) { | |||
4751 | CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; | |||
4752 | } | |||
4753 | InstructionCost ScalarLdCost = VecTy->getNumElements() * ScalarEltCost; | |||
4754 | InstructionCost VecLdCost; | |||
4755 | if (E->State == TreeEntry::Vectorize) { | |||
4756 | VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, Alignment, 0, | |||
4757 | CostKind, VL0); | |||
4758 | } else { | |||
4759 | assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState"); | |||
4760 | Align CommonAlignment = Alignment; | |||
4761 | for (Value *V : VL) | |||
4762 | CommonAlignment = | |||
4763 | commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); | |||
4764 | VecLdCost = TTI->getGatherScatterOpCost( | |||
4765 | Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(), | |||
4766 | /*VariableMask=*/false, CommonAlignment, CostKind, VL0); | |||
4767 | } | |||
4768 | LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecLdCost, ScalarLdCost)); | |||
4769 | return CommonCost + VecLdCost - ScalarLdCost; | |||
4770 | } | |||
4771 | case Instruction::Store: { | |||
4772 | // We know that we can merge the stores. Calculate the cost. | |||
4773 | bool IsReorder = !E->ReorderIndices.empty(); | |||
4774 | auto *SI = | |||
4775 | cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0); | |||
4776 | Align Alignment = SI->getAlign(); | |||
4777 | InstructionCost ScalarEltCost = TTI->getMemoryOpCost( | |||
4778 | Instruction::Store, ScalarTy, Alignment, 0, CostKind, VL0); | |||
4779 | InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost; | |||
4780 | InstructionCost VecStCost = TTI->getMemoryOpCost( | |||
4781 | Instruction::Store, VecTy, Alignment, 0, CostKind, VL0); | |||
4782 | LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecStCost, ScalarStCost)); | |||
4783 | return CommonCost + VecStCost - ScalarStCost; | |||
4784 | } | |||
4785 | case Instruction::Call: { | |||
4786 | CallInst *CI = cast<CallInst>(VL0); | |||
4787 | Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); | |||
4788 | ||||
4789 | // Calculate the cost of the scalar and vector calls. | |||
4790 | IntrinsicCostAttributes CostAttrs(ID, *CI, 1); | |||
4791 | InstructionCost ScalarEltCost = | |||
4792 | TTI->getIntrinsicInstrCost(CostAttrs, CostKind); | |||
4793 | if (NeedToShuffleReuses) { | |||
4794 | CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; | |||
4795 | } | |||
4796 | InstructionCost ScalarCallCost = VecTy->getNumElements() * ScalarEltCost; | |||
4797 | ||||
4798 | auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); | |||
4799 | InstructionCost VecCallCost = | |||
4800 | std::min(VecCallCosts.first, VecCallCosts.second); | |||
4801 | ||||
4802 | LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost | |||
4803 | << " (" << VecCallCost << "-" << ScalarCallCost << ")" | |||
4804 | << " for " << *CI << "\n"); | |||
4805 | ||||
4806 | return CommonCost + VecCallCost - ScalarCallCost; | |||
4807 | } | |||
4808 | case Instruction::ShuffleVector: { | |||
4809 | assert(E->isAltShuffle() && | |||
4810 | ((Instruction::isBinaryOp(E->getOpcode()) && | |||
4811 | Instruction::isBinaryOp(E->getAltOpcode())) || | |||
4812 | (Instruction::isCast(E->getOpcode()) && | |||
4813 | Instruction::isCast(E->getAltOpcode()))) && | |||
4814 | "Invalid Shuffle Vector Operand"); | |||
4815 | InstructionCost ScalarCost = 0; | |||
4816 | if (NeedToShuffleReuses) { | |||
4817 | for (unsigned Idx : E->ReuseShuffleIndices) { | |||
4818 | Instruction *I = cast<Instruction>(VL[Idx]); | |||
4819 | CommonCost -= TTI->getInstructionCost(I, CostKind); | |||
4820 | } | |||
4821 | for (Value *V : VL) { | |||
4822 | Instruction *I = cast<Instruction>(V); | |||
4823 | CommonCost += TTI->getInstructionCost(I, CostKind); | |||
4824 | } | |||
4825 | } | |||
4826 | for (Value *V : VL) { | |||
4827 | Instruction *I = cast<Instruction>(V); | |||
4828 | assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); | |||
4829 | ScalarCost += TTI->getInstructionCost(I, CostKind); | |||
4830 | } | |||
4831 | // VecCost is equal to sum of the cost of creating 2 vectors | |||
4832 | // and the cost of creating shuffle. | |||
4833 | InstructionCost VecCost = 0; | |||
4834 | if (Instruction::isBinaryOp(E->getOpcode())) { | |||
4835 | VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind); | |||
4836 | VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy, | |||
4837 | CostKind); | |||
4838 | } else { | |||
4839 | Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType(); | |||
4840 | Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType(); | |||
4841 | auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size()); | |||
4842 | auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size()); | |||
4843 | VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty, | |||
4844 | TTI::CastContextHint::None, CostKind); | |||
4845 | VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty, | |||
4846 | TTI::CastContextHint::None, CostKind); | |||
4847 | } | |||
4848 | ||||
4849 | SmallVector<int> Mask; | |||
4850 | buildSuffleEntryMask( | |||
4851 | E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices, | |||
4852 | [E](Instruction *I) { | |||
4853 | assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); | |||
4854 | return I->getOpcode() == E->getAltOpcode(); | |||
4855 | }, | |||
4856 | Mask); | |||
4857 | CommonCost = | |||
4858 | TTI->getShuffleCost(TargetTransformInfo::SK_Select, FinalVecTy, Mask); | |||
4859 | LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); | |||
4860 | return CommonCost + VecCost - ScalarCost; | |||
4861 | } | |||
4862 | default: | |||
4863 | llvm_unreachable("Unknown instruction"); | |||
4864 | } | |||
4865 | } | |||
4866 | ||||
4867 | bool BoUpSLP::isFullyVectorizableTinyTree() const { | |||
4868 | LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height " | |||
4869 | << VectorizableTree.size() << " is fully vectorizable .\n"); | |||
4870 | ||||
4871 | // We only handle trees of heights 1 and 2. | |||
4872 | if (VectorizableTree.size() == 1 && | |||
4873 | VectorizableTree[0]->State == TreeEntry::Vectorize) | |||
4874 | return true; | |||
4875 | ||||
4876 | if (VectorizableTree.size() != 2) | |||
4877 | return false; | |||
4878 | ||||
4879 | // Handle splat and all-constant stores. Also try to vectorize tiny trees | |||
4880 | // whose second node is a gather with fewer scalar operands than the | |||
4881 | // initial tree entry (it may be profitable to shuffle the second gather), | |||
4882 | // or whose scalars are extractelements that form a shuffle. | |||
4883 | SmallVector<int> Mask; | |||
4884 | if (VectorizableTree[0]->State == TreeEntry::Vectorize && | |||
4885 | (allConstant(VectorizableTree[1]->Scalars) || | |||
4886 | isSplat(VectorizableTree[1]->Scalars) || | |||
4887 | (VectorizableTree[1]->State == TreeEntry::NeedToGather && | |||
4888 | VectorizableTree[1]->Scalars.size() < | |||
4889 | VectorizableTree[0]->Scalars.size()) || | |||
4890 | (VectorizableTree[1]->State == TreeEntry::NeedToGather && | |||
4891 | VectorizableTree[1]->getOpcode() == Instruction::ExtractElement && | |||
4892 | isShuffle(VectorizableTree[1]->Scalars, Mask)))) | |||
4893 | return true; | |||
4894 | ||||
4895 | // Gathering cost would be too much for tiny trees. | |||
4896 | if (VectorizableTree[0]->State == TreeEntry::NeedToGather || | |||
4897 | VectorizableTree[1]->State == TreeEntry::NeedToGather) | |||
4898 | return false; | |||
4899 | ||||
4900 | return true; | |||
4901 | } | |||
4902 | ||||
4903 | static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts, | |||
4904 | TargetTransformInfo *TTI, | |||
4905 | bool MustMatchOrInst) { | |||
4906 | // Look past the root to find a source value. Arbitrarily follow the | |||
4907 | // path through operand 0 of any 'or'. Also, peek through optional | |||
4908 | // shift-left-by-multiple-of-8-bits. | |||
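// Illustrative sketch (annotation, not in the original source; names are
// hypothetical): for a 4-byte little-endian combine, the walk below starts at
// a root such as
//   %r = or i32 %hi, %lo   ; informal IR, where %lo = zext i8 (load i8* %p)
//                          ; and %hi is built from shl-by-8/16/24 of zext'ed loads
// and repeatedly peels 'or' and 'shl by a multiple of 8' through operand 0
// until the zext'ed load is reached.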
4909 | Value *ZextLoad = Root; | |||
4910 | const APInt *ShAmtC; | |||
4911 | bool FoundOr = false; | |||
4912 | while (!isa<ConstantExpr>(ZextLoad) && | |||
4913 | (match(ZextLoad, m_Or(m_Value(), m_Value())) || | |||
4914 | (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) && | |||
4915 | ShAmtC->urem(8) == 0))) { | |||
4916 | auto *BinOp = cast<BinaryOperator>(ZextLoad); | |||
4917 | ZextLoad = BinOp->getOperand(0); | |||
4918 | if (BinOp->getOpcode() == Instruction::Or) | |||
4919 | FoundOr = true; | |||
4920 | } | |||
4921 | // Check if the input is an extended load of the required or/shift expression. | |||
4922 | Value *LoadPtr; | |||
4923 | if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root || | |||
4924 | !match(ZextLoad, m_ZExt(m_Load(m_Value(LoadPtr))))) | |||
4925 | return false; | |||
4926 | ||||
4927 | // Require that the total load bit width is a legal integer type. | |||
4928 | // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target. | |||
4929 | // But <16 x i8> --> i128 is not, so the backend probably can't reduce it. | |||
4930 | Type *SrcTy = LoadPtr->getType()->getPointerElementType(); | |||
4931 | unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts; | |||
4932 | if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth))) | |||
4933 | return false; | |||
4934 | ||||
4935 | // Everything matched - assume that we can fold the whole sequence using | |||
4936 | // load combining. | |||
4937 | LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Assume load combining for tree starting at " << *(cast<Instruction>(Root)) << "\n"; } } while (false) | |||
4938 | << *(cast<Instruction>(Root)) << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Assume load combining for tree starting at " << *(cast<Instruction>(Root)) << "\n"; } } while (false); | |||
4939 | ||||
4940 | return true; | |||
4941 | } | |||
4942 | ||||
4943 | bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const { | |||
4944 | if (RdxKind != RecurKind::Or) | |||
4945 | return false; | |||
4946 | ||||
4947 | unsigned NumElts = VectorizableTree[0]->Scalars.size(); | |||
4948 | Value *FirstReduced = VectorizableTree[0]->Scalars[0]; | |||
4949 | return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI, | |||
4950 | /* MatchOr */ false); | |||
4951 | } | |||
4952 | ||||
4953 | bool BoUpSLP::isLoadCombineCandidate() const { | |||
4954 | // Peek through a final sequence of stores and check if all operations are | |||
4955 | // likely to be load-combined. | |||
4956 | unsigned NumElts = VectorizableTree[0]->Scalars.size(); | |||
4957 | for (Value *Scalar : VectorizableTree[0]->Scalars) { | |||
4958 | Value *X; | |||
4959 | if (!match(Scalar, m_Store(m_Value(X), m_Value())) || | |||
4960 | !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true)) | |||
4961 | return false; | |||
4962 | } | |||
4963 | return true; | |||
4964 | } | |||
4965 | ||||
4966 | bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() const { | |||
4967 | // No need to vectorize inserts of gathered values. | |||
4968 | if (VectorizableTree.size() == 2 && | |||
4969 | isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) && | |||
4970 | VectorizableTree[1]->State == TreeEntry::NeedToGather) | |||
4971 | return true; | |||
4972 | ||||
4973 | // We can vectorize the tree if its size is greater than or equal to the | |||
4974 | // minimum size specified by the MinTreeSize command line option. | |||
4975 | if (VectorizableTree.size() >= MinTreeSize) | |||
4976 | return false; | |||
4977 | ||||
4978 | // If we have a tiny tree (a tree whose size is less than MinTreeSize), we | |||
4979 | // can vectorize it if we can prove it fully vectorizable. | |||
4980 | if (isFullyVectorizableTinyTree()) | |||
4981 | return false; | |||
4982 | ||||
4983 | assert(VectorizableTree.empty() | |||
4984 | ? ExternalUses.empty() | |||
4985 | : true && "We shouldn't have any external users"); | |||
4986 | ||||
4987 | // Otherwise, we can't vectorize the tree. It is both tiny and not fully | |||
4988 | // vectorizable. | |||
4989 | return true; | |||
4990 | } | |||
4991 | ||||
4992 | InstructionCost BoUpSLP::getSpillCost() const { | |||
4993 | // Walk from the bottom of the tree to the top, tracking which values are | |||
4994 | // live. When we see a call instruction that is not part of our tree, | |||
4995 | // query TTI to see if there is a cost to keeping values live over it | |||
4996 | // (for example, if spills and fills are required). | |||
4997 | unsigned BundleWidth = VectorizableTree.front()->Scalars.size(); | |||
4998 | InstructionCost Cost = 0; | |||
4999 | ||||
5000 | SmallPtrSet<Instruction*, 4> LiveValues; | |||
5001 | Instruction *PrevInst = nullptr; | |||
5002 | ||||
5003 | // The entries in VectorizableTree are not necessarily ordered by their | |||
5004 | // position in basic blocks. Collect them and order them by dominance so later | |||
5005 | // instructions are guaranteed to be visited first. For instructions in | |||
5006 | // different basic blocks, we only scan to the beginning of the block, so | |||
5007 | // their order does not matter, as long as all instructions in a basic block | |||
5008 | // are grouped together. Using dominance ensures a deterministic order. | |||
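// Illustrative example (annotation, not in the original source): for three
// collected scalars I1, I2, I3 that sit in the same basic block in program
// order, the sort below yields OrderedScalars = {I3, I2, I1}, so the bottom-up
// liveness walk that follows visits the latest instruction first.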
5009 | SmallVector<Instruction *, 16> OrderedScalars; | |||
5010 | for (const auto &TEPtr : VectorizableTree) { | |||
5011 | Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]); | |||
5012 | if (!Inst) | |||
5013 | continue; | |||
5014 | OrderedScalars.push_back(Inst); | |||
5015 | } | |||
5016 | llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) { | |||
5017 | auto *NodeA = DT->getNode(A->getParent()); | |||
5018 | auto *NodeB = DT->getNode(B->getParent()); | |||
5019 | assert(NodeA && "Should only process reachable instructions"); | |||
5020 | assert(NodeB && "Should only process reachable instructions"); | |||
5021 | assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && | |||
5022 | "Different nodes should have different DFS numbers"); | |||
5023 | if (NodeA != NodeB) | |||
5024 | return NodeA->getDFSNumIn() < NodeB->getDFSNumIn(); | |||
5025 | return B->comesBefore(A); | |||
5026 | }); | |||
5027 | ||||
5028 | for (Instruction *Inst : OrderedScalars) { | |||
5029 | if (!PrevInst) { | |||
5030 | PrevInst = Inst; | |||
5031 | continue; | |||
5032 | } | |||
5033 | ||||
5034 | // Update LiveValues. | |||
5035 | LiveValues.erase(PrevInst); | |||
5036 | for (auto &J : PrevInst->operands()) { | |||
5037 | if (isa<Instruction>(&*J) && getTreeEntry(&*J)) | |||
5038 | LiveValues.insert(cast<Instruction>(&*J)); | |||
5039 | } | |||
5040 | ||||
5041 | LLVM_DEBUG({ | |||
5042 | dbgs() << "SLP: #LV: " << LiveValues.size(); | |||
5043 | for (auto *X : LiveValues) | |||
5044 | dbgs() << " " << X->getName(); | |||
5045 | dbgs() << ", Looking at "; | |||
5046 | Inst->dump(); | |||
5047 | }); | |||
5048 | ||||
5049 | // Now find the sequence of instructions between PrevInst and Inst. | |||
5050 | unsigned NumCalls = 0; | |||
5051 | BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), | |||
5052 | PrevInstIt = | |||
5053 | PrevInst->getIterator().getReverse(); | |||
5054 | while (InstIt != PrevInstIt) { | |||
5055 | if (PrevInstIt == PrevInst->getParent()->rend()) { | |||
5056 | PrevInstIt = Inst->getParent()->rbegin(); | |||
5057 | continue; | |||
5058 | } | |||
5059 | ||||
5060 | // Debug information does not impact spill cost. | |||
5061 | if ((isa<CallInst>(&*PrevInstIt) && | |||
5062 | !isa<DbgInfoIntrinsic>(&*PrevInstIt)) && | |||
5063 | &*PrevInstIt != PrevInst) | |||
5064 | NumCalls++; | |||
5065 | ||||
5066 | ++PrevInstIt; | |||
5067 | } | |||
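// Illustrative example (annotation, not in the original source): if one
// non-intrinsic call sits between PrevInst and Inst, NumCalls becomes 1 and
// the block below charges the cost of keeping every currently live tree value
// in a BundleWidth-wide vector register across that call.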
5068 | ||||
5069 | if (NumCalls) { | |||
5070 | SmallVector<Type*, 4> V; | |||
5071 | for (auto *II : LiveValues) { | |||
5072 | auto *ScalarTy = II->getType(); | |||
5073 | if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy)) | |||
5074 | ScalarTy = VectorTy->getElementType(); | |||
5075 | V.push_back(FixedVectorType::get(ScalarTy, BundleWidth)); | |||
5076 | } | |||
5077 | Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V); | |||
5078 | } | |||
5079 | ||||
5080 | PrevInst = Inst; | |||
5081 | } | |||
5082 | ||||
5083 | return Cost; | |||
5084 | } | |||
5085 | ||||
5086 | InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) { | |||
5087 | InstructionCost Cost = 0; | |||
5088 | LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Calculating cost for tree of size " << VectorizableTree.size() << ".\n"; } } while ( false) | |||
5089 | << VectorizableTree.size() << ".\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Calculating cost for tree of size " << VectorizableTree.size() << ".\n"; } } while ( false); | |||
5090 | ||||
5091 | unsigned BundleWidth = VectorizableTree[0]->Scalars.size(); | |||
5092 | ||||
5093 | for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) { | |||
5094 | TreeEntry &TE = *VectorizableTree[I].get(); | |||
5095 | ||||
5096 | InstructionCost C = getEntryCost(&TE, VectorizedVals); | |||
5097 | Cost += C; | |||
5098 | LLVM_DEBUG(dbgs() << "SLP: Adding cost " << Cdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Adding cost " << C << " for bundle that starts with " << *TE.Scalars[0] << ".\n" << "SLP: Current total cost = " << Cost << "\n"; } } while (false) | |||
5099 | << " for bundle that starts with " << *TE.Scalars[0]do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Adding cost " << C << " for bundle that starts with " << *TE.Scalars[0] << ".\n" << "SLP: Current total cost = " << Cost << "\n"; } } while (false) | |||
5100 | << ".\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Adding cost " << C << " for bundle that starts with " << *TE.Scalars[0] << ".\n" << "SLP: Current total cost = " << Cost << "\n"; } } while (false) | |||
5101 | << "SLP: Current total cost = " << Cost << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Adding cost " << C << " for bundle that starts with " << *TE.Scalars[0] << ".\n" << "SLP: Current total cost = " << Cost << "\n"; } } while (false); | |||
5102 | } | |||
5103 | ||||
5104 | SmallPtrSet<Value *, 16> ExtractCostCalculated; | |||
5105 | InstructionCost ExtractCost = 0; | |||
5106 | SmallVector<unsigned> VF; | |||
5107 | SmallVector<SmallVector<int>> ShuffleMask; | |||
5108 | SmallVector<Value *> FirstUsers; | |||
5109 | SmallVector<APInt> DemandedElts; | |||
5110 | for (ExternalUser &EU : ExternalUses) { | |||
5111 | // We only add extract cost once for the same scalar. | |||
5112 | if (!ExtractCostCalculated.insert(EU.Scalar).second) | |||
5113 | continue; | |||
5114 | ||||
5115 | // Uses by ephemeral values are free (because the ephemeral value will be | |||
5116 | // removed prior to code generation, and so the extraction will be | |||
5117 | // removed as well). | |||
5118 | if (EphValues.count(EU.User)) | |||
5119 | continue; | |||
5120 | ||||
5121 | // No extract cost for vector "scalar" | |||
5122 | if (isa<FixedVectorType>(EU.Scalar->getType())) | |||
5123 | continue; | |||
5124 | ||||
5125 | // Already counted the cost for external uses when tried to adjust the cost | |||
5126 | // for extractelements, no need to add it again. | |||
5127 | if (isa<ExtractElementInst>(EU.Scalar)) | |||
5128 | continue; | |||
5129 | ||||
5130 | // If found user is an insertelement, do not calculate extract cost but try | |||
5131 | // to detect it as a final shuffled/identity match. | |||
5132 | if (EU.User && isa<InsertElementInst>(EU.User)) { | |||
5133 | if (auto *FTy = dyn_cast<FixedVectorType>(EU.User->getType())) { | |||
5134 | Optional<int> InsertIdx = getInsertIndex(EU.User, 0); | |||
5135 | if (!InsertIdx || *InsertIdx == UndefMaskElem) | |||
5136 | continue; | |||
5137 | Value *VU = EU.User; | |||
5138 | auto *It = find_if(FirstUsers, [VU](Value *V) { | |||
5139 | // Checks if 2 insertelements are from the same buildvector. | |||
5140 | if (VU->getType() != V->getType()) | |||
5141 | return false; | |||
5142 | auto *IE1 = cast<InsertElementInst>(VU); | |||
5143 | auto *IE2 = cast<InsertElementInst>(V); | |||
5144 | // Go through the chain of insertelement instructions trying to find either | |||
5145 | // VU as the original vector for IE2 or V as the original vector for IE1. | |||
5146 | do { | |||
5147 | if (IE1 == VU || IE2 == V) | |||
5148 | return true; | |||
5149 | if (IE1) | |||
5150 | IE1 = dyn_cast<InsertElementInst>(IE1->getOperand(0)); | |||
5151 | if (IE2) | |||
5152 | IE2 = dyn_cast<InsertElementInst>(IE2->getOperand(0)); | |||
5153 | } while (IE1 || IE2); | |||
5154 | return false; | |||
5155 | }); | |||
5156 | int VecId = -1; | |||
5157 | if (It == FirstUsers.end()) { | |||
5158 | VF.push_back(FTy->getNumElements()); | |||
5159 | ShuffleMask.emplace_back(VF.back(), UndefMaskElem); | |||
5160 | FirstUsers.push_back(EU.User); | |||
5161 | DemandedElts.push_back(APInt::getNullValue(VF.back())); | |||
5162 | VecId = FirstUsers.size() - 1; | |||
5163 | } else { | |||
5164 | VecId = std::distance(FirstUsers.begin(), It); | |||
5165 | } | |||
5166 | int Idx = *InsertIdx; | |||
5167 | ShuffleMask[VecId][Idx] = EU.Lane; | |||
5168 | DemandedElts[VecId].setBit(Idx); | |||
5169 | } | |||
5170 | } | |||
5171 | ||||
5172 | // If we plan to rewrite the tree in a smaller type, we will need to sign | |||
5173 | // extend the extracted value back to the original type. Here, we account | |||
5174 | // for the extract and the added cost of the sign extend if needed. | |||
5175 | auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth); | |||
5176 | auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; | |||
5177 | if (MinBWs.count(ScalarRoot)) { | |||
5178 | auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); | |||
5179 | auto Extend = | |||
5180 | MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt; | |||
5181 | VecTy = FixedVectorType::get(MinTy, BundleWidth); | |||
5182 | ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), | |||
5183 | VecTy, EU.Lane); | |||
5184 | } else { | |||
5185 | ExtractCost += | |||
5186 | TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); | |||
5187 | } | |||
5188 | } | |||
5189 | ||||
5190 | InstructionCost SpillCost = getSpillCost(); | |||
5191 | Cost += SpillCost + ExtractCost; | |||
5192 | for (int I = 0, E = FirstUsers.size(); I < E; ++I) { | |||
5193 | // For the very first element - simple shuffle of the source vector. | |||
5194 | int Limit = ShuffleMask[I].size() * 2; | |||
5195 | if (I == 0 && | |||
5196 | all_of(ShuffleMask[I], [Limit](int Idx) { return Idx < Limit; }) && | |||
5197 | !ShuffleVectorInst::isIdentityMask(ShuffleMask[I])) { | |||
5198 | InstructionCost C = TTI->getShuffleCost( | |||
5199 | TTI::SK_PermuteSingleSrc, | |||
5200 | cast<FixedVectorType>(FirstUsers[I]->getType()), ShuffleMask[I]); | |||
5201 | LLVM_DEBUG(dbgs() << "SLP: Adding cost " << Cdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Adding cost " << C << " for final shuffle of insertelement external users " << *VectorizableTree.front()->Scalars.front() << ".\n" << "SLP: Current total cost = " << Cost << "\n"; } } while (false) | |||
5202 | << " for final shuffle of insertelement external users "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Adding cost " << C << " for final shuffle of insertelement external users " << *VectorizableTree.front()->Scalars.front() << ".\n" << "SLP: Current total cost = " << Cost << "\n"; } } while (false) | |||
5203 | << *VectorizableTree.front()->Scalars.front() << ".\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Adding cost " << C << " for final shuffle of insertelement external users " << *VectorizableTree.front()->Scalars.front() << ".\n" << "SLP: Current total cost = " << Cost << "\n"; } } while (false) | |||
5204 | << "SLP: Current total cost = " << Cost << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Adding cost " << C << " for final shuffle of insertelement external users " << *VectorizableTree.front()->Scalars.front() << ".\n" << "SLP: Current total cost = " << Cost << "\n"; } } while (false); | |||
5205 | Cost += C; | |||
5206 | continue; | |||
5207 | } | |||
5208 | // Other elements - permutation of 2 vectors (the initial one and the next | |||
5209 | // Ith incoming vector). | |||
5210 | unsigned VF = ShuffleMask[I].size(); | |||
5211 | for (unsigned Idx = 0; Idx < VF; ++Idx) { | |||
5212 | int &Mask = ShuffleMask[I][Idx]; | |||
5213 | Mask = Mask == UndefMaskElem ? Idx : VF + Mask; | |||
5214 | } | |||
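// Illustrative example (annotation, not in the original source; values are
// hypothetical): with VF == 4 and ShuffleMask[I] == {1, undef, 0, undef}, the
// loop above rewrites the mask to {5, 1, 4, 3}: lanes taken from the
// vectorized node are shifted by VF, and undef lanes fall back to the identity
// index of the original buildvector.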
5215 | InstructionCost C = TTI->getShuffleCost( | |||
5216 | TTI::SK_PermuteTwoSrc, cast<FixedVectorType>(FirstUsers[I]->getType()), | |||
5217 | ShuffleMask[I]); | |||
5218 | LLVM_DEBUG( | |||
5219 | dbgs() | |||
5220 | << "SLP: Adding cost " << C | |||
5221 | << " for final shuffle of vector node and external insertelement users " | |||
5222 | << *VectorizableTree.front()->Scalars.front() << ".\n" | |||
5223 | << "SLP: Current total cost = " << Cost << "\n"); | |||
5224 | Cost += C; | |||
5225 | InstructionCost InsertCost = TTI->getScalarizationOverhead( | |||
5226 | cast<FixedVectorType>(FirstUsers[I]->getType()), DemandedElts[I], | |||
5227 | /*Insert*/ true, | |||
5228 | /*Extract*/ false); | |||
5229 | Cost -= InsertCost; | |||
5230 | LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCostdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: subtracting the cost " << InsertCost << " for insertelements gather.\n" << "SLP: Current total cost = " << Cost << "\n"; } } while (false) | |||
5231 | << " for insertelements gather.\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: subtracting the cost " << InsertCost << " for insertelements gather.\n" << "SLP: Current total cost = " << Cost << "\n"; } } while (false) | |||
5232 | << "SLP: Current total cost = " << Cost << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: subtracting the cost " << InsertCost << " for insertelements gather.\n" << "SLP: Current total cost = " << Cost << "\n"; } } while (false); | |||
5233 | } | |||
5234 | ||||
5235 | #ifndef NDEBUG | |||
5236 | SmallString<256> Str; | |||
5237 | { | |||
5238 | raw_svector_ostream OS(Str); | |||
5239 | OS << "SLP: Spill Cost = " << SpillCost << ".\n" | |||
5240 | << "SLP: Extract Cost = " << ExtractCost << ".\n" | |||
5241 | << "SLP: Total Cost = " << Cost << ".\n"; | |||
5242 | } | |||
5243 | LLVM_DEBUG(dbgs() << Str); | |||
5244 | if (ViewSLPTree) | |||
5245 | ViewGraph(this, "SLP" + F->getName(), false, Str); | |||
5246 | #endif | |||
5247 | ||||
5248 | return Cost; | |||
5249 | } | |||
5250 | ||||
5251 | Optional<TargetTransformInfo::ShuffleKind> | |||
5252 | BoUpSLP::isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask, | |||
5253 | SmallVectorImpl<const TreeEntry *> &Entries) { | |||
5254 | // TODO: currently checking only for Scalars in the tree entry, need to count | |||
5255 | // reused elements too for better cost estimation. | |||
5256 | Mask.assign(TE->Scalars.size(), UndefMaskElem); | |||
5257 | Entries.clear(); | |||
5258 | // Build a map from each value to the gather tree entries that contain it. | |||
5259 | DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>> ValueToTEs; | |||
5260 | for (const std::unique_ptr<TreeEntry> &EntryPtr : VectorizableTree) { | |||
5261 | if (EntryPtr.get() == TE) | |||
5262 | break; | |||
5263 | if (EntryPtr->State != TreeEntry::NeedToGather) | |||
5264 | continue; | |||
5265 | for (Value *V : EntryPtr->Scalars) | |||
5266 | ValueToTEs.try_emplace(V).first->getSecond().insert(EntryPtr.get()); | |||
5267 | } | |||
5268 | // Find all tree entries used by the gathered values. If no common entries | |||
5269 | // are found, this is not a shuffle. | |||
5270 | // Here we build a set of tree nodes for each gathered value and try to find | |||
5271 | // the intersection between these sets. If we have at least one common tree | |||
5272 | // node for each gathered value, we have just a permutation of a single | |||
5273 | // vector. If we have two different sets, we have a permutation of two input | |||
5274 | // vectors. | |||
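// Illustrative example (annotation, not in the original source): for a gather
// of {a, b, c, d} where {a, b} only appear in one earlier tree entry and
// {c, d} only in another, two disjoint sets are collected and the node can be
// emitted as a two-source permutation instead of an element-by-element gather.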
5275 | SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs; | |||
5276 | DenseMap<Value *, int> UsedValuesEntry; | |||
5277 | for (Value *V : TE->Scalars) { | |||
5278 | if (isa<UndefValue>(V)) | |||
5279 | continue; | |||
5280 | // Build a list of tree entries where V is used. | |||
5281 | SmallPtrSet<const TreeEntry *, 4> VToTEs; | |||
5282 | auto It = ValueToTEs.find(V); | |||
5283 | if (It != ValueToTEs.end()) | |||
5284 | VToTEs = It->second; | |||
5285 | if (const TreeEntry *VTE = getTreeEntry(V)) | |||
5286 | VToTEs.insert(VTE); | |||
5287 | if (VToTEs.empty()) | |||
5288 | return None; | |||
5289 | if (UsedTEs.empty()) { | |||
5290 | // The first iteration, just insert the list of nodes to vector. | |||
5291 | UsedTEs.push_back(VToTEs); | |||
5292 | } else { | |||
5293 | // Need to check if there are any previously used tree nodes which use V. | |||
5294 | // If there are no such nodes, consider that we have another input | |||
5295 | // vector. | |||
5296 | SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs); | |||
5297 | unsigned Idx = 0; | |||
5298 | for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) { | |||
5299 | // Do we have a non-empty intersection of previously listed tree entries | |||
5300 | // and tree entries using current V? | |||
5301 | set_intersect(VToTEs, Set); | |||
5302 | if (!VToTEs.empty()) { | |||
5303 | // Yes, write the new subset and continue analysis for the next | |||
5304 | // scalar. | |||
5305 | Set.swap(VToTEs); | |||
5306 | break; | |||
5307 | } | |||
5308 | VToTEs = SavedVToTEs; | |||
5309 | ++Idx; | |||
5310 | } | |||
5311 | // No non-empty intersection found - need to add a second set of possible | |||
5312 | // source vectors. | |||
5313 | if (Idx == UsedTEs.size()) { | |||
5314 | // If the number of input vectors is greater than 2, this is not a | |||
5315 | // permutation; fall back to the regular gather. | |||
5316 | if (UsedTEs.size() == 2) | |||
5317 | return None; | |||
5318 | UsedTEs.push_back(SavedVToTEs); | |||
5319 | Idx = UsedTEs.size() - 1; | |||
5320 | } | |||
5321 | UsedValuesEntry.try_emplace(V, Idx); | |||
5322 | } | |||
5323 | } | |||
5324 | ||||
5325 | unsigned VF = 0; | |||
5326 | if (UsedTEs.size() == 1) { | |||
5327 | // Try to find the perfect match in another gather node at first. | |||
5328 | auto It = find_if(UsedTEs.front(), [TE](const TreeEntry *EntryPtr) { | |||
5329 | return EntryPtr->isSame(TE->Scalars); | |||
5330 | }); | |||
5331 | if (It != UsedTEs.front().end()) { | |||
5332 | Entries.push_back(*It); | |||
5333 | std::iota(Mask.begin(), Mask.end(), 0); | |||
5334 | return TargetTransformInfo::SK_PermuteSingleSrc; | |||
5335 | } | |||
5336 | // No perfect match, just shuffle, so choose the first tree node. | |||
5337 | Entries.push_back(*UsedTEs.front().begin()); | |||
5338 | } else { | |||
5339 | // Try to find nodes with the same vector factor. | |||
5340 | assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries."); | |||
5341 | // FIXME: Shall be replaced by GetVF function once non-power-2 patch is | |||
5342 | // landed. | |||
5343 | auto &&GetVF = [](const TreeEntry *TE) { | |||
5344 | if (!TE->ReuseShuffleIndices.empty()) | |||
5345 | return TE->ReuseShuffleIndices.size(); | |||
5346 | return TE->Scalars.size(); | |||
5347 | }; | |||
5348 | DenseMap<int, const TreeEntry *> VFToTE; | |||
5349 | for (const TreeEntry *TE : UsedTEs.front()) | |||
5350 | VFToTE.try_emplace(GetVF(TE), TE); | |||
5351 | for (const TreeEntry *TE : UsedTEs.back()) { | |||
5352 | auto It = VFToTE.find(GetVF(TE)); | |||
5353 | if (It != VFToTE.end()) { | |||
5354 | VF = It->first; | |||
5355 | Entries.push_back(It->second); | |||
5356 | Entries.push_back(TE); | |||
5357 | break; | |||
5358 | } | |||
5359 | } | |||
5360 | // No 2 source vectors with the same vector factor - give up and do regular | |||
5361 | // gather. | |||
5362 | if (Entries.empty()) | |||
5363 | return None; | |||
5364 | } | |||
5365 | ||||
5366 | // Build a shuffle mask for better cost estimation and vector emission. | |||
5367 | for (int I = 0, E = TE->Scalars.size(); I < E; ++I) { | |||
5368 | Value *V = TE->Scalars[I]; | |||
5369 | if (isa<UndefValue>(V)) | |||
5370 | continue; | |||
5371 | unsigned Idx = UsedValuesEntry.lookup(V); | |||
5372 | const TreeEntry *VTE = Entries[Idx]; | |||
5373 | int FoundLane = VTE->findLaneForValue(V); | |||
5374 | Mask[I] = Idx * VF + FoundLane; | |||
5375 | // Extra check required by isSingleSourceMaskImpl function (called by | |||
5376 | // ShuffleVectorInst::isSingleSourceMask). | |||
5377 | if (Mask[I] >= 2 * E) | |||
5378 | return None; | |||
5379 | } | |||
5380 | switch (Entries.size()) { | |||
5381 | case 1: | |||
5382 | return TargetTransformInfo::SK_PermuteSingleSrc; | |||
5383 | case 2: | |||
5384 | return TargetTransformInfo::SK_PermuteTwoSrc; | |||
5385 | default: | |||
5386 | break; | |||
5387 | } | |||
5388 | return None; | |||
5389 | } | |||
5390 | ||||
5391 | InstructionCost | |||
5392 | BoUpSLP::getGatherCost(FixedVectorType *Ty, | |||
5393 | const DenseSet<unsigned> &ShuffledIndices) const { | |||
5394 | unsigned NumElts = Ty->getNumElements(); | |||
5395 | APInt DemandedElts = APInt::getNullValue(NumElts); | |||
5396 | for (unsigned I = 0; I < NumElts; ++I) | |||
5397 | if (!ShuffledIndices.count(I)) | |||
5398 | DemandedElts.setBit(I); | |||
5399 | InstructionCost Cost = | |||
5400 | TTI->getScalarizationOverhead(Ty, DemandedElts, /*Insert*/ true, | |||
5401 | /*Extract*/ false); | |||
5402 | if (!ShuffledIndices.empty()) | |||
5403 | Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty); | |||
5404 | return Cost; | |||
5405 | } | |||
5406 | ||||
5407 | InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const { | |||
5408 | // Find the type of the operands in VL. | |||
5409 | Type *ScalarTy = VL[0]->getType(); | |||
5410 | if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) | |||
5411 | ScalarTy = SI->getValueOperand()->getType(); | |||
5412 | auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); | |||
5413 | // Find the cost of inserting/extracting values from the vector. | |||
5414 | // Check if the same elements are inserted several times and count them as | |||
5415 | // shuffle candidates. | |||
5416 | DenseSet<unsigned> ShuffledElements; | |||
5417 | DenseSet<Value *> UniqueElements; | |||
5418 | // Iterate in reverse order to consider insert elements with the high cost. | |||
5419 | for (unsigned I = VL.size(); I > 0; --I) { | |||
5420 | unsigned Idx = I - 1; | |||
5421 | if (isConstant(VL[Idx])) | |||
5422 | continue; | |||
5423 | if (!UniqueElements.insert(VL[Idx]).second) | |||
5424 | ShuffledElements.insert(Idx); | |||
5425 | } | |||
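// Illustrative example (annotation, not in the original source; values are
// hypothetical): for VL == {a, b, a, c} the reverse walk above keeps the last
// occurrence of 'a' (index 2) as a unique insert and records index 0 in
// ShuffledElements, so the duplicated lane is costed as a shuffle candidate
// rather than as another insert.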
5426 | return getGatherCost(VecTy, ShuffledElements); | |||
5427 | } | |||
5428 | ||||
5429 | // Perform operand reordering on the instructions in VL and return the reordered | |||
5430 | // operands in Left and Right. | |||
5431 | void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, | |||
5432 | SmallVectorImpl<Value *> &Left, | |||
5433 | SmallVectorImpl<Value *> &Right, | |||
5434 | const DataLayout &DL, | |||
5435 | ScalarEvolution &SE, | |||
5436 | const BoUpSLP &R) { | |||
5437 | if (VL.empty()) | |||
5438 | return; | |||
5439 | VLOperands Ops(VL, DL, SE, R); | |||
5440 | // Reorder the operands in place. | |||
5441 | Ops.reorder(); | |||
5442 | Left = Ops.getVL(0); | |||
5443 | Right = Ops.getVL(1); | |||
5444 | } | |||
5445 | ||||
5446 | void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) { | |||
5447 | // Get the basic block this bundle is in. All instructions in the bundle | |||
5448 | // should be in this block. | |||
5449 | auto *Front = E->getMainOp(); | |||
5450 | auto *BB = Front->getParent(); | |||
5451 | assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool { | |||
5452 | auto *I = cast<Instruction>(V); | |||
5453 | return !E->isOpcodeOrAlt(I) || I->getParent() == BB; | |||
5454 | })); | |||
5455 | ||||
5456 | // The last instruction in the bundle in program order. | |||
5457 | Instruction *LastInst = nullptr; | |||
5458 | ||||
5459 | // Find the last instruction. The common case should be that BB has been | |||
5460 | // scheduled, and the last instruction is VL.back(). So we start with | |||
5461 | // VL.back() and iterate over schedule data until we reach the end of the | |||
5462 | // bundle. The end of the bundle is marked by null ScheduleData. | |||
5463 | if (BlocksSchedules.count(BB)) { | |||
5464 | auto *Bundle = | |||
5465 | BlocksSchedules[BB]->getScheduleData(E->isOneOf(E->Scalars.back())); | |||
5466 | if (Bundle && Bundle->isPartOfBundle()) | |||
5467 | for (; Bundle; Bundle = Bundle->NextInBundle) | |||
5468 | if (Bundle->OpValue == Bundle->Inst) | |||
5469 | LastInst = Bundle->Inst; | |||
5470 | } | |||
5471 | ||||
5472 | // LastInst can still be null at this point if there's either not an entry | |||
5473 | // for BB in BlocksSchedules or there's no ScheduleData available for | |||
5474 | // VL.back(). This can be the case if buildTree_rec aborts for various | |||
5475 | // reasons (e.g., the maximum recursion depth is reached, the maximum region | |||
5476 | // size is reached, etc.). ScheduleData is initialized in the scheduling | |||
5477 | // "dry-run". | |||
5478 | // | |||
5479 | // If this happens, we can still find the last instruction by brute force. We | |||
5480 | // iterate forwards from Front (inclusive) until we either see all | |||
5481 | // instructions in the bundle or reach the end of the block. If Front is the | |||
5482 | // last instruction in program order, LastInst will be set to Front, and we | |||
5483 | // will visit all the remaining instructions in the block. | |||
5484 | // | |||
5485 | // One of the reasons we exit early from buildTree_rec is to place an upper | |||
5486 | // bound on compile-time. Thus, taking an additional compile-time hit here is | |||
5487 | // not ideal. However, this should be exceedingly rare since it requires that | |||
5488 | // we both exit early from buildTree_rec and that the bundle be out-of-order | |||
5489 | // (causing us to iterate all the way to the end of the block). | |||
5490 | if (!LastInst) { | |||
5491 | SmallPtrSet<Value *, 16> Bundle(E->Scalars.begin(), E->Scalars.end()); | |||
5492 | for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) { | |||
5493 | if (Bundle.erase(&I) && E->isOpcodeOrAlt(&I)) | |||
5494 | LastInst = &I; | |||
5495 | if (Bundle.empty()) | |||
5496 | break; | |||
5497 | } | |||
5498 | } | |||
5499 | assert(LastInst && "Failed to find last instruction in bundle"); | |||
5500 | ||||
5501 | // Set the insertion point after the last instruction in the bundle. Set the | |||
5502 | // debug location to Front. | |||
5503 | Builder.SetInsertPoint(BB, ++LastInst->getIterator()); | |||
5504 | Builder.SetCurrentDebugLocation(Front->getDebugLoc()); | |||
5505 | } | |||
5506 | ||||
5507 | Value *BoUpSLP::gather(ArrayRef<Value *> VL) { | |||
5508 | // List of instructions/lanes from the current block and/or the blocks that | |||
5509 | // are part of the current loop. These instructions will be inserted at the | |||
5510 | // end to make it possible to optimize loops and hoist invariant instructions | |||
5511 | // out of the loop's body with better chances for success. | |||
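// Illustrative example (annotation, not in the original source): a scalar
// defined inside the loop that encloses the insertion point, or one that
// already has its own tree entry, gets its insertelement postponed below, so
// the constant and invariant lanes inserted first remain hoistable out of the
// loop.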
5512 | SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts; | |||
5513 | SmallSet<int, 4> PostponedIndices; | |||
5514 | Loop *L = LI->getLoopFor(Builder.GetInsertBlock()); | |||
5515 | auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) { | |||
5516 | SmallPtrSet<BasicBlock *, 4> Visited; | |||
5517 | while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second) | |||
5518 | InsertBB = InsertBB->getSinglePredecessor(); | |||
5519 | return InsertBB && InsertBB == InstBB; | |||
5520 | }; | |||
5521 | for (int I = 0, E = VL.size(); I < E; ++I) { | |||
5522 | if (auto *Inst = dyn_cast<Instruction>(VL[I])) | |||
5523 | if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) || | |||
5524 | getTreeEntry(Inst) || (L && (L->contains(Inst)))) && | |||
5525 | PostponedIndices.insert(I).second) | |||
5526 | PostponedInsts.emplace_back(Inst, I); | |||
5527 | } | |||
5528 | ||||
5529 | auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) { | |||
5530 | Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos)); | |||
5531 | auto *InsElt = dyn_cast<InsertElementInst>(Vec); | |||
5532 | if (!InsElt) | |||
5533 | return Vec; | |||
5534 | GatherSeq.insert(InsElt); | |||
5535 | CSEBlocks.insert(InsElt->getParent()); | |||
5536 | // Add to our 'need-to-extract' list. | |||
5537 | if (TreeEntry *Entry = getTreeEntry(V)) { | |||
5538 | // Find which lane we need to extract. | |||
5539 | unsigned FoundLane = Entry->findLaneForValue(V); | |||
5540 | ExternalUses.emplace_back(V, InsElt, FoundLane); | |||
5541 | } | |||
5542 | return Vec; | |||
5543 | }; | |||
5544 | Value *Val0 = | |||
5545 | isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0]; | |||
5546 | FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size()); | |||
5547 | Value *Vec = PoisonValue::get(VecTy); | |||
5548 | SmallVector<int> NonConsts; | |||
5549 | // Insert constant values at first. | |||
5550 | for (int I = 0, E = VL.size(); I < E; ++I) { | |||
5551 | if (PostponedIndices.contains(I)) | |||
5552 | continue; | |||
5553 | if (!isConstant(VL[I])) { | |||
5554 | NonConsts.push_back(I); | |||
5555 | continue; | |||
5556 | } | |||
5557 | Vec = CreateInsertElement(Vec, VL[I], I); | |||
5558 | } | |||
5559 | // Insert non-constant values. | |||
5560 | for (int I : NonConsts) | |||
5561 | Vec = CreateInsertElement(Vec, VL[I], I); | |||
5562 | // Append instructions that are (or may be) part of the loop at the end, to | |||
5563 | // make it possible to hoist non-loop-based instructions. | |||
5564 | for (const std::pair<Value *, unsigned> &Pair : PostponedInsts) | |||
5565 | Vec = CreateInsertElement(Vec, Pair.first, Pair.second); | |||
5566 | ||||
5567 | return Vec; | |||
5568 | } | |||
5569 | ||||
5570 | namespace { | |||
5571 | /// Merges shuffle masks and emits final shuffle instruction, if required. | |||
5572 | class ShuffleInstructionBuilder { | |||
5573 | IRBuilderBase &Builder; | |||
5574 | const unsigned VF = 0; | |||
5575 | bool IsFinalized = false; | |||
5576 | SmallVector<int, 4> Mask; | |||
5577 | ||||
5578 | public: | |||
5579 | ShuffleInstructionBuilder(IRBuilderBase &Builder, unsigned VF) | |||
5580 | : Builder(Builder), VF(VF) {} | |||
5581 | ||||
5582 | /// Adds a mask, inverting it before applying. | |||
5583 | void addInversedMask(ArrayRef<unsigned> SubMask) { | |||
5584 | if (SubMask.empty()) | |||
5585 | return; | |||
5586 | SmallVector<int, 4> NewMask; | |||
5587 | inversePermutation(SubMask, NewMask); | |||
5588 | addMask(NewMask); | |||
5589 | } | |||
5590 | ||||
5591 | /// Adds a mask, merging it into the accumulated mask. | |||
5592 | void addMask(ArrayRef<unsigned> SubMask) { | |||
5593 | SmallVector<int, 4> NewMask(SubMask.begin(), SubMask.end()); | |||
5594 | addMask(NewMask); | |||
5595 | } | |||
5596 | ||||
5597 | void addMask(ArrayRef<int> SubMask) { ::addMask(Mask, SubMask); } | |||
5598 | ||||
5599 | Value *finalize(Value *V) { | |||
5600 | IsFinalized = true; | |||
5601 | unsigned ValueVF = cast<FixedVectorType>(V->getType())->getNumElements(); | |||
5602 | if (VF == ValueVF && Mask.empty()) | |||
5603 | return V; | |||
5604 | SmallVector<int, 4> NormalizedMask(VF, UndefMaskElem); | |||
5605 | std::iota(NormalizedMask.begin(), NormalizedMask.end(), 0); | |||
5606 | addMask(NormalizedMask); | |||
5607 | ||||
5608 | if (VF == ValueVF && ShuffleVectorInst::isIdentityMask(Mask)) | |||
5609 | return V; | |||
5610 | return Builder.CreateShuffleVector(V, Mask, "shuffle"); | |||
5611 | } | |||
5612 | ||||
5613 | ~ShuffleInstructionBuilder() { | |||
5614 | assert((IsFinalized || Mask.empty()) && | |||
5615 | "Shuffle construction must be finalized."); | |||
5616 | } | |||
5617 | }; | |||
5618 | } // namespace | |||
5619 | ||||
5620 | Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) { | |||
5621 | unsigned VF = VL.size(); | |||
5622 | InstructionsState S = getSameOpcode(VL); | |||
5623 | if (S.getOpcode()) { | |||
5624 | if (TreeEntry *E = getTreeEntry(S.OpValue)) | |||
5625 | if (E->isSame(VL)) { | |||
5626 | Value *V = vectorizeTree(E); | |||
5627 | if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) { | |||
5628 | if (!E->ReuseShuffleIndices.empty()) { | |||
5629 | // Reshuffle to get only unique values. | |||
5630 | // If some of the scalars are duplicated in the vectorization tree | |||
5631 | // entry, we do not vectorize them but instead generate a mask for | |||
5632 | // the reuses. But if there are several users of the same entry, | |||
5633 | // they may have different vectorization factors. This is especially | |||
5634 | // important for PHI nodes. In this case, we need to adapt the | |||
5635 | // resulting instruction for the user vectorization factor and have | |||
5636 | // to reshuffle it again to take only unique elements of the vector. | |||
5637 | // Without this code the function incorrectly returns a reduced vector | |||
5638 | // instruction with the same elements, not with the unique ones. | |||
5639 | ||||
5640 | // block: | |||
5641 | // %phi = phi <2 x > { .., %entry} {%shuffle, %block} | |||
5642 | // %2 = shuffle <2 x > %phi, %poison, <4 x > <0, 0, 1, 1> | |||
5643 | // ... (use %2) | |||
5644 | // %shuffle = shuffle <2 x> %2, poison, <2 x> {0, 2} | |||
5645 | // br %block | |||
5646 | SmallVector<int> UniqueIdxs; | |||
5647 | SmallSet<int, 4> UsedIdxs; | |||
5648 | int Pos = 0; | |||
5649 | int Sz = VL.size(); | |||
5650 | for (int Idx : E->ReuseShuffleIndices) { | |||
5651 | if (Idx != Sz && UsedIdxs.insert(Idx).second) | |||
5652 | UniqueIdxs.emplace_back(Pos); | |||
5653 | ++Pos; | |||
5654 | } | |||
5655 | assert(VF >= UsedIdxs.size() && "Expected vectorization factor " | |||
5656 | "less than original vector size."); | |||
5657 | UniqueIdxs.append(VF - UsedIdxs.size(), UndefMaskElem); | |||
5658 | V = Builder.CreateShuffleVector(V, UniqueIdxs, "shrink.shuffle"); | |||
5659 | } else { | |||
5660 | assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() && | |||
5661 | "Expected vectorization factor less " | |||
5662 | "than original vector size."); | |||
5663 | SmallVector<int> UniformMask(VF, 0); | |||
5664 | std::iota(UniformMask.begin(), UniformMask.end(), 0); | |||
5665 | V = Builder.CreateShuffleVector(V, UniformMask, "shrink.shuffle"); | |||
5666 | } | |||
5667 | } | |||
5668 | return V; | |||
5669 | } | |||
5670 | } | |||
5671 | ||||
5672 | // Check that every instruction appears once in this bundle. | |||
5673 | SmallVector<int> ReuseShuffleIndicies; | |||
5674 | SmallVector<Value *> UniqueValues; | |||
5675 | if (VL.size() > 2) { | |||
5676 | DenseMap<Value *, unsigned> UniquePositions; | |||
5677 | unsigned NumValues = | |||
5678 | std::distance(VL.begin(), find_if(reverse(VL), [](Value *V) { | |||
5679 | return !isa<UndefValue>(V); | |||
5680 | }).base()); | |||
5681 | VF = std::max<unsigned>(VF, PowerOf2Ceil(NumValues)); | |||
5682 | int UniqueVals = 0; | |||
5683 | for (Value *V : VL.drop_back(VL.size() - VF)) { | |||
5684 | if (isa<UndefValue>(V)) { | |||
5685 | ReuseShuffleIndicies.emplace_back(UndefMaskElem); | |||
5686 | continue; | |||
5687 | } | |||
5688 | if (isConstant(V)) { | |||
5689 | ReuseShuffleIndicies.emplace_back(UniqueValues.size()); | |||
5690 | UniqueValues.emplace_back(V); | |||
5691 | continue; | |||
5692 | } | |||
5693 | auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); | |||
5694 | ReuseShuffleIndicies.emplace_back(Res.first->second); | |||
5695 | if (Res.second) { | |||
5696 | UniqueValues.emplace_back(V); | |||
5697 | ++UniqueVals; | |||
5698 | } | |||
5699 | } | |||
5700 | if (UniqueVals == 1 && UniqueValues.size() == 1) { | |||
5701 | // Emit pure splat vector. | |||
5702 | ReuseShuffleIndicies.append(VF - ReuseShuffleIndicies.size(), | |||
5703 | UndefMaskElem); | |||
5704 | } else if (UniqueValues.size() >= VF - 1 || UniqueValues.size() <= 1) { | |||
5705 | ReuseShuffleIndicies.clear(); | |||
5706 | UniqueValues.clear(); | |||
5707 | UniqueValues.append(VL.begin(), std::next(VL.begin(), NumValues)); | |||
5708 | } | |||
5709 | UniqueValues.append(VF - UniqueValues.size(), | |||
5710 | PoisonValue::get(VL[0]->getType())); | |||
5711 | VL = UniqueValues; | |||
5712 | } | |||
5713 | ||||
5714 | ShuffleInstructionBuilder ShuffleBuilder(Builder, VF); | |||
5715 | Value *Vec = gather(VL); | |||
5716 | if (!ReuseShuffleIndicies.empty()) { | |||
5717 | ShuffleBuilder.addMask(ReuseShuffleIndicies); | |||
5718 | Vec = ShuffleBuilder.finalize(Vec); | |||
5719 | if (auto *I = dyn_cast<Instruction>(Vec)) { | |||
5720 | GatherSeq.insert(I); | |||
5721 | CSEBlocks.insert(I->getParent()); | |||
5722 | } | |||
5723 | } | |||
5724 | return Vec; | |||
5725 | } | |||
5726 | ||||
5727 | Value *BoUpSLP::vectorizeTree(TreeEntry *E) { | |||
5728 | IRBuilder<>::InsertPointGuard Guard(Builder); | |||
5729 | ||||
5730 | if (E->VectorizedValue) { | |||
5731 | LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"; } } while (false); | |||
5732 | return E->VectorizedValue; | |||
5733 | } | |||
5734 | ||||
5735 | bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); | |||
5736 | unsigned VF = E->Scalars.size(); | |||
5737 | if (NeedToShuffleReuses) | |||
5738 | VF = E->ReuseShuffleIndices.size(); | |||
5739 | ShuffleInstructionBuilder ShuffleBuilder(Builder, VF); | |||
5740 | if (E->State == TreeEntry::NeedToGather) { | |||
5741 | setInsertPointAfterBundle(E); | |||
5742 | Value *Vec; | |||
5743 | SmallVector<int> Mask; | |||
5744 | SmallVector<const TreeEntry *> Entries; | |||
5745 | Optional<TargetTransformInfo::ShuffleKind> Shuffle = | |||
5746 | isGatherShuffledEntry(E, Mask, Entries); | |||
5747 | if (Shuffle.hasValue()) { | |||
5748 | assert((Entries.size() == 1 || Entries.size() == 2) && | |||
5749 | "Expected shuffle of 1 or 2 entries."); | |||
5750 | Vec = Builder.CreateShuffleVector(Entries.front()->VectorizedValue, | |||
5751 | Entries.back()->VectorizedValue, Mask); | |||
5752 | } else { | |||
5753 | Vec = gather(E->Scalars); | |||
5754 | } | |||
5755 | if (NeedToShuffleReuses) { | |||
5756 | ShuffleBuilder.addMask(E->ReuseShuffleIndices); | |||
5757 | Vec = ShuffleBuilder.finalize(Vec); | |||
5758 | if (auto *I = dyn_cast<Instruction>(Vec)) { | |||
5759 | GatherSeq.insert(I); | |||
5760 | CSEBlocks.insert(I->getParent()); | |||
5761 | } | |||
5762 | } | |||
5763 | E->VectorizedValue = Vec; | |||
5764 | return Vec; | |||
5765 | } | |||
5766 | ||||
5767 | assert((E->State == TreeEntry::Vectorize || | |||
5768 | E->State == TreeEntry::ScatterVectorize) && | |||
5769 | "Unhandled state"); | |||
5770 | unsigned ShuffleOrOp = | |||
5771 | E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode(); | |||
5772 | Instruction *VL0 = E->getMainOp(); | |||
5773 | Type *ScalarTy = VL0->getType(); | |||
5774 | if (auto *Store = dyn_cast<StoreInst>(VL0)) | |||
5775 | ScalarTy = Store->getValueOperand()->getType(); | |||
5776 | else if (auto *IE = dyn_cast<InsertElementInst>(VL0)) | |||
5777 | ScalarTy = IE->getOperand(1)->getType(); | |||
5778 | auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size()); | |||
5779 | switch (ShuffleOrOp) { | |||
5780 | case Instruction::PHI: { | |||
5781 | assert( | |||
5782 | (E->ReorderIndices.empty() || E != VectorizableTree.front().get()) && | |||
5783 | "PHI reordering is free."); | |||
5784 | auto *PH = cast<PHINode>(VL0); | |||
5785 | Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI()); | |||
5786 | Builder.SetCurrentDebugLocation(PH->getDebugLoc()); | |||
5787 | PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); | |||
5788 | Value *V = NewPhi; | |||
5789 | ShuffleBuilder.addInversedMask(E->ReorderIndices); | |||
5790 | ShuffleBuilder.addMask(E->ReuseShuffleIndices); | |||
5791 | V = ShuffleBuilder.finalize(V); | |||
5792 | ||||
5793 | E->VectorizedValue = V; | |||
5794 | ||||
5795 | // PHINodes may have multiple entries from the same block. We want to | |||
5796 | // visit every block once. | |||
5797 | SmallPtrSet<BasicBlock*, 4> VisitedBBs; | |||
5798 | ||||
5799 | for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { | |||
5800 | ValueList Operands; | |||
5801 | BasicBlock *IBB = PH->getIncomingBlock(i); | |||
5802 | ||||
5803 | if (!VisitedBBs.insert(IBB).second) { | |||
5804 | NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); | |||
5805 | continue; | |||
5806 | } | |||
5807 | ||||
5808 | Builder.SetInsertPoint(IBB->getTerminator()); | |||
5809 | Builder.SetCurrentDebugLocation(PH->getDebugLoc()); | |||
5810 | Value *Vec = vectorizeTree(E->getOperand(i)); | |||
5811 | NewPhi->addIncoming(Vec, IBB); | |||
5812 | } | |||
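// For a predecessor block that appears more than once in the PHI, the branch
// above reuses the vector incoming value that was already created for that
// block, so each operand bundle is vectorized at most once per predecessor.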
5813 | ||||
5814 | assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && | |||
5815 | "Invalid number of incoming values"); | |||
5816 | return V; | |||
5817 | } | |||
5818 | ||||
5819 | case Instruction::ExtractElement: { | |||
5820 | Value *V = E->getSingleOperand(0); | |||
5821 | Builder.SetInsertPoint(VL0); | |||
5822 | ShuffleBuilder.addInversedMask(E->ReorderIndices); | |||
5823 | ShuffleBuilder.addMask(E->ReuseShuffleIndices); | |||
5824 | V = ShuffleBuilder.finalize(V); | |||
5825 | E->VectorizedValue = V; | |||
5826 | return V; | |||
5827 | } | |||
5828 | case Instruction::ExtractValue: { | |||
5829 | auto *LI = cast<LoadInst>(E->getSingleOperand(0)); | |||
5830 | Builder.SetInsertPoint(LI); | |||
5831 | auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace()); | |||
5832 | Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy); | |||
5833 | LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign()); | |||
5834 | Value *NewV = propagateMetadata(V, E->Scalars); | |||
5835 | ShuffleBuilder.addInversedMask(E->ReorderIndices); | |||
5836 | ShuffleBuilder.addMask(E->ReuseShuffleIndices); | |||
5837 | NewV = ShuffleBuilder.finalize(NewV); | |||
5838 | E->VectorizedValue = NewV; | |||
5839 | return NewV; | |||
5840 | } | |||
5841 | case Instruction::InsertElement: { | |||
5842 | assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique"); | |||
5843 | Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back())); | |||
5844 | Value *V = vectorizeTree(E->getOperand(1)); | |||
5845 | ||||
5846 | // Create InsertVector shuffle if necessary | |||
5847 | auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { | |||
5848 | return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0)); | |||
5849 | })); | |||
5850 | const unsigned NumElts = | |||
5851 | cast<FixedVectorType>(FirstInsert->getType())->getNumElements(); | |||
5852 | const unsigned NumScalars = E->Scalars.size(); | |||
5853 | ||||
5854 | unsigned Offset = *getInsertIndex(VL0, 0); | |||
5855 | assert(Offset < NumElts && "Failed to find vector index offset"); | |||
5856 | ||||
5857 | // Create shuffle to resize vector | |||
5858 | SmallVector<int> Mask; | |||
5859 | if (!E->ReorderIndices.empty()) { | |||
5860 | inversePermutation(E->ReorderIndices, Mask); | |||
5861 | Mask.append(NumElts - NumScalars, UndefMaskElem); | |||
5862 | } else { | |||
5863 | Mask.assign(NumElts, UndefMaskElem); | |||
5864 | std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); | |||
5865 | } | |||
5866 | // Create InsertVector shuffle if necessary | |||
5867 | bool IsIdentity = true; | |||
5868 | SmallVector<int> PrevMask(NumElts, UndefMaskElem); | |||
5869 | Mask.swap(PrevMask); | |||
5870 | for (unsigned I = 0; I < NumScalars; ++I) { | |||
5871 | Value *Scalar = E->Scalars[PrevMask[I]]; | |||
5872 | Optional<int> InsertIdx = getInsertIndex(Scalar, 0); | |||
5873 | if (!InsertIdx || *InsertIdx == UndefMaskElem) | |||
5874 | continue; | |||
5875 | IsIdentity &= *InsertIdx - Offset == I; | |||
5876 | Mask[*InsertIdx - Offset] = I; | |||
5877 | } | |||
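// Illustrative example: with NumScalars == 2, NumElts == 4 and scalars that
// insert into lanes 2 and 3 (so Offset == 2), the loop above produces
// Mask == {0, 1, undef, undef} and IsIdentity stays true; the shuffle below
// is still emitted because NumElts != NumScalars resizes the vector.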
5878 | if (!IsIdentity || NumElts != NumScalars) | |||
5879 | V = Builder.CreateShuffleVector(V, Mask); | |||
5880 | ||||
5881 | if ((!IsIdentity || Offset != 0 || | |||
5882 | !isa<UndefValue>(FirstInsert->getOperand(0))) && | |||
5883 | NumElts != NumScalars) { | |||
5884 | SmallVector<int> InsertMask(NumElts); | |||
5885 | std::iota(InsertMask.begin(), InsertMask.end(), 0); | |||
5886 | for (unsigned I = 0; I < NumElts; I++) { | |||
5887 | if (Mask[I] != UndefMaskElem) | |||
5888 | InsertMask[Offset + I] = NumElts + I; | |||
5889 | } | |||
5890 | ||||
5891 | V = Builder.CreateShuffleVector( | |||
5892 | FirstInsert->getOperand(0), V, InsertMask, | |||
5893 | cast<Instruction>(E->Scalars.back())->getName()); | |||
5894 | } | |||
5895 | ||||
5896 | ++NumVectorInstructions; | |||
5897 | E->VectorizedValue = V; | |||
5898 | return V; | |||
5899 | } | |||
5900 | case Instruction::ZExt: | |||
5901 | case Instruction::SExt: | |||
5902 | case Instruction::FPToUI: | |||
5903 | case Instruction::FPToSI: | |||
5904 | case Instruction::FPExt: | |||
5905 | case Instruction::PtrToInt: | |||
5906 | case Instruction::IntToPtr: | |||
5907 | case Instruction::SIToFP: | |||
5908 | case Instruction::UIToFP: | |||
5909 | case Instruction::Trunc: | |||
5910 | case Instruction::FPTrunc: | |||
5911 | case Instruction::BitCast: { | |||
5912 | setInsertPointAfterBundle(E); | |||
5913 | ||||
5914 | Value *InVec = vectorizeTree(E->getOperand(0)); | |||
5915 | ||||
5916 | if (E->VectorizedValue) { | |||
5917 | LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); | |||
5918 | return E->VectorizedValue; | |||
5919 | } | |||
5920 | ||||
5921 | auto *CI = cast<CastInst>(VL0); | |||
5922 | Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); | |||
5923 | ShuffleBuilder.addInversedMask(E->ReorderIndices); | |||
5924 | ShuffleBuilder.addMask(E->ReuseShuffleIndices); | |||
5925 | V = ShuffleBuilder.finalize(V); | |||
5926 | ||||
5927 | E->VectorizedValue = V; | |||
5928 | ++NumVectorInstructions; | |||
5929 | return V; | |||
5930 | } | |||
5931 | case Instruction::FCmp: | |||
5932 | case Instruction::ICmp: { | |||
5933 | setInsertPointAfterBundle(E); | |||
5934 | ||||
5935 | Value *L = vectorizeTree(E->getOperand(0)); | |||
5936 | Value *R = vectorizeTree(E->getOperand(1)); | |||
5937 | ||||
5938 | if (E->VectorizedValue) { | |||
5939 | LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); | |||
5940 | return E->VectorizedValue; | |||
5941 | } | |||
5942 | ||||
5943 | CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); | |||
5944 | Value *V = Builder.CreateCmp(P0, L, R); | |||
5945 | propagateIRFlags(V, E->Scalars, VL0); | |||
5946 | ShuffleBuilder.addInversedMask(E->ReorderIndices); | |||
5947 | ShuffleBuilder.addMask(E->ReuseShuffleIndices); | |||
5948 | V = ShuffleBuilder.finalize(V); | |||
5949 | ||||
5950 | E->VectorizedValue = V; | |||
5951 | ++NumVectorInstructions; | |||
5952 | return V; | |||
5953 | } | |||
5954 | case Instruction::Select: { | |||
5955 | setInsertPointAfterBundle(E); | |||
5956 | ||||
5957 | Value *Cond = vectorizeTree(E->getOperand(0)); | |||
5958 | Value *True = vectorizeTree(E->getOperand(1)); | |||
5959 | Value *False = vectorizeTree(E->getOperand(2)); | |||
5960 | ||||
5961 | if (E->VectorizedValue) { | |||
5962 | LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); | |||
5963 | return E->VectorizedValue; | |||
5964 | } | |||
5965 | ||||
5966 | Value *V = Builder.CreateSelect(Cond, True, False); | |||
5967 | ShuffleBuilder.addInversedMask(E->ReorderIndices); | |||
5968 | ShuffleBuilder.addMask(E->ReuseShuffleIndices); | |||
5969 | V = ShuffleBuilder.finalize(V); | |||
5970 | ||||
5971 | E->VectorizedValue = V; | |||
5972 | ++NumVectorInstructions; | |||
5973 | return V; | |||
5974 | } | |||
5975 | case Instruction::FNeg: { | |||
5976 | setInsertPointAfterBundle(E); | |||
5977 | ||||
5978 | Value *Op = vectorizeTree(E->getOperand(0)); | |||
5979 | ||||
5980 | if (E->VectorizedValue) { | |||
5981 | LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); | |||
5982 | return E->VectorizedValue; | |||
5983 | } | |||
5984 | ||||
5985 | Value *V = Builder.CreateUnOp( | |||
5986 | static_cast<Instruction::UnaryOps>(E->getOpcode()), Op); | |||
5987 | propagateIRFlags(V, E->Scalars, VL0); | |||
5988 | if (auto *I = dyn_cast<Instruction>(V)) | |||
5989 | V = propagateMetadata(I, E->Scalars); | |||
5990 | ||||
5991 | ShuffleBuilder.addInversedMask(E->ReorderIndices); | |||
5992 | ShuffleBuilder.addMask(E->ReuseShuffleIndices); | |||
5993 | V = ShuffleBuilder.finalize(V); | |||
5994 | ||||
5995 | E->VectorizedValue = V; | |||
5996 | ++NumVectorInstructions; | |||
5997 | ||||
5998 | return V; | |||
5999 | } | |||
6000 | case Instruction::Add: | |||
6001 | case Instruction::FAdd: | |||
6002 | case Instruction::Sub: | |||
6003 | case Instruction::FSub: | |||
6004 | case Instruction::Mul: | |||
6005 | case Instruction::FMul: | |||
6006 | case Instruction::UDiv: | |||
6007 | case Instruction::SDiv: | |||
6008 | case Instruction::FDiv: | |||
6009 | case Instruction::URem: | |||
6010 | case Instruction::SRem: | |||
6011 | case Instruction::FRem: | |||
6012 | case Instruction::Shl: | |||
6013 | case Instruction::LShr: | |||
6014 | case Instruction::AShr: | |||
6015 | case Instruction::And: | |||
6016 | case Instruction::Or: | |||
6017 | case Instruction::Xor: { | |||
6018 | setInsertPointAfterBundle(E); | |||
6019 | ||||
6020 | Value *LHS = vectorizeTree(E->getOperand(0)); | |||
6021 | Value *RHS = vectorizeTree(E->getOperand(1)); | |||
6022 | ||||
6023 | if (E->VectorizedValue) { | |||
6024 | LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); | |||
6025 | return E->VectorizedValue; | |||
6026 | } | |||
6027 | ||||
6028 | Value *V = Builder.CreateBinOp( | |||
6029 | static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, | |||
6030 | RHS); | |||
6031 | propagateIRFlags(V, E->Scalars, VL0); | |||
6032 | if (auto *I = dyn_cast<Instruction>(V)) | |||
6033 | V = propagateMetadata(I, E->Scalars); | |||
6034 | ||||
6035 | ShuffleBuilder.addInversedMask(E->ReorderIndices); | |||
6036 | ShuffleBuilder.addMask(E->ReuseShuffleIndices); | |||
6037 | V = ShuffleBuilder.finalize(V); | |||
6038 | ||||
6039 | E->VectorizedValue = V; | |||
6040 | ++NumVectorInstructions; | |||
6041 | ||||
6042 | return V; | |||
6043 | } | |||
6044 | case Instruction::Load: { | |||
6045 | // Loads are inserted at the head of the tree because we don't want to | |||
6046 | // sink them all the way down past store instructions. | |||
6047 | setInsertPointAfterBundle(E); | |||
6048 | ||||
6049 | LoadInst *LI = cast<LoadInst>(VL0); | |||
6050 | Instruction *NewLI; | |||
6051 | unsigned AS = LI->getPointerAddressSpace(); | |||
6052 | Value *PO = LI->getPointerOperand(); | |||
6053 | if (E->State == TreeEntry::Vectorize) { | |||
6054 | ||||
6055 | Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS)); | |||
6056 | ||||
6057 | // The pointer operand uses an in-tree scalar, so we add the new BitCast | |||
6058 | // to the ExternalUses list to make sure that an extract will be generated | |||
6059 | // in the future. | |||
6060 | if (getTreeEntry(PO)) | |||
6061 | ExternalUses.emplace_back(PO, cast<User>(VecPtr), 0); | |||
6062 | ||||
6063 | NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign()); | |||
6064 | } else { | |||
6065 | assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state"); | |||
6066 | Value *VecPtr = vectorizeTree(E->getOperand(0)); | |||
6067 | // Use the minimum alignment of the gathered loads. | |||
6068 | Align CommonAlignment = LI->getAlign(); | |||
6069 | for (Value *V : E->Scalars) | |||
6070 | CommonAlignment = | |||
6071 | commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); | |||
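// commonAlignment keeps the smaller of the two alignments, so e.g. a bundle
// of loads aligned to 16 and 4 bytes yields a masked gather with alignment 4.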
6072 | NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment); | |||
6073 | } | |||
6074 | Value *V = propagateMetadata(NewLI, E->Scalars); | |||
6075 | ||||
6076 | ShuffleBuilder.addInversedMask(E->ReorderIndices); | |||
6077 | ShuffleBuilder.addMask(E->ReuseShuffleIndices); | |||
6078 | V = ShuffleBuilder.finalize(V); | |||
6079 | E->VectorizedValue = V; | |||
6080 | ++NumVectorInstructions; | |||
6081 | return V; | |||
6082 | } | |||
6083 | case Instruction::Store: { | |||
6084 | auto *SI = cast<StoreInst>(VL0); | |||
6085 | unsigned AS = SI->getPointerAddressSpace(); | |||
6086 | ||||
6087 | setInsertPointAfterBundle(E); | |||
6088 | ||||
6089 | Value *VecValue = vectorizeTree(E->getOperand(0)); | |||
6090 | ShuffleBuilder.addMask(E->ReorderIndices); | |||
6091 | VecValue = ShuffleBuilder.finalize(VecValue); | |||
6092 | ||||
6093 | Value *ScalarPtr = SI->getPointerOperand(); | |||
6094 | Value *VecPtr = Builder.CreateBitCast( | |||
6095 | ScalarPtr, VecValue->getType()->getPointerTo(AS)); | |||
6096 | StoreInst *ST = Builder.CreateAlignedStore(VecValue, VecPtr, | |||
6097 | SI->getAlign()); | |||
6098 | ||||
6099 | // The pointer operand uses an in-tree scalar, so add the new BitCast to | |||
6100 | // ExternalUses to make sure that an extract will be generated in the | |||
6101 | // future. | |||
6102 | if (getTreeEntry(ScalarPtr)) | |||
6103 | ExternalUses.push_back(ExternalUser(ScalarPtr, cast<User>(VecPtr), 0)); | |||
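// The BitCast above consumes the scalar pointer directly; when that pointer
// is itself vectorized as part of the tree, the recorded external use forces
// an extractelement to be emitted later so the BitCast keeps a valid scalar
// operand.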
6104 | ||||
6105 | Value *V = propagateMetadata(ST, E->Scalars); | |||
6106 | ||||
6107 | E->VectorizedValue = V; | |||
6108 | ++NumVectorInstructions; | |||
6109 | return V; | |||
6110 | } | |||
6111 | case Instruction::GetElementPtr: { | |||
6112 | setInsertPointAfterBundle(E); | |||
6113 | ||||
6114 | Value *Op0 = vectorizeTree(E->getOperand(0)); | |||
6115 | ||||
6116 | std::vector<Value *> OpVecs; | |||
6117 | for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; | |||
6118 | ++j) { | |||
6119 | ValueList &VL = E->getOperand(j); | |||
6120 | // Need to cast all elements to the same type before vectorization to | |||
6121 | // avoid crash. | |||
6122 | Type *VL0Ty = VL0->getOperand(j)->getType(); | |||
6123 | Type *Ty = llvm::all_of( | |||
6124 | VL, [VL0Ty](Value *V) { return VL0Ty == V->getType(); }) | |||
6125 | ? VL0Ty | |||
6126 | : DL->getIndexType(cast<GetElementPtrInst>(VL0) | |||
6127 | ->getPointerOperandType() | |||
6128 | ->getScalarType()); | |||
6129 | for (Value *&V : VL) { | |||
6130 | auto *CI = cast<ConstantInt>(V); | |||
6131 | V = ConstantExpr::getIntegerCast(CI, Ty, | |||
6132 | CI->getValue().isSignBitSet()); | |||
6133 | } | |||
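// All constant indices of this GEP operand are normalized to one integer
// type here (the type used by VL0, or the pointer's index type when the
// bundle mixes e.g. i32 and i64 constants), so the vector GEP built below
// gets uniformly typed index operands.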
6134 | Value *OpVec = vectorizeTree(VL); | |||
6135 | OpVecs.push_back(OpVec); | |||
6136 | } | |||
6137 | ||||
6138 | Value *V = Builder.CreateGEP( | |||
6139 | cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs); | |||
6140 | if (Instruction *I = dyn_cast<Instruction>(V)) | |||
6141 | V = propagateMetadata(I, E->Scalars); | |||
6142 | ||||
6143 | ShuffleBuilder.addInversedMask(E->ReorderIndices); | |||
6144 | ShuffleBuilder.addMask(E->ReuseShuffleIndices); | |||
6145 | V = ShuffleBuilder.finalize(V); | |||
6146 | ||||
6147 | E->VectorizedValue = V; | |||
6148 | ++NumVectorInstructions; | |||
6149 | ||||
6150 | return V; | |||
6151 | } | |||
6152 | case Instruction::Call: { | |||
6153 | CallInst *CI = cast<CallInst>(VL0); | |||
6154 | setInsertPointAfterBundle(E); | |||
6155 | ||||
6156 | Intrinsic::ID IID = Intrinsic::not_intrinsic; | |||
6157 | if (Function *FI = CI->getCalledFunction()) | |||
6158 | IID = FI->getIntrinsicID(); | |||
6159 | ||||
6160 | Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); | |||
6161 | ||||
6162 | auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); | |||
6163 | bool UseIntrinsic = ID != Intrinsic::not_intrinsic && | |||
6164 | VecCallCosts.first <= VecCallCosts.second; | |||
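// The intrinsic form is chosen only when the call maps to a vector intrinsic
// and its cost (VecCallCosts.first) does not exceed the vector library-call
// cost (VecCallCosts.second); otherwise a vector library function is looked
// up through VFDatabase below.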
6165 | ||||
6166 | Value *ScalarArg = nullptr; | |||
6167 | std::vector<Value *> OpVecs; | |||
6168 | SmallVector<Type *, 2> TysForDecl = | |||
6169 | {FixedVectorType::get(CI->getType(), E->Scalars.size())}; | |||
6170 | for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) { | |||
6171 | ValueList OpVL; | |||
6172 | // Some intrinsics have scalar arguments. Such arguments should not be | |||
6173 | // vectorized. | |||
6174 | if (UseIntrinsic && hasVectorInstrinsicScalarOpd(IID, j)) { | |||
6175 | CallInst *CEI = cast<CallInst>(VL0); | |||
6176 | ScalarArg = CEI->getArgOperand(j); | |||
6177 | OpVecs.push_back(CEI->getArgOperand(j)); | |||
6178 | if (hasVectorInstrinsicOverloadedScalarOpd(IID, j)) | |||
6179 | TysForDecl.push_back(ScalarArg->getType()); | |||
6180 | continue; | |||
6181 | } | |||
6182 | ||||
6183 | Value *OpVec = vectorizeTree(E->getOperand(j)); | |||
6184 | LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); | |||
6185 | OpVecs.push_back(OpVec); | |||
6186 | } | |||
6187 | ||||
6188 | Function *CF; | |||
6189 | if (!UseIntrinsic) { | |||
6190 | VFShape Shape = | |||
6191 | VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>( | |||
6192 | VecTy->getNumElements())), | |||
6193 | false /*HasGlobalPred*/); | |||
6194 | CF = VFDatabase(*CI).getVectorizedFunction(Shape); | |||
6195 | } else { | |||
6196 | CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl); | |||
6197 | } | |||
6198 | ||||
6199 | SmallVector<OperandBundleDef, 1> OpBundles; | |||
6200 | CI->getOperandBundlesAsDefs(OpBundles); | |||
6201 | Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); | |||
6202 | ||||
6203 | // The scalar argument uses an in-tree scalar, so we add the new vectorized | |||
6204 | // call to the ExternalUses list to make sure that an extract will be | |||
6205 | // generated in the future. | |||
6206 | if (ScalarArg && getTreeEntry(ScalarArg)) | |||
6207 | ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0)); | |||
6208 | ||||
6209 | propagateIRFlags(V, E->Scalars, VL0); | |||
6210 | ShuffleBuilder.addInversedMask(E->ReorderIndices); | |||
6211 | ShuffleBuilder.addMask(E->ReuseShuffleIndices); | |||
6212 | V = ShuffleBuilder.finalize(V); | |||
6213 | ||||
6214 | E->VectorizedValue = V; | |||
6215 | ++NumVectorInstructions; | |||
6216 | return V; | |||
6217 | } | |||
6218 | case Instruction::ShuffleVector: { | |||
6219 | assert(E->isAltShuffle() && | |||
6220 | ((Instruction::isBinaryOp(E->getOpcode()) && | |||
6221 | Instruction::isBinaryOp(E->getAltOpcode())) || | |||
6222 | (Instruction::isCast(E->getOpcode()) && | |||
6223 | Instruction::isCast(E->getAltOpcode()))) && | |||
6224 | "Invalid Shuffle Vector Operand"); | |||
6225 | ||||
6226 | Value *LHS = nullptr, *RHS = nullptr; | |||
6227 | if (Instruction::isBinaryOp(E->getOpcode())) { | |||
6228 | setInsertPointAfterBundle(E); | |||
6229 | LHS = vectorizeTree(E->getOperand(0)); | |||
6230 | RHS = vectorizeTree(E->getOperand(1)); | |||
6231 | } else { | |||
6232 | setInsertPointAfterBundle(E); | |||
6233 | LHS = vectorizeTree(E->getOperand(0)); | |||
6234 | } | |||
6235 | ||||
6236 | if (E->VectorizedValue) { | |||
6237 | LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); | |||
6238 | return E->VectorizedValue; | |||
6239 | } | |||
6240 | ||||
6241 | Value *V0, *V1; | |||
6242 | if (Instruction::isBinaryOp(E->getOpcode())) { | |||
6243 | V0 = Builder.CreateBinOp( | |||
6244 | static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS); | |||
6245 | V1 = Builder.CreateBinOp( | |||
6246 | static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS); | |||
6247 | } else { | |||
6248 | V0 = Builder.CreateCast( | |||
6249 | static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy); | |||
6250 | V1 = Builder.CreateCast( | |||
6251 | static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy); | |||
6252 | } | |||
6253 | ||||
6254 | // Create shuffle to take alternate operations from the vector. | |||
6255 | // Also, gather up main and alt scalar ops to propagate IR flags to | |||
6256 | // each vector operation. | |||
6257 | ValueList OpScalars, AltScalars; | |||
6258 | SmallVector<int> Mask; | |||
6259 | buildSuffleEntryMask( | |||
6260 | E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices, | |||
6261 | [E](Instruction *I) { | |||
6262 | assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); | |||
6263 | return I->getOpcode() == E->getAltOpcode(); | |||
6264 | }, | |||
6265 | Mask, &OpScalars, &AltScalars); | |||
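// Illustrative example: for scalars {add, sub, add, sub} with no reordering
// or reuse indices, the predicate selects the alternate (sub) lanes, so the
// mask becomes {0, 5, 2, 7}: even lanes taken from V0 (main opcode), odd
// lanes from V1 (alternate opcode).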
6266 | ||||
6267 | propagateIRFlags(V0, OpScalars); | |||
6268 | propagateIRFlags(V1, AltScalars); | |||
6269 | ||||
6270 | Value *V = Builder.CreateShuffleVector(V0, V1, Mask); | |||
6271 | if (Instruction *I = dyn_cast<Instruction>(V)) | |||
6272 | V = propagateMetadata(I, E->Scalars); | |||
6273 | V = ShuffleBuilder.finalize(V); | |||
6274 | ||||
6275 | E->VectorizedValue = V; | |||
6276 | ++NumVectorInstructions; | |||
6277 | ||||
6278 | return V; | |||
6279 | } | |||
6280 | default: | |||
6281 | llvm_unreachable("unknown inst"); | |||
6282 | } | |||
6283 | return nullptr; | |||
6284 | } | |||
6285 | ||||
6286 | Value *BoUpSLP::vectorizeTree() { | |||
6287 | ExtraValueToDebugLocsMap ExternallyUsedValues; | |||
6288 | return vectorizeTree(ExternallyUsedValues); | |||
6289 | } | |||
6290 | ||||
6291 | Value * | |||
6292 | BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { | |||
6293 | // All blocks must be scheduled before any instructions are inserted. | |||
6294 | for (auto &BSIter : BlocksSchedules) { | |||
6295 | scheduleBlock(BSIter.second.get()); | |||
| ||||
6296 | } | |||
6297 | ||||
6298 | Builder.SetInsertPoint(&F->getEntryBlock().front()); | |||
6299 | auto *VectorRoot = vectorizeTree(VectorizableTree[0].get()); | |||
6300 | ||||
6301 | // If the vectorized tree can be rewritten in a smaller type, we truncate the | |||
6302 | // vectorized root. InstCombine will then rewrite the entire expression. We | |||
6303 | // sign extend the extracted values below. | |||
6304 | auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; | |||
6305 | if (MinBWs.count(ScalarRoot)) { | |||
6306 | if (auto *I = dyn_cast<Instruction>(VectorRoot)) { | |||
6307 | // If current instr is a phi and not the last phi, insert it after the | |||
6308 | // last phi node. | |||
6309 | if (isa<PHINode>(I)) | |||
6310 | Builder.SetInsertPoint(&*I->getParent()->getFirstInsertionPt()); | |||
6311 | else | |||
6312 | Builder.SetInsertPoint(&*++BasicBlock::iterator(I)); | |||
6313 | } | |||
6314 | auto BundleWidth = VectorizableTree[0]->Scalars.size(); | |||
6315 | auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); | |||
6316 | auto *VecTy = FixedVectorType::get(MinTy, BundleWidth); | |||
6317 | auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy); | |||
6318 | VectorizableTree[0]->VectorizedValue = Trunc; | |||
6319 | } | |||
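// Example: if demanded-bits analysis recorded that the root scalars only need
// 8 bits, a <4 x i32> root is truncated to <4 x i8> here, and the extracts
// created below sign- or zero-extend each lane back to the original scalar
// type.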
6320 | ||||
6321 | LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() | |||
6322 | << " values .\n"); | |||
6323 | ||||
6324 | // Extract all of the elements with the external uses. | |||
6325 | for (const auto &ExternalUse : ExternalUses) { | |||
6326 | Value *Scalar = ExternalUse.Scalar; | |||
6327 | llvm::User *User = ExternalUse.User; | |||
6328 | ||||
6329 | // Skip users that have already been replaced via RAUW. This happens when | |||
6330 | // one instruction has multiple uses of the same value. | |||
6331 | if (User && !is_contained(Scalar->users(), User)) | |||
6332 | continue; | |||
6333 | TreeEntry *E = getTreeEntry(Scalar); | |||
6334 | assert(E && "Invalid scalar"); | |||
6335 | assert(E->State != TreeEntry::NeedToGather && | |||
6336 | "Extracting from a gather list"); | |||
6337 | ||||
6338 | Value *Vec = E->VectorizedValue; | |||
6339 | assert(Vec && "Can't find vectorizable value"); | |||
6340 | ||||
6341 | Value *Lane = Builder.getInt32(ExternalUse.Lane); | |||
6342 | auto ExtractAndExtendIfNeeded = [&](Value *Vec) { | |||
6343 | if (Scalar->getType() != Vec->getType()) { | |||
6344 | Value *Ex; | |||
6345 | // "Reuse" the existing extract to improve final codegen. | |||
6346 | if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) { | |||
6347 | Ex = Builder.CreateExtractElement(ES->getOperand(0), | |||
6348 | ES->getOperand(1)); | |||
6349 | } else { | |||
6350 | Ex = Builder.CreateExtractElement(Vec, Lane); | |||
6351 | } | |||
6352 | // If necessary, sign-extend or zero-extend ScalarRoot | |||
6353 | // to the larger type. | |||
6354 | if (!MinBWs.count(ScalarRoot)) | |||
6355 | return Ex; | |||
6356 | if (MinBWs[ScalarRoot].second) | |||
6357 | return Builder.CreateSExt(Ex, Scalar->getType()); | |||
6358 | return Builder.CreateZExt(Ex, Scalar->getType()); | |||
6359 | } | |||
6360 | assert(isa<FixedVectorType>(Scalar->getType()) && | |||
6361 | isa<InsertElementInst>(Scalar) && | |||
6362 | "In-tree scalar of vector type is not insertelement?"); | |||
6363 | return Vec; | |||
6364 | }; | |||
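// For scalar-typed values the helper above extracts the requested lane
// (reusing the operands of an existing extractelement when the scalar itself
// is one) and, if the root was narrowed via MinBWs, sign- or zero-extends the
// result back to the scalar's original type; vector-typed insertelement
// scalars are returned as the whole vector.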
6365 | // If User == nullptr, the Scalar is used as an extra argument. Generate an | |||
6366 | // ExtractElement instruction and update the record for this scalar in | |||
6367 | // ExternallyUsedValues. | |||
6368 | if (!User) { | |||
6369 | assert(ExternallyUsedValues.count(Scalar) && | |||
6370 | "Scalar with nullptr as an external user must be registered in " | |||
6371 | "ExternallyUsedValues map"); | |||
6372 | if (auto *VecI = dyn_cast<Instruction>(Vec)) { | |||
6373 | Builder.SetInsertPoint(VecI->getParent(), | |||
6374 | std::next(VecI->getIterator())); | |||
6375 | } else { | |||
6376 | Builder.SetInsertPoint(&F->getEntryBlock().front()); | |||
6377 | } | |||
6378 | Value *NewInst = ExtractAndExtendIfNeeded(Vec); | |||
6379 | CSEBlocks.insert(cast<Instruction>(Scalar)->getParent()); | |||
6380 | auto &NewInstLocs = ExternallyUsedValues[NewInst]; | |||
6381 | auto It = ExternallyUsedValues.find(Scalar); | |||
6382 | assert(It != ExternallyUsedValues.end() && | |||
6383 | "Externally used scalar is not found in ExternallyUsedValues"); | |||
6384 | NewInstLocs.append(It->second); | |||
6385 | ExternallyUsedValues.erase(Scalar); | |||
6386 | // Required to update internally referenced instructions. | |||
6387 | Scalar->replaceAllUsesWith(NewInst); | |||
6388 | continue; | |||
6389 | } | |||
6390 | ||||
6391 | // Generate extracts for out-of-tree users. | |||
6392 | // Find the insertion point for the extractelement lane. | |||
6393 | if (auto *VecI = dyn_cast<Instruction>(Vec)) { | |||
6394 | if (PHINode *PH = dyn_cast<PHINode>(User)) { | |||
6395 | for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { | |||
6396 | if (PH->getIncomingValue(i) == Scalar) { | |||
6397 | Instruction *IncomingTerminator = | |||
6398 | PH->getIncomingBlock(i)->getTerminator(); | |||
6399 | if (isa<CatchSwitchInst>(IncomingTerminator)) { | |||
6400 | Builder.SetInsertPoint(VecI->getParent(), | |||
6401 | std::next(VecI->getIterator())); | |||
6402 | } else { | |||
6403 | Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); | |||
6404 | } | |||
6405 | Value *NewInst = ExtractAndExtendIfNeeded(Vec); | |||
6406 | CSEBlocks.insert(PH->getIncomingBlock(i)); | |||
6407 | PH->setOperand(i, NewInst); | |||
6408 | } | |||
6409 | } | |||
6410 | } else { | |||
6411 | Builder.SetInsertPoint(cast<Instruction>(User)); | |||
6412 | Value *NewInst = ExtractAndExtendIfNeeded(Vec); | |||
6413 | CSEBlocks.insert(cast<Instruction>(User)->getParent()); | |||
6414 | User->replaceUsesOfWith(Scalar, NewInst); | |||
6415 | } | |||
6416 | } else { | |||
6417 | Builder.SetInsertPoint(&F->getEntryBlock().front()); | |||
6418 | Value *NewInst = ExtractAndExtendIfNeeded(Vec); | |||
6419 | CSEBlocks.insert(&F->getEntryBlock()); | |||
6420 | User->replaceUsesOfWith(Scalar, NewInst); | |||
6421 | } | |||
6422 | ||||
6423 | LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); | |||
6424 | } | |||
6425 | ||||
6426 | // For each vectorized value: | |||
6427 | for (auto &TEPtr : VectorizableTree) { | |||
6428 | TreeEntry *Entry = TEPtr.get(); | |||
6429 | ||||
6430 | // No need to handle users of gathered values. | |||
6431 | if (Entry->State == TreeEntry::NeedToGather) | |||
6432 | continue; | |||
6433 | ||||
6434 | assert(Entry->VectorizedValue && "Can't find vectorizable value"); | |||
6435 | ||||
6436 | // For each lane: | |||
6437 | for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { | |||
6438 | Value *Scalar = Entry->Scalars[Lane]; | |||
6439 | ||||
6440 | #ifndef NDEBUG | |||
6441 | Type *Ty = Scalar->getType(); | |||
6442 | if (!Ty->isVoidTy()) { | |||
6443 | for (User *U : Scalar->users()) { | |||
6444 | LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); | |||
6445 | ||||
6446 | // It is legal to delete users in the ignorelist. | |||
6447 | assert((getTreeEntry(U) || is_contained(UserIgnoreList, U) || | |||
6448 | (isa_and_nonnull<Instruction>(U) && | |||
6449 | isDeleted(cast<Instruction>(U)))) && | |||
6450 | "Deleting out-of-tree value"); | |||
6451 | } | |||
6452 | } | |||
6453 | #endif | |||
6454 | LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); | |||
6455 | eraseInstruction(cast<Instruction>(Scalar)); | |||
6456 | } | |||
6457 | } | |||
6458 | ||||
6459 | Builder.ClearInsertionPoint(); | |||
6460 | InstrElementSize.clear(); | |||
6461 | ||||
6462 | return VectorizableTree[0]->VectorizedValue; | |||
6463 | } | |||
6464 | ||||
6465 | void BoUpSLP::optimizeGatherSequence() { | |||
6466 | LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() | |||
6467 | << " gather sequences instructions.\n"); | |||
6468 | // LICM InsertElementInst sequences. | |||
6469 | for (Instruction *I : GatherSeq) { | |||
6470 | if (isDeleted(I)) | |||
6471 | continue; | |||
6472 | ||||
6473 | // Check if this block is inside a loop. | |||
6474 | Loop *L = LI->getLoopFor(I->getParent()); | |||
6475 | if (!L) | |||
6476 | continue; | |||
6477 | ||||
6478 | // Check if it has a preheader. | |||
6479 | BasicBlock *PreHeader = L->getLoopPreheader(); | |||
6480 | if (!PreHeader) | |||
6481 | continue; | |||
6482 | ||||
6483 | // If the vector or the element that we insert into it are | |||
6484 | // instructions that are defined in this basic block then we can't | |||
6485 | // hoist this instruction. | |||
6486 | auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); | |||
6487 | auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); | |||
6488 | if (Op0 && L->contains(Op0)) | |||
6489 | continue; | |||
6490 | if (Op1 && L->contains(Op1)) | |||
6491 | continue; | |||
6492 | ||||
6493 | // We can hoist this instruction. Move it to the pre-header. | |||
6494 | I->moveBefore(PreHeader->getTerminator()); | |||
6495 | } | |||
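// For instance, an insertelement of a loop-invariant value into a
// loop-invariant vector is hoisted to the preheader by this loop, while one
// whose vector or inserted value is computed inside the loop is left in
// place.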
6496 | ||||
6497 | // Make a list of all reachable blocks in our CSE queue. | |||
6498 | SmallVector<const DomTreeNode *, 8> CSEWorkList; | |||
6499 | CSEWorkList.reserve(CSEBlocks.size()); | |||
6500 | for (BasicBlock *BB : CSEBlocks) | |||
6501 | if (DomTreeNode *N = DT->getNode(BB)) { | |||
6502 | assert(DT->isReachableFromEntry(N)); | |||
6503 | CSEWorkList.push_back(N); | |||
6504 | } | |||
6505 | ||||
6506 | // Sort blocks by domination. This ensures we visit a block after all blocks | |||
6507 | // dominating it are visited. | |||
6508 | llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) { | |||
6509 | assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) && | |||
6510 | "Different nodes should have different DFS numbers"); | |||
6511 | return A->getDFSNumIn() < B->getDFSNumIn(); | |||
6512 | }); | |||
6513 | ||||
6514 | // Perform O(N^2) search over the gather sequences and merge identical | |||
6515 | // instructions. TODO: We can further optimize this scan if we split the | |||
6516 | // instructions into different buckets based on the insert lane. | |||
6517 | SmallVector<Instruction *, 16> Visited; | |||
6518 | for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { | |||
6519 | assert(*I && | |||
6520 | (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && | |||
6521 | "Worklist not sorted properly!"); | |||
6522 | BasicBlock *BB = (*I)->getBlock(); | |||
6523 | // For all instructions in blocks containing gather sequences: | |||
6524 | for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) { | |||
6525 | Instruction *In = &*it++; | |||
6526 | if (isDeleted(In)) | |||
6527 | continue; | |||
6528 | if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In) && | |||
6529 | !isa<ShuffleVectorInst>(In)) | |||
6530 | continue; | |||
6531 | ||||
6532 | // Check if we can replace this instruction with any of the | |||
6533 | // visited instructions. | |||
6534 | for (Instruction *v : Visited) { | |||
6535 | if (In->isIdenticalTo(v) && | |||
6536 | DT->dominates(v->getParent(), In->getParent())) { | |||
6537 | In->replaceAllUsesWith(v); | |||
6538 | eraseInstruction(In); | |||
6539 | In = nullptr; | |||
6540 | break; | |||
6541 | } | |||
6542 | } | |||
6543 | if (In) { | |||
6544 | assert(!is_contained(Visited, In)); | |||
6545 | Visited.push_back(In); | |||
6546 | } | |||
6547 | } | |||
6548 | } | |||
6549 | CSEBlocks.clear(); | |||
6550 | GatherSeq.clear(); | |||
6551 | } | |||
6552 | ||||
6553 | // Groups the instructions into a bundle (which is then a single scheduling | |||
6554 | // entity) and schedules instructions until the bundle gets ready. | |||
6555 | Optional<BoUpSLP::ScheduleData *> | |||
6556 | BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, | |||
6557 | const InstructionsState &S) { | |||
6558 | // No need to schedule PHIs, insertelement, extractelement and extractvalue | |||
6559 | // instructions. | |||
6560 | if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue)) | |||
6561 | return nullptr; | |||
6562 | ||||
6563 | // Initialize the instruction bundle. | |||
6564 | Instruction *OldScheduleEnd = ScheduleEnd; | |||
6565 | ScheduleData *PrevInBundle = nullptr; | |||
6566 | ScheduleData *Bundle = nullptr; | |||
6567 | bool ReSchedule = false; | |||
6568 | LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n"); | |||
6569 | ||||
6570 | auto &&TryScheduleBundle = [this, OldScheduleEnd, SLP](bool ReSchedule, | |||
6571 | ScheduleData *Bundle) { | |||
6572 | // The scheduling region got new instructions at the lower end (or it is a | |||
6573 | // new region for the first bundle). This makes it necessary to | |||
6574 | // recalculate all dependencies. | |||
6575 | // It is seldom that this needs to be done a second time after adding the | |||
6576 | // initial bundle to the region. | |||
6577 | if (ScheduleEnd != OldScheduleEnd) { | |||
6578 | for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) | |||
6579 | doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); }); | |||
6580 | ReSchedule = true; | |||
6581 | } | |||
6582 | if (ReSchedule) { | |||
6583 | resetSchedule(); | |||
6584 | initialFillReadyList(ReadyInsts); | |||
6585 | } | |||
6586 | if (Bundle) { | |||
6587 | LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle | |||
6588 | << " in block " << BB->getName() << "\n"); | |||
6589 | calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP); | |||
6590 | } | |||
6591 | ||||
6592 | // Now try to schedule the new bundle or (if no bundle) just calculate | |||
6593 | // dependencies. As soon as the bundle is "ready" it means that there are no | |||
6594 | // cyclic dependencies and we can schedule it. Note that it's important that we | |||
6595 | // don't "schedule" the bundle yet (see cancelScheduling). | |||
6596 | while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) && | |||
6597 | !ReadyInsts.empty()) { | |||
6598 | ScheduleData *Picked = ReadyInsts.pop_back_val(); | |||
6599 | if (Picked->isSchedulingEntity() && Picked->isReady()) | |||
6600 | schedule(Picked, ReadyInsts); | |||
6601 | } | |||
6602 | }; | |||
6603 | ||||
6604 | // Make sure that the scheduling region contains all | |||
6605 | // instructions of the bundle. | |||
6606 | for (Value *V : VL) { | |||
6607 | if (!extendSchedulingRegion(V, S)) { | |||
6608 | // If the scheduling region got new instructions at the lower end (or it | |||
6609 | // is a new region for the first bundle), all dependencies must be | |||
6610 | // recalculated before bailing out. | |||
6611 | // Otherwise the compiler may crash trying to incorrectly calculate | |||
6612 | // dependencies and emit instructions in the wrong order at the actual | |||
6613 | // scheduling. | |||
6614 | TryScheduleBundle(/*ReSchedule=*/false, nullptr); | |||
6615 | return None; | |||
6616 | } | |||
6617 | } | |||
6618 | ||||
6619 | for (Value *V : VL) { | |||
6620 | ScheduleData *BundleMember = getScheduleData(V); | |||
6621 | assert(BundleMember && | |||
6622 | "no ScheduleData for bundle member (maybe not in same basic block)"); | |||
6623 | if (BundleMember->IsScheduled) { | |||
6624 | // A bundle member was scheduled as single instruction before and now | |||
6625 | // needs to be scheduled as part of the bundle. We just get rid of the | |||
6626 | // existing schedule. | |||
6627 | LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember | |||
6628 | << " was already scheduled\n"); | |||
6629 | ReSchedule = true; | |||
6630 | } | |||
6631 | assert(BundleMember->isSchedulingEntity() && | |||
6632 | "bundle member already part of other bundle"); | |||
6633 | if (PrevInBundle) { | |||
6634 | PrevInBundle->NextInBundle = BundleMember; | |||
6635 | } else { | |||
6636 | Bundle = BundleMember; | |||
6637 | } | |||
6638 | BundleMember->UnscheduledDepsInBundle = 0; | |||
6639 | Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps; | |||
6640 | ||||
6641 | // Group the instructions to a bundle. | |||
6642 | BundleMember->FirstInBundle = Bundle; | |||
6643 | PrevInBundle = BundleMember; | |||
6644 | } | |||
6645 | assert(Bundle && "Failed to find schedule bundle"); | |||
6646 | TryScheduleBundle(ReSchedule, Bundle); | |||
6647 | if (!Bundle->isReady()) { | |||
6648 | cancelScheduling(VL, S.OpValue); | |||
6649 | return None; | |||
6650 | } | |||
6651 | return Bundle; | |||
6652 | } | |||
6653 | ||||
6654 | void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL, | |||
6655 | Value *OpValue) { | |||
6656 | if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue)) | |||
6657 | return; | |||
6658 | ||||
6659 | ScheduleData *Bundle = getScheduleData(OpValue); | |||
6660 | LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); | |||
6661 | assert(!Bundle->IsScheduled && | |||
6662 | "Can't cancel bundle which is already scheduled"); | |||
6663 | assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() && | |||
6664 | "tried to unbundle something which is not a bundle"); | |||
6665 | ||||
6666 | // Un-bundle: make single instructions out of the bundle. | |||
6667 | ScheduleData *BundleMember = Bundle; | |||
6668 | while (BundleMember) { | |||
6669 | assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); | |||
6670 | BundleMember->FirstInBundle = BundleMember; | |||
6671 | ScheduleData *Next = BundleMember->NextInBundle; | |||
6672 | BundleMember->NextInBundle = nullptr; | |||
6673 | BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps; | |||
6674 | if (BundleMember->UnscheduledDepsInBundle == 0) { | |||
6675 | ReadyInsts.insert(BundleMember); | |||
6676 | } | |||
6677 | BundleMember = Next; | |||
6678 | } | |||
6679 | } | |||
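tryScheduleBundle and cancelScheduling maintain an intrusive singly linked list per bundle through FirstInBundle/NextInBundle: every member points at the bundle head and at the next member, and cancelling simply re-points every member at itself. A minimal standalone sketch of that bookkeeping, using an assumed simplified Node type instead of ScheduleData:

#include <cassert>
#include <vector>

struct Node {
  Node *FirstInBundle = nullptr;
  Node *NextInBundle = nullptr;
};

// Group nodes into one bundle and return its head.
Node *bundle(std::vector<Node *> &Members) {
  Node *Head = nullptr, *Prev = nullptr;
  for (Node *N : Members) {
    if (Prev)
      Prev->NextInBundle = N;
    else
      Head = N;
    N->FirstInBundle = Head;
    Prev = N;
  }
  return Head;
}

// Un-bundle: make single "instructions" out of the bundle again.
void unbundle(Node *Head) {
  for (Node *N = Head; N;) {
    Node *Next = N->NextInBundle;
    N->FirstInBundle = N;
    N->NextInBundle = nullptr;
    N = Next;
  }
}

int main() {
  Node A, B, C;
  std::vector<Node *> VL = {&A, &B, &C};
  Node *Head = bundle(VL);
  assert(Head == &A && B.FirstInBundle == &A && A.NextInBundle == &B);
  unbundle(Head);
  assert(B.FirstInBundle == &B && !A.NextInBundle);
}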
6680 | ||||
6681 | BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() { | |||
6682 | // Allocate a new ScheduleData for the instruction. | |||
6683 | if (ChunkPos >= ChunkSize) { | |||
6684 | ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize)); | |||
6685 | ChunkPos = 0; | |||
6686 | } | |||
6687 | return &(ScheduleDataChunks.back()[ChunkPos++]); | |||
6688 | } | |||
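allocateScheduleDataChunks hands out ScheduleData objects from fixed-size arrays so pointers stay stable and individual objects are never freed. A self-contained sketch of the same chunked-arena pattern; the names here are assumptions for illustration, not LLVM API:

#include <cstddef>
#include <memory>
#include <vector>

template <typename T, std::size_t ChunkSize = 256> class ChunkArena {
  std::vector<std::unique_ptr<T[]>> Chunks;
  std::size_t Pos = ChunkSize; // forces a fresh chunk on the first allocation
public:
  T *allocate() {
    if (Pos >= ChunkSize) {
      Chunks.push_back(std::make_unique<T[]>(ChunkSize));
      Pos = 0;
    }
    return &Chunks.back()[Pos++]; // stable pointer into the current chunk
  }
};

int main() {
  struct ScheduleDataLike { int SchedulingPriority = 0; };
  ChunkArena<ScheduleDataLike> Arena;
  ScheduleDataLike *A = Arena.allocate();
  ScheduleDataLike *B = Arena.allocate();
  return A != B ? 0 : 1; // two distinct objects from the same chunk
}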
6689 | ||||
6690 | bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, | |||
6691 | const InstructionsState &S) { | |||
6692 | if (getScheduleData(V, isOneOf(S, V))) | |||
6693 | return true; | |||
6694 | Instruction *I = dyn_cast<Instruction>(V); | |||
6695 | assert(I && "bundle member must be an instruction"); | |||
6696 | assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) && | |||
6697 | "phi nodes/insertelements/extractelements/extractvalues don't need to " | |||
6698 | "be scheduled"); | |||
6699 | auto &&CheckSheduleForI = [this, &S](Instruction *I) -> bool { | |||
6700 | ScheduleData *ISD = getScheduleData(I); | |||
6701 | if (!ISD) | |||
6702 | return false; | |||
6703 | assert(isInSchedulingRegion(ISD) && | |||
6704 | "ScheduleData not in scheduling region"); | |||
6705 | ScheduleData *SD = allocateScheduleDataChunks(); | |||
6706 | SD->Inst = I; | |||
6707 | SD->init(SchedulingRegionID, S.OpValue); | |||
6708 | ExtraScheduleDataMap[I][S.OpValue] = SD; | |||
6709 | return true; | |||
6710 | }; | |||
6711 | if (CheckSheduleForI(I)) | |||
6712 | return true; | |||
6713 | if (!ScheduleStart) { | |||
6714 | // It's the first instruction in the new region. | |||
6715 | initScheduleData(I, I->getNextNode(), nullptr, nullptr); | |||
6716 | ScheduleStart = I; | |||
6717 | ScheduleEnd = I->getNextNode(); | |||
6718 | if (isOneOf(S, I) != I) | |||
6719 | CheckSheduleForI(I); | |||
6720 | assert(ScheduleEnd && "tried to vectorize a terminator?"); | |||
6721 | LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); | |||
6722 | return true; | |||
6723 | } | |||
6724 | // Search up and down at the same time, because we don't know if the new | |||
6725 | // instruction is above or below the existing scheduling region. | |||
6726 | BasicBlock::reverse_iterator UpIter = | |||
6727 | ++ScheduleStart->getIterator().getReverse(); | |||
6728 | BasicBlock::reverse_iterator UpperEnd = BB->rend(); | |||
6729 | BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); | |||
6730 | BasicBlock::iterator LowerEnd = BB->end(); | |||
6731 | while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I && | |||
6732 | &*DownIter != I) { | |||
6733 | if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { | |||
6734 | LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); | |||
6735 | return false; | |||
6736 | } | |||
6737 | ||||
6738 | ++UpIter; | |||
6739 | ++DownIter; | |||
6740 | } | |||
6741 | if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) { | |||
6742 | assert(I->getParent() == ScheduleStart->getParent() && | |||
6743 | "Instruction is in wrong basic block."); | |||
6744 | initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); | |||
6745 | ScheduleStart = I; | |||
6746 | if (isOneOf(S, I) != I) | |||
6747 | CheckSheduleForI(I); | |||
6748 | LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I | |||
6749 | << "\n"); | |||
6750 | return true; | |||
6751 | } | |||
6752 | assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) && | |||
6753 | "Expected to reach top of the basic block or instruction down the " | |||
6754 | "lower end."); | |||
6755 | assert(I->getParent() == ScheduleEnd->getParent() && | |||
6756 | "Instruction is in wrong basic block."); | |||
6757 | initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, | |||
6758 | nullptr); | |||
6759 | ScheduleEnd = I->getNextNode(); | |||
6760 | if (isOneOf(S, I) != I) | |||
6761 | CheckSheduleForI(I); | |||
6762 | assert(ScheduleEnd && "tried to vectorize a terminator?"); | |||
6763 | LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); | |||
6764 | return true; | |||
6765 | } | |||
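extendSchedulingRegion searches upwards and downwards at the same time, so the cost depends on how far the new instruction is from the existing region rather than on the block size, and it gives up once a size limit is exceeded. A toy sketch of that two-directional search over plain indices; RegionSizeLimit and the index-based model are assumptions for illustration only:

#include <cstddef>

enum class Where { Above, Below, TooFar };

// Block positions are 0..BlockSize-1; the region is [RegionStart, RegionEnd).
Where locate(std::size_t BlockSize, std::size_t RegionStart,
             std::size_t RegionEnd, std::size_t NewIdx,
             std::size_t RegionSizeLimit) {
  std::size_t Up = RegionStart, Down = RegionEnd, Steps = 0;
  while (true) {
    if (Up > 0 && --Up == NewIdx)
      return Where::Above;            // extend the region start upwards
    if (Down < BlockSize && Down++ == NewIdx)
      return Where::Below;            // extend the region end downwards
    if (++Steps > RegionSizeLimit)
      return Where::TooFar;           // give up, like the size-limit bail-out
  }
}

int main() {
  // A 32-instruction block, region covering positions 10..11, new element at 4.
  return locate(32, 10, 12, 4, 16) == Where::Above ? 0 : 1;
}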
6766 | ||||
6767 | void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, | |||
6768 | Instruction *ToI, | |||
6769 | ScheduleData *PrevLoadStore, | |||
6770 | ScheduleData *NextLoadStore) { | |||
6771 | ScheduleData *CurrentLoadStore = PrevLoadStore; | |||
6772 | for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { | |||
6773 | ScheduleData *SD = ScheduleDataMap[I]; | |||
6774 | if (!SD) { | |||
6775 | SD = allocateScheduleDataChunks(); | |||
6776 | ScheduleDataMap[I] = SD; | |||
6777 | SD->Inst = I; | |||
6778 | } | |||
6779 | assert(!isInSchedulingRegion(SD) && | |||
6780 | "new ScheduleData already in scheduling region"); | |||
6781 | SD->init(SchedulingRegionID, I); | |||
6782 | ||||
6783 | if (I->mayReadOrWriteMemory() && | |||
6784 | (!isa<IntrinsicInst>(I) || | |||
6785 | (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect && | |||
6786 | cast<IntrinsicInst>(I)->getIntrinsicID() != | |||
6787 | Intrinsic::pseudoprobe))) { | |||
6788 | // Update the linked list of memory accessing instructions. | |||
6789 | if (CurrentLoadStore) { | |||
6790 | CurrentLoadStore->NextLoadStore = SD; | |||
6791 | } else { | |||
6792 | FirstLoadStoreInRegion = SD; | |||
6793 | } | |||
6794 | CurrentLoadStore = SD; | |||
6795 | } | |||
6796 | } | |||
6797 | if (NextLoadStore) { | |||
6798 | if (CurrentLoadStore) | |||
6799 | CurrentLoadStore->NextLoadStore = NextLoadStore; | |||
6800 | } else { | |||
6801 | LastLoadStoreInRegion = CurrentLoadStore; | |||
6802 | } | |||
6803 | } | |||
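initScheduleData walks the new part of the region once and threads only the memory-accessing instructions onto their own NextLoadStore chain, so later dependency checks can skip everything else. The standalone sketch below shows the same idea with an assumed, simplified node type:

#include <vector>

struct Node {
  bool AccessesMemory = false;
  Node *NextLoadStore = nullptr;
};

// Returns the first memory-accessing node of the region, or nullptr.
Node *threadLoadStores(std::vector<Node> &Region) {
  Node *First = nullptr, *Current = nullptr;
  for (Node &N : Region) {
    if (!N.AccessesMemory)
      continue;
    if (Current)
      Current->NextLoadStore = &N; // link to the previous load/store
    else
      First = &N;
    Current = &N;
  }
  return First;
}

int main() {
  std::vector<Node> Region(5);
  Region[1].AccessesMemory = Region[4].AccessesMemory = true;
  Node *First = threadLoadStores(Region);
  return (First == &Region[1] && First->NextLoadStore == &Region[4]) ? 0 : 1;
}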
6804 | ||||
6805 | void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, | |||
6806 | bool InsertInReadyList, | |||
6807 | BoUpSLP *SLP) { | |||
6808 | assert(SD->isSchedulingEntity()); | |||
6809 | ||||
6810 | SmallVector<ScheduleData *, 10> WorkList; | |||
6811 | WorkList.push_back(SD); | |||
6812 | ||||
6813 | while (!WorkList.empty()) { | |||
6814 | ScheduleData *SD = WorkList.pop_back_val(); | |||
6815 | ||||
6816 | ScheduleData *BundleMember = SD; | |||
6817 | while (BundleMember) { | |||
6818 | assert(isInSchedulingRegion(BundleMember)); | |||
6819 | if (!BundleMember->hasValidDependencies()) { | |||
6820 | ||||
6821 | LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember | |||
6822 | << "\n"); | |||
6823 | BundleMember->Dependencies = 0; | |||
6824 | BundleMember->resetUnscheduledDeps(); | |||
6825 | ||||
6826 | // Handle def-use chain dependencies. | |||
6827 | if (BundleMember->OpValue != BundleMember->Inst) { | |||
6828 | ScheduleData *UseSD = getScheduleData(BundleMember->Inst); | |||
6829 | if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { | |||
6830 | BundleMember->Dependencies++; | |||
6831 | ScheduleData *DestBundle = UseSD->FirstInBundle; | |||
6832 | if (!DestBundle->IsScheduled) | |||
6833 | BundleMember->incrementUnscheduledDeps(1); | |||
6834 | if (!DestBundle->hasValidDependencies()) | |||
6835 | WorkList.push_back(DestBundle); | |||
6836 | } | |||
6837 | } else { | |||
6838 | for (User *U : BundleMember->Inst->users()) { | |||
6839 | if (isa<Instruction>(U)) { | |||
6840 | ScheduleData *UseSD = getScheduleData(U); | |||
6841 | if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { | |||
6842 | BundleMember->Dependencies++; | |||
6843 | ScheduleData *DestBundle = UseSD->FirstInBundle; | |||
6844 | if (!DestBundle->IsScheduled) | |||
6845 | BundleMember->incrementUnscheduledDeps(1); | |||
6846 | if (!DestBundle->hasValidDependencies()) | |||
6847 | WorkList.push_back(DestBundle); | |||
6848 | } | |||
6849 | } else { | |||
6850 | // I'm not sure if this can ever happen. But we need to be safe. | |||
6851 | // This ensures that the instruction/bundle is never scheduled and | |||
6852 | // eventually disables vectorization. | |||
6853 | BundleMember->Dependencies++; | |||
6854 | BundleMember->incrementUnscheduledDeps(1); | |||
6855 | } | |||
6856 | } | |||
6857 | } | |||
6858 | ||||
6859 | // Handle the memory dependencies. | |||
6860 | ScheduleData *DepDest = BundleMember->NextLoadStore; | |||
6861 | if (DepDest) { | |||
6862 | Instruction *SrcInst = BundleMember->Inst; | |||
6863 | MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA); | |||
6864 | bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory(); | |||
6865 | unsigned numAliased = 0; | |||
6866 | unsigned DistToSrc = 1; | |||
6867 | ||||
6868 | while (DepDest) { | |||
6869 | assert(isInSchedulingRegion(DepDest)); | |||
6870 | ||||
6871 | // We have two limits to reduce the complexity: | |||
6872 | // 1) AliasedCheckLimit: It's a small limit to reduce calls to | |||
6873 | // SLP->isAliased (which is the expensive part in this loop). | |||
6874 | // 2) MaxMemDepDistance: It's for very large blocks and it aborts | |||
6875 | // the whole loop (even if the loop is fast, it's quadratic). | |||
6876 | // It's important for the loop break condition (see below) to | |||
6877 | // check this limit even between two read-only instructions. | |||
6878 | if (DistToSrc >= MaxMemDepDistance || | |||
6879 | ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && | |||
6880 | (numAliased >= AliasedCheckLimit || | |||
6881 | SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { | |||
6882 | ||||
6883 | // We increment the counter only if the locations are aliased | |||
6884 | // (instead of counting all alias checks). This gives a better | |||
6885 | // balance between reduced runtime and accurate dependencies. | |||
6886 | numAliased++; | |||
6887 | ||||
6888 | DepDest->MemoryDependencies.push_back(BundleMember); | |||
6889 | BundleMember->Dependencies++; | |||
6890 | ScheduleData *DestBundle = DepDest->FirstInBundle; | |||
6891 | if (!DestBundle->IsScheduled) { | |||
6892 | BundleMember->incrementUnscheduledDeps(1); | |||
6893 | } | |||
6894 | if (!DestBundle->hasValidDependencies()) { | |||
6895 | WorkList.push_back(DestBundle); | |||
6896 | } | |||
6897 | } | |||
6898 | DepDest = DepDest->NextLoadStore; | |||
6899 | ||||
6900 | // Example, explaining the loop break condition: Let's assume our | |||
6901 | // starting instruction is i0 and MaxMemDepDistance = 3. | |||
6902 | // | |||
6903 | // +--------v--v--v | |||
6904 | // i0,i1,i2,i3,i4,i5,i6,i7,i8 | |||
6905 | // +--------^--^--^ | |||
6906 | // | |||
6907 | // MaxMemDepDistance lets us stop alias-checking at i3 and we add | |||
6908 | // dependencies from i0 to i3,i4,.. (even if they are not aliased). | |||
6909 | // Previously we already added dependencies from i3 to i6,i7,i8 | |||
6910 | // (because of MaxMemDepDistance). As we added a dependency from | |||
6911 | // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 | |||
6912 | // and we can abort this loop at i6. | |||
6913 | if (DistToSrc >= 2 * MaxMemDepDistance) | |||
6914 | break; | |||
6915 | DistToSrc++; | |||
6916 | } | |||
6917 | } | |||
6918 | } | |||
6919 | BundleMember = BundleMember->NextInBundle; | |||
6920 | } | |||
6921 | if (InsertInReadyList && SD->isReady()) { | |||
6922 | ReadyInsts.push_back(SD); | |||
6923 | LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst | |||
6924 | << "\n"); | |||
6925 | } | |||
6926 | } | |||
6927 | } | |||
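The comment block inside the memory-dependence loop explains why the walk can stop at 2 * MaxMemDepDistance. The small demo below replays that i0..i6 example with the same assumed constant (MaxMemDepDistance = 3); it only prints which pairs would get a real alias check versus a conservative dependence:

#include <cstdio>

int main() {
  const unsigned MaxMemDepDistance = 3; // assumed small value, as in the comment
  for (unsigned DistToSrc = 1;; ++DistToSrc) {
    bool Conservative = DistToSrc >= MaxMemDepDistance;
    std::printf("i0 -> i%u : %s\n", DistToSrc,
                Conservative ? "dependence added without alias check"
                             : "alias check decides");
    if (DistToSrc >= 2 * MaxMemDepDistance)
      break; // transitive deps through i3 already cover anything further away
  }
}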
6928 | ||||
6929 | void BoUpSLP::BlockScheduling::resetSchedule() { | |||
6930 | assert(ScheduleStart && | |||
6931 | "tried to reset schedule on block which has not been scheduled"); | |||
6932 | for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { | |||
6933 | doForAllOpcodes(I, [&](ScheduleData *SD) { | |||
6934 | assert(isInSchedulingRegion(SD) && | |||
6935 | "ScheduleData not in scheduling region"); | |||
6936 | SD->IsScheduled = false; | |||
6937 | SD->resetUnscheduledDeps(); | |||
6938 | }); | |||
6939 | } | |||
6940 | ReadyInsts.clear(); | |||
6941 | } | |||
6942 | ||||
6943 | void BoUpSLP::scheduleBlock(BlockScheduling *BS) { | |||
6944 | if (!BS->ScheduleStart) | |||
6945 | return; | |||
6946 | ||||
6947 | LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); | |||
6948 | ||||
6949 | BS->resetSchedule(); | |||
6950 | ||||
6951 | // For the real scheduling we use a more sophisticated ready-list: it is | |||
6952 | // sorted by the original instruction location. This lets the final schedule | |||
6953 | // be as close as possible to the original instruction order. | |||
6954 | struct ScheduleDataCompare { | |||
6955 | bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { | |||
6956 | return SD2->SchedulingPriority < SD1->SchedulingPriority; | |||
6957 | } | |||
6958 | }; | |||
6959 | std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; | |||
6960 | ||||
6961 | // Ensure that all dependency data is updated and fill the ready-list with | |||
6962 | // initial instructions. | |||
6963 | int Idx = 0; | |||
6964 | int NumToSchedule = 0; | |||
6965 | for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; | |||
6966 | I = I->getNextNode()) { | |||
6967 | BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) { | |||
6968 | assert((isVectorLikeInstWithConstOps(SD->Inst) || | |||
6969 | SD->isPartOfBundle() == (getTreeEntry(SD->Inst) != nullptr)) && | |||
6970 | "scheduler and vectorizer bundle mismatch"); | |||
6971 | SD->FirstInBundle->SchedulingPriority = Idx++; | |||
6972 | if (SD->isSchedulingEntity()) { | |||
6973 | BS->calculateDependencies(SD, false, this); | |||
6974 | NumToSchedule++; | |||
6975 | } | |||
6976 | }); | |||
6977 | } | |||
6978 | BS->initialFillReadyList(ReadyInsts); | |||
6979 | ||||
6980 | Instruction *LastScheduledInst = BS->ScheduleEnd; | |||
6981 | ||||
6982 | // Do the "real" scheduling. | |||
6983 | while (!ReadyInsts.empty()) { | |||
6984 | ScheduleData *picked = *ReadyInsts.begin(); | |||
6985 | ReadyInsts.erase(ReadyInsts.begin()); | |||
6986 | ||||
6987 | // Move the scheduled instruction(s) to their dedicated places, if not | |||
6988 | // there yet. | |||
6989 | ScheduleData *BundleMember = picked; | |||
6990 | while (BundleMember) { | |||
6991 | Instruction *pickedInst = BundleMember->Inst; | |||
6992 | if (pickedInst->getNextNode() != LastScheduledInst) { | |||
6993 | BS->BB->getInstList().remove(pickedInst); | |||
6994 | BS->BB->getInstList().insert(LastScheduledInst->getIterator(), | |||
6995 | pickedInst); | |||
6996 | } | |||
6997 | LastScheduledInst = pickedInst; | |||
6998 | BundleMember = BundleMember->NextInBundle; | |||
6999 | } | |||
7000 | ||||
7001 | BS->schedule(picked, ReadyInsts); | |||
7002 | NumToSchedule--; | |||
7003 | } | |||
7004 | assert(NumToSchedule == 0 && "could not schedule all instructions"); | |||
7005 | ||||
7006 | // Avoid duplicate scheduling of the block. | |||
7007 | BS->ScheduleStart = nullptr; | |||
7008 | } | |||
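scheduleBlock keeps its ready list in a std::set keyed on SchedulingPriority, which was assigned in original instruction order, so every pick is deterministic. A minimal sketch of that comparator-ordered set with an assumed simplified type and made-up priorities; note that with the SD2 < SD1 comparison mirrored from ScheduleDataCompare, the entry with the largest priority value sits at begin():

#include <cassert>
#include <set>

struct ScheduleDataLike { int SchedulingPriority; };

// Same shape as ScheduleDataCompare above.
struct ByPriority {
  bool operator()(const ScheduleDataLike *SD1, const ScheduleDataLike *SD2) const {
    return SD2->SchedulingPriority < SD1->SchedulingPriority;
  }
};

int main() {
  ScheduleDataLike X{2}, Y{0}, Z{1}; // priorities assigned in instruction order
  std::set<ScheduleDataLike *, ByPriority> Ready = {&X, &Y, &Z};
  assert(*Ready.begin() == &X); // deterministic pick, keyed on priority
  Ready.erase(Ready.begin());
  assert(*Ready.begin() == &Z);
}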
7009 | ||||
7010 | unsigned BoUpSLP::getVectorElementSize(Value *V) { | |||
7011 | // If V is a store, just return the width of the stored value (or value | |||
7012 | // truncated just before storing) without traversing the expression tree. | |||
7013 | // This is the common case. | |||
7014 | if (auto *Store = dyn_cast<StoreInst>(V)) { | |||
7015 | if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand())) | |||
7016 | return DL->getTypeSizeInBits(Trunc->getSrcTy()); | |||
7017 | return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); | |||
7018 | } | |||
7019 | ||||
7020 | if (auto *IEI = dyn_cast<InsertElementInst>(V)) | |||
7021 | return getVectorElementSize(IEI->getOperand(1)); | |||
7022 | ||||
7023 | auto E = InstrElementSize.find(V); | |||
7024 | if (E != InstrElementSize.end()) | |||
7025 | return E->second; | |||
7026 | ||||
7027 | // If V is not a store, we can traverse the expression tree to find loads | |||
7028 | // that feed it. The type of the loaded value may indicate a more suitable | |||
7029 | // width than V's type. We want to base the vector element size on the width | |||
7030 | // of memory operations where possible. | |||
7031 | SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist; | |||
7032 | SmallPtrSet<Instruction *, 16> Visited; | |||
7033 | if (auto *I = dyn_cast<Instruction>(V)) { | |||
7034 | Worklist.emplace_back(I, I->getParent()); | |||
7035 | Visited.insert(I); | |||
7036 | } | |||
7037 | ||||
7038 | // Traverse the expression tree in bottom-up order looking for loads. If we | |||
7039 | // encounter an instruction we don't yet handle, we give up. | |||
7040 | auto Width = 0u; | |||
7041 | while (!Worklist.empty()) { | |||
7042 | Instruction *I; | |||
7043 | BasicBlock *Parent; | |||
7044 | std::tie(I, Parent) = Worklist.pop_back_val(); | |||
7045 | ||||
7046 | // We should only be looking at scalar instructions here. If the current | |||
7047 | // instruction has a vector type, skip. | |||
7048 | auto *Ty = I->getType(); | |||
7049 | if (isa<VectorType>(Ty)) | |||
7050 | continue; | |||
7051 | ||||
7052 | // If the current instruction is a load, update Width to reflect the | |||
7053 | // width of the loaded value. | |||
7054 | if (isa<LoadInst>(I) || isa<ExtractElementInst>(I) || | |||
7055 | isa<ExtractValueInst>(I)) | |||
7056 | Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty)); | |||
7057 | ||||
7058 | // Otherwise, we need to visit the operands of the instruction. We only | |||
7059 | // handle the interesting cases from buildTree here. If an operand is an | |||
7060 | // instruction we haven't yet visited and is in the same basic block as the | |||
7061 | // user (or the user is a PHI node), we add it to the worklist. | |||
7062 | else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || | |||
7063 | isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I) || | |||
7064 | isa<UnaryOperator>(I)) { | |||
7065 | for (Use &U : I->operands()) | |||
7066 | if (auto *J = dyn_cast<Instruction>(U.get())) | |||
7067 | if (Visited.insert(J).second && | |||
7068 | (isa<PHINode>(I) || J->getParent() == Parent)) | |||
7069 | Worklist.emplace_back(J, J->getParent()); | |||
7070 | } else { | |||
7071 | break; | |||
7072 | } | |||
7073 | } | |||
7074 | ||||
7075 | // If we didn't encounter a memory access in the expression tree, or if we | |||
7076 | // gave up for some reason, just return the width of V. Otherwise, return the | |||
7077 | // maximum width we found. | |||
7078 | if (!Width) { | |||
7079 | if (auto *CI = dyn_cast<CmpInst>(V)) | |||
7080 | V = CI->getOperand(0); | |||
7081 | Width = DL->getTypeSizeInBits(V->getType()); | |||
7082 | } | |||
7083 | ||||
7084 | for (Instruction *I : Visited) | |||
7085 | InstrElementSize[I] = Width; | |||
7086 | ||||
7087 | return Width; | |||
7088 | } | |||
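getVectorElementSize does a bottom-up worklist walk looking for the widest memory access feeding the expression and falls back to the root's own width when none is found. A toy sketch over an assumed expression node type (the real code also skips vector types, restricts operand kinds, and bails on unhandled opcodes):

#include <algorithm>
#include <cstdio>
#include <vector>

struct Expr {
  bool IsLoad = false;
  unsigned Bits = 0; // meaningful for loads and for the root
  std::vector<Expr *> Operands;
};

unsigned elementSize(const Expr &Root) {
  unsigned Width = 0;
  std::vector<const Expr *> Worklist = {&Root};
  while (!Worklist.empty()) {
    const Expr *E = Worklist.back();
    Worklist.pop_back();
    if (E->IsLoad)
      Width = std::max(Width, E->Bits);
    for (const Expr *Op : E->Operands)
      Worklist.push_back(Op);
  }
  return Width ? Width : Root.Bits; // no memory access found: use root width
}

int main() {
  Expr Load8{true, 8, {}}, Load16{true, 16, {}};
  Expr Add{false, 32, {&Load8, &Load16}};
  std::printf("element size = %u bits\n", elementSize(Add)); // prints 16
}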
7089 | ||||
7090 | // Determine if a value V in a vectorizable expression Expr can be demoted to a | |||
7091 | // smaller type with a truncation. We collect the values that will be demoted | |||
7092 | // in ToDemote and additional roots that require investigating in Roots. | |||
7093 | static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr, | |||
7094 | SmallVectorImpl<Value *> &ToDemote, | |||
7095 | SmallVectorImpl<Value *> &Roots) { | |||
7096 | // We can always demote constants. | |||
7097 | if (isa<Constant>(V)) { | |||
7098 | ToDemote.push_back(V); | |||
7099 | return true; | |||
7100 | } | |||
7101 | ||||
7102 | // If the value is not an instruction in the expression with only one use, it | |||
7103 | // cannot be demoted. | |||
7104 | auto *I = dyn_cast<Instruction>(V); | |||
7105 | if (!I || !I->hasOneUse() || !Expr.count(I)) | |||
7106 | return false; | |||
7107 | ||||
7108 | switch (I->getOpcode()) { | |||
7109 | ||||
7110 | // We can always demote truncations and extensions. Since truncations can | |||
7111 | // seed additional demotion, we save the truncated value. | |||
7112 | case Instruction::Trunc: | |||
7113 | Roots.push_back(I->getOperand(0)); | |||
7114 | break; | |||
7115 | case Instruction::ZExt: | |||
7116 | case Instruction::SExt: | |||
7117 | if (isa<ExtractElementInst>(I->getOperand(0)) || | |||
7118 | isa<InsertElementInst>(I->getOperand(0))) | |||
7119 | return false; | |||
7120 | break; | |||
7121 | ||||
7122 | // We can demote certain binary operations if we can demote both of their | |||
7123 | // operands. | |||
7124 | case Instruction::Add: | |||
7125 | case Instruction::Sub: | |||
7126 | case Instruction::Mul: | |||
7127 | case Instruction::And: | |||
7128 | case Instruction::Or: | |||
7129 | case Instruction::Xor: | |||
7130 | if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) || | |||
7131 | !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots)) | |||
7132 | return false; | |||
7133 | break; | |||
7134 | ||||
7135 | // We can demote selects if we can demote their true and false values. | |||
7136 | case Instruction::Select: { | |||
7137 | SelectInst *SI = cast<SelectInst>(I); | |||
7138 | if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) || | |||
7139 | !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots)) | |||
7140 | return false; | |||
7141 | break; | |||
7142 | } | |||
7143 | ||||
7144 | // We can demote phis if we can demote all their incoming operands. Note that | |||
7145 | // we don't need to worry about cycles since we ensure single use above. | |||
7146 | case Instruction::PHI: { | |||
7147 | PHINode *PN = cast<PHINode>(I); | |||
7148 | for (Value *IncValue : PN->incoming_values()) | |||
7149 | if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots)) | |||
7150 | return false; | |||
7151 | break; | |||
7152 | } | |||
7153 | ||||
7154 | // Otherwise, conservatively give up. | |||
7155 | default: | |||
7156 | return false; | |||
7157 | } | |||
7158 | ||||
7159 | // Record the value that we can demote. | |||
7160 | ToDemote.push_back(V); | |||
7161 | return true; | |||
7162 | } | |||
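collectValuesToDemote recurses through the expression and only succeeds when every node is a constant or a single-use instruction of a handled kind whose operands can themselves be demoted. A toy standalone sketch of that shape, using assumed node kinds rather than LLVM IR:

#include <vector>

struct Node {
  enum Kind { Constant, Add, Unknown } K = Constant;
  bool HasOneUse = true;
  std::vector<Node *> Operands;
};

// A node can be demoted if it is a constant, or a single-use node of a handled
// kind whose operands can all be demoted.
bool canDemote(const Node *N) {
  if (N->K == Node::Constant)
    return true;
  if (!N->HasOneUse || N->K == Node::Unknown)
    return false;
  for (const Node *Op : N->Operands)
    if (!canDemote(Op))
      return false;
  return true;
}

int main() {
  Node C1, C2;
  Node Sum{Node::Add, true, {&C1, &C2}};
  return canDemote(&Sum) ? 0 : 1; // demotable: an add of two constants
}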
7163 | ||||
7164 | void BoUpSLP::computeMinimumValueSizes() { | |||
7165 | // If there are no external uses, the expression tree must be rooted by a | |||
7166 | // store. We can't demote in-memory values, so there is nothing to do here. | |||
7167 | if (ExternalUses.empty()) | |||
7168 | return; | |||
7169 | ||||
7170 | // We only attempt to truncate integer expressions. | |||
7171 | auto &TreeRoot = VectorizableTree[0]->Scalars; | |||
7172 | auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); | |||
7173 | if (!TreeRootIT) | |||
7174 | return; | |||
7175 | ||||
7176 | // If the expression is not rooted by a store, these roots should have | |||
7177 | // external uses. We will rely on InstCombine to rewrite the expression in | |||
7178 | // the narrower type. However, InstCombine only rewrites single-use values. | |||
7179 | // This means that if a tree entry other than a root is used externally, it | |||
7180 | // must have multiple uses and InstCombine will not rewrite it. The code | |||
7181 | // below ensures that only the roots are used externally. | |||
7182 | SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end()); | |||
7183 | for (auto &EU : ExternalUses) | |||
7184 | if (!Expr.erase(EU.Scalar)) | |||
7185 | return; | |||
7186 | if (!Expr.empty()) | |||
7187 | return; | |||
7188 | ||||
7189 | // Collect the scalar values of the vectorizable expression. We will use this | |||
7190 | // context to determine which values can be demoted. If we see a truncation, | |||
7191 | // we mark it as seeding another demotion. | |||
7192 | for (auto &EntryPtr : VectorizableTree) | |||
7193 | Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end()); | |||
7194 | ||||
7195 | // Ensure the roots of the vectorizable tree don't form a cycle. They must | |||
7196 | // have a single external user that is not in the vectorizable tree. | |||
7197 | for (auto *Root : TreeRoot) | |||
7198 | if (!Root->hasOneUse() || Expr.count(*Root->user_begin())) | |||
7199 | return; | |||
7200 | ||||
7201 | // Conservatively determine if we can actually truncate the roots of the | |||
7202 | // expression. Collect the values that can be demoted in ToDemote and | |||
7203 | // additional roots that require investigating in Roots. | |||
7204 | SmallVector<Value *, 32> ToDemote; | |||
7205 | SmallVector<Value *, 4> Roots; | |||
7206 | for (auto *Root : TreeRoot) | |||
7207 | if (!collectValuesToDemote(Root, Expr, ToDemote, Roots)) | |||
7208 | return; | |||
7209 | ||||
7210 | // The maximum bit width required to represent all the values that can be | |||
7211 | // demoted without loss of precision. It would be safe to truncate the roots | |||
7212 | // of the expression to this width. | |||
7213 | auto MaxBitWidth = 8u; | |||
7214 | ||||
7215 | // We first check if all the bits of the roots are demanded. If they're not, | |||
7216 | // we can truncate the roots to this narrower type. | |||
7217 | for (auto *Root : TreeRoot) { | |||
7218 | auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); | |||
7219 | MaxBitWidth = std::max<unsigned>( | |||
7220 | Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth); | |||
7221 | } | |||
7222 | ||||
7223 | // True if the roots can be zero-extended back to their original type, rather | |||
7224 | // than sign-extended. We know that if the leading bits are not demanded, we | |||
7225 | // can safely zero-extend. So we initialize IsKnownPositive to True. | |||
7226 | bool IsKnownPositive = true; | |||
7227 | ||||
7228 | // If all the bits of the roots are demanded, we can try a little harder to | |||
7229 | // compute a narrower type. This can happen, for example, if the roots are | |||
7230 | // getelementptr indices. InstCombine promotes these indices to the pointer | |||
7231 | // width. Thus, all their bits are technically demanded even though the | |||
7232 | // address computation might be vectorized in a smaller type. | |||
7233 | // | |||
7234 | // We start by looking at each entry that can be demoted. We compute the | |||
7235 | // maximum bit width required to store the scalar by using ValueTracking to | |||
7236 | // compute the number of high-order bits we can truncate. | |||
7237 | if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) && | |||
7238 | llvm::all_of(TreeRoot, [](Value *R) { | |||
7239 | assert(R->hasOneUse() && "Root should have only one use!"); | |||
7240 | return isa<GetElementPtrInst>(R->user_back()); | |||
7241 | })) { | |||
7242 | MaxBitWidth = 8u; | |||
7243 | ||||
7244 | // Determine if the sign bit of all the roots is known to be zero. If not, | |||
7245 | // IsKnownPositive is set to False. | |||
7246 | IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) { | |||
7247 | KnownBits Known = computeKnownBits(R, *DL); | |||
7248 | return Known.isNonNegative(); | |||
7249 | }); | |||
7250 | ||||
7251 | // Determine the maximum number of bits required to store the scalar | |||
7252 | // values. | |||
7253 | for (auto *Scalar : ToDemote) { | |||
7254 | auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT); | |||
7255 | auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType()); | |||
7256 | MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth); | |||
7257 | } | |||
7258 | ||||
7259 | // If we can't prove that the sign bit is zero, we must add one to the | |||
7260 | // maximum bit width to account for the unknown sign bit. This preserves | |||
7261 | // the existing sign bit so we can safely sign-extend the root back to the | |||
7262 | // original type. Otherwise, if we know the sign bit is zero, we will | |||
7263 | // zero-extend the root instead. | |||
7264 | // | |||
7265 | // FIXME: This is somewhat suboptimal, as there will be cases where adding | |||
7266 | // one to the maximum bit width will yield a larger-than-necessary | |||
7267 | // type. In general, we need to add an extra bit only if we can't | |||
7268 | // prove that the upper bit of the original type is equal to the | |||
7269 | // upper bit of the proposed smaller type. If these two bits are the | |||
7270 | // same (either zero or one) we know that sign-extending from the | |||
7271 | // smaller type will result in the same value. Here, since we can't | |||
7272 | // yet prove this, we are just making the proposed smaller type | |||
7273 | // larger to ensure correctness. | |||
7274 | if (!IsKnownPositive) | |||
7275 | ++MaxBitWidth; | |||
7276 | } | |||
7277 | ||||
7278 | // Round MaxBitWidth up to the next power-of-two. | |||
7279 | if (!isPowerOf2_64(MaxBitWidth)) | |||
7280 | MaxBitWidth = NextPowerOf2(MaxBitWidth); | |||
7281 | ||||
7282 | // If the maximum bit width we compute is less than the width of the roots' | |||
7283 | // type, we can proceed with the narrowing. Otherwise, do nothing. | |||
7284 | if (MaxBitWidth >= TreeRootIT->getBitWidth()) | |||
7285 | return; | |||
7286 | ||||
7287 | // If we can truncate the root, we must collect additional values that might | |||
7288 | // be demoted as a result. That is, those seeded by truncations we will | |||
7289 | // modify. | |||
7290 | while (!Roots.empty()) | |||
7291 | collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots); | |||
7292 | ||||
7293 | // Finally, map the values we can demote to the maximum bit width we computed. | |||
7294 | for (auto *Scalar : ToDemote) | |||
7295 | MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive); | |||
7296 | } | |||
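computeMinimumValueSizes derives the demoted width from the number of known sign bits, adds one bit when the value might be negative, and rounds up to a power of two. A worked example of that arithmetic with made-up numbers (not taken from any particular input):

#include <cstdio>

unsigned nextPowerOf2(unsigned V) {
  unsigned P = 1;
  while (P < V)
    P <<= 1;
  return P;
}

int main() {
  unsigned NumTypeBits = 32, NumSignBits = 25; // made-up ValueTracking result
  bool IsKnownPositive = false;                // sign bit not proven zero
  unsigned MaxBitWidth = NumTypeBits - NumSignBits; // 7 value bits needed
  if (!IsKnownPositive)
    ++MaxBitWidth;                             // keep room for the sign bit: 8
  MaxBitWidth = nextPowerOf2(MaxBitWidth);     // already a power of two: 8
  std::printf("demote to i%u and sign-extend back\n", MaxBitWidth);
}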
7297 | ||||
7298 | namespace { | |||
7299 | ||||
7300 | /// The SLPVectorizer Pass. | |||
7301 | struct SLPVectorizer : public FunctionPass { | |||
7302 | SLPVectorizerPass Impl; | |||
7303 | ||||
7304 | /// Pass identification, replacement for typeid | |||
7305 | static char ID; | |||
7306 | ||||
7307 | explicit SLPVectorizer() : FunctionPass(ID) { | |||
7308 | initializeSLPVectorizerPass(*PassRegistry::getPassRegistry()); | |||
7309 | } | |||
7310 | ||||
7311 | bool doInitialization(Module &M) override { | |||
7312 | return false; | |||
7313 | } | |||
7314 | ||||
7315 | bool runOnFunction(Function &F) override { | |||
7316 | if (skipFunction(F)) | |||
7317 | return false; | |||
7318 | ||||
7319 | auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); | |||
7320 | auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); | |||
7321 | auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); | |||
7322 | auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; | |||
7323 | auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); | |||
7324 | auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); | |||
7325 | auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); | |||
7326 | auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); | |||
7327 | auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); | |||
7328 | auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); | |||
7329 | ||||
7330 | return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); | |||
7331 | } | |||
7332 | ||||
7333 | void getAnalysisUsage(AnalysisUsage &AU) const override { | |||
7334 | FunctionPass::getAnalysisUsage(AU); | |||
7335 | AU.addRequired<AssumptionCacheTracker>(); | |||
7336 | AU.addRequired<ScalarEvolutionWrapperPass>(); | |||
7337 | AU.addRequired<AAResultsWrapperPass>(); | |||
7338 | AU.addRequired<TargetTransformInfoWrapperPass>(); | |||
7339 | AU.addRequired<LoopInfoWrapperPass>(); | |||
7340 | AU.addRequired<DominatorTreeWrapperPass>(); | |||
7341 | AU.addRequired<DemandedBitsWrapperPass>(); | |||
7342 | AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); | |||
7343 | AU.addRequired<InjectTLIMappingsLegacy>(); | |||
7344 | AU.addPreserved<LoopInfoWrapperPass>(); | |||
7345 | AU.addPreserved<DominatorTreeWrapperPass>(); | |||
7346 | AU.addPreserved<AAResultsWrapperPass>(); | |||
7347 | AU.addPreserved<GlobalsAAWrapperPass>(); | |||
7348 | AU.setPreservesCFG(); | |||
7349 | } | |||
7350 | }; | |||
7351 | ||||
7352 | } // end anonymous namespace | |||
7353 | ||||
7354 | PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { | |||
7355 | auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); | |||
7356 | auto *TTI = &AM.getResult<TargetIRAnalysis>(F); | |||
7357 | auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); | |||
7358 | auto *AA = &AM.getResult<AAManager>(F); | |||
7359 | auto *LI = &AM.getResult<LoopAnalysis>(F); | |||
7360 | auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); | |||
7361 | auto *AC = &AM.getResult<AssumptionAnalysis>(F); | |||
7362 | auto *DB = &AM.getResult<DemandedBitsAnalysis>(F); | |||
7363 | auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); | |||
7364 | ||||
7365 | bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); | |||
7366 | if (!Changed) | |||
7367 | return PreservedAnalyses::all(); | |||
7368 | ||||
7369 | PreservedAnalyses PA; | |||
7370 | PA.preserveSet<CFGAnalyses>(); | |||
7371 | return PA; | |||
7372 | } | |||
7373 | ||||
7374 | bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, | |||
7375 | TargetTransformInfo *TTI_, | |||
7376 | TargetLibraryInfo *TLI_, AAResults *AA_, | |||
7377 | LoopInfo *LI_, DominatorTree *DT_, | |||
7378 | AssumptionCache *AC_, DemandedBits *DB_, | |||
7379 | OptimizationRemarkEmitter *ORE_) { | |||
7380 | if (!RunSLPVectorization) | |||
7381 | return false; | |||
7382 | SE = SE_; | |||
7383 | TTI = TTI_; | |||
7384 | TLI = TLI_; | |||
7385 | AA = AA_; | |||
7386 | LI = LI_; | |||
7387 | DT = DT_; | |||
7388 | AC = AC_; | |||
7389 | DB = DB_; | |||
7390 | DL = &F.getParent()->getDataLayout(); | |||
7391 | ||||
7392 | Stores.clear(); | |||
7393 | GEPs.clear(); | |||
7394 | bool Changed = false; | |||
7395 | ||||
7396 | // If the target claims to have no vector registers don't attempt | |||
7397 | // vectorization. | |||
7398 | if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) | |||
7399 | return false; | |||
7400 | ||||
7401 | // Don't vectorize when the attribute NoImplicitFloat is used. | |||
7402 | if (F.hasFnAttribute(Attribute::NoImplicitFloat)) | |||
7403 | return false; | |||
7404 | ||||
7405 | LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); | |||
7406 | ||||
7407 | // Use the bottom up slp vectorizer to construct chains that start with | |||
7408 | // store instructions. | |||
7409 | BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_); | |||
7410 | ||||
7411 | // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to | |||
7412 | // delete instructions. | |||
7413 | ||||
7414 | // Update DFS numbers now so that we can use them for ordering. | |||
7415 | DT->updateDFSNumbers(); | |||
7416 | ||||
7417 | // Scan the blocks in the function in post order. | |||
7418 | for (auto BB : post_order(&F.getEntryBlock())) { | |||
7419 | collectSeedInstructions(BB); | |||
7420 | ||||
7421 | // Vectorize trees that end at stores. | |||
7422 | if (!Stores.empty()) { | |||
7423 | LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() | |||
7424 | << " underlying objects.\n"); | |||
7425 | Changed |= vectorizeStoreChains(R); | |||
7426 | } | |||
7427 | ||||
7428 | // Vectorize trees that end at reductions. | |||
7429 | Changed |= vectorizeChainsInBlock(BB, R); | |||
7430 | ||||
7431 | // Vectorize the index computations of getelementptr instructions. This | |||
7432 | // is primarily intended to catch gather-like idioms ending at | |||
7433 | // non-consecutive loads. | |||
7434 | if (!GEPs.empty()) { | |||
7435 | LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() | |||
7436 | << " underlying objects.\n"); | |||
7437 | Changed |= vectorizeGEPIndices(BB, R); | |||
7438 | } | |||
7439 | } | |||
7440 | ||||
7441 | if (Changed) { | |||
7442 | R.optimizeGatherSequence(); | |||
7443 | LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); | |||
7444 | } | |||
7445 | return Changed; | |||
7446 | } | |||
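// Illustrative sketch (not part of the pass): the kind of scalar code the
// store-seeded path above is looking for. Assuming A points at four
// consecutive floats, the four adjacent simple stores below form one
// consecutive chain that vectorizeStoreChains()/vectorizeStoreChain() can
// turn into a single <4 x float> store.
static void exampleStoreChainSeed(float *A, float X) {
  A[0] = X + 0.0f;
  A[1] = X + 1.0f;
  A[2] = X + 2.0f;
  A[3] = X + 3.0f;
}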
7447 | ||||
7448 | bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, | |||
7449 | unsigned Idx) { | |||
7450 | LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size() | |||
7451 | << "\n"); | |||
7452 | const unsigned Sz = R.getVectorElementSize(Chain[0]); | |||
7453 | const unsigned MinVF = R.getMinVecRegSize() / Sz; | |||
7454 | unsigned VF = Chain.size(); | |||
7455 | ||||
7456 | if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF) | |||
7457 | return false; | |||
7458 | ||||
7459 | LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx | |||
7460 | << "\n"); | |||
7461 | ||||
7462 | R.buildTree(Chain); | |||
7463 | if (R.isTreeTinyAndNotFullyVectorizable()) | |||
7464 | return false; | |||
7465 | if (R.isLoadCombineCandidate()) | |||
7466 | return false; | |||
7467 | R.reorderTopToBottom(); | |||
7468 | R.reorderBottomToTop(); | |||
7469 | R.buildExternalUses(); | |||
7470 | ||||
7471 | R.computeMinimumValueSizes(); | |||
7472 | ||||
7473 | InstructionCost Cost = R.getTreeCost(); | |||
7474 | ||||
7475 | LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF =" << VF << "\n"); | |||
7476 | if (Cost < -SLPCostThreshold) { | |||
7477 | LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n"); | |||
7478 | ||||
7479 | using namespace ore; | |||
7480 | ||||
7481 | R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized", | |||
7482 | cast<StoreInst>(Chain[0])) | |||
7483 | << "Stores SLP vectorized with cost " << NV("Cost", Cost) | |||
7484 | << " and with tree size " | |||
7485 | << NV("TreeSize", R.getTreeSize())); | |||
7486 | ||||
7487 | R.vectorizeTree(); | |||
7488 | return true; | |||
7489 | } | |||
7490 | ||||
7491 | return false; | |||
7492 | } | |||
7493 | ||||
7494 | bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores, | |||
7495 | BoUpSLP &R) { | |||
7496 | // We may run into multiple chains that merge into a single chain. We mark the | |||
7497 | // stores that we vectorized so that we don't visit the same store twice. | |||
7498 | BoUpSLP::ValueSet VectorizedStores; | |||
7499 | bool Changed = false; | |||
7500 | ||||
7501 | int E = Stores.size(); | |||
7502 | SmallBitVector Tails(E, false); | |||
7503 | int MaxIter = MaxStoreLookup.getValue(); | |||
7504 | SmallVector<std::pair<int, int>, 16> ConsecutiveChain( | |||
7505 | E, std::make_pair(E, INT_MAX)); | |||
7506 | SmallVector<SmallBitVector, 4> CheckedPairs(E, SmallBitVector(E, false)); | |||
7507 | int IterCnt; | |||
7508 | auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter, | |||
7509 | &CheckedPairs, | |||
7510 | &ConsecutiveChain](int K, int Idx) { | |||
7511 | if (IterCnt >= MaxIter) | |||
7512 | return true; | |||
7513 | if (CheckedPairs[Idx].test(K)) | |||
7514 | return ConsecutiveChain[K].second == 1 && | |||
7515 | ConsecutiveChain[K].first == Idx; | |||
7516 | ++IterCnt; | |||
7517 | CheckedPairs[Idx].set(K); | |||
7518 | CheckedPairs[K].set(Idx); | |||
7519 | Optional<int> Diff = getPointersDiff( | |||
7520 | Stores[K]->getValueOperand()->getType(), Stores[K]->getPointerOperand(), | |||
7521 | Stores[Idx]->getValueOperand()->getType(), | |||
7522 | Stores[Idx]->getPointerOperand(), *DL, *SE, /*StrictCheck=*/true); | |||
7523 | if (!Diff || *Diff == 0) | |||
7524 | return false; | |||
7525 | int Val = *Diff; | |||
7526 | if (Val < 0) { | |||
7527 | if (ConsecutiveChain[Idx].second > -Val) { | |||
7528 | Tails.set(K); | |||
7529 | ConsecutiveChain[Idx] = std::make_pair(K, -Val); | |||
7530 | } | |||
7531 | return false; | |||
7532 | } | |||
7533 | if (ConsecutiveChain[K].second <= Val) | |||
7534 | return false; | |||
7535 | ||||
7536 | Tails.set(Idx); | |||
7537 | ConsecutiveChain[K] = std::make_pair(Idx, Val); | |||
7538 | return Val == 1; | |||
7539 | }; | |||
7540 | // Do a quadratic search on all of the given stores in reverse order and find | |||
7541 | // all of the pairs of stores that follow each other. | |||
7542 | for (int Idx = E - 1; Idx >= 0; --Idx) { | |||
7543 | // If a store has multiple consecutive store candidates, search according | |||
7544 | // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ... | |||
7545 | // This is because pairing with the immediately succeeding or preceding | |||
7546 | // candidate usually creates the best chance of finding an SLP vectorization opportunity. | |||
7547 | const int MaxLookDepth = std::max(E - Idx, Idx + 1); | |||
7548 | IterCnt = 0; | |||
7549 | for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset) | |||
7550 | if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) || | |||
7551 | (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx))) | |||
7552 | break; | |||
7553 | } | |||
7554 | ||||
7555 | // Tracks if we tried to vectorize stores starting from the given tail | |||
7556 | // already. | |||
7557 | SmallBitVector TriedTails(E, false); | |||
7558 | // For stores that start but don't end a link in the chain: | |||
7559 | for (int Cnt = E; Cnt > 0; --Cnt) { | |||
7560 | int I = Cnt - 1; | |||
7561 | if (ConsecutiveChain[I].first == E || Tails.test(I)) | |||
7562 | continue; | |||
7563 | // We found a store instr that starts a chain. Now follow the chain and try | |||
7564 | // to vectorize it. | |||
7565 | BoUpSLP::ValueList Operands; | |||
7566 | // Collect the chain into a list. | |||
7567 | while (I != E && !VectorizedStores.count(Stores[I])) { | |||
7568 | Operands.push_back(Stores[I]); | |||
7569 | Tails.set(I); | |||
7570 | if (ConsecutiveChain[I].second != 1) { | |||
7571 | // Mark the new end in the chain and go back, if required. It might be | |||
7572 | // required if the original stores come in reversed order, for example. | |||
7573 | if (ConsecutiveChain[I].first != E && | |||
7574 | Tails.test(ConsecutiveChain[I].first) && !TriedTails.test(I) && | |||
7575 | !VectorizedStores.count(Stores[ConsecutiveChain[I].first])) { | |||
7576 | TriedTails.set(I); | |||
7577 | Tails.reset(ConsecutiveChain[I].first); | |||
7578 | if (Cnt < ConsecutiveChain[I].first + 2) | |||
7579 | Cnt = ConsecutiveChain[I].first + 2; | |||
7580 | } | |||
7581 | break; | |||
7582 | } | |||
7583 | // Move to the next value in the chain. | |||
7584 | I = ConsecutiveChain[I].first; | |||
7585 | } | |||
7586 | assert(!Operands.empty() && "Expected non-empty list of stores."); | |||
7587 | ||||
7588 | unsigned MaxVecRegSize = R.getMaxVecRegSize(); | |||
7589 | unsigned EltSize = R.getVectorElementSize(Operands[0]); | |||
7590 | unsigned MaxElts = llvm::PowerOf2Floor(MaxVecRegSize / EltSize); | |||
7591 | ||||
7592 | unsigned MinVF = R.getMinVF(EltSize); | |||
7593 | unsigned MaxVF = std::min(R.getMaximumVF(EltSize, Instruction::Store), | |||
7594 | MaxElts); | |||
7595 | ||||
7596 | // FIXME: Is division-by-2 the correct step? Should we assert that the | |||
7597 | // register size is a power-of-2? | |||
7598 | unsigned StartIdx = 0; | |||
7599 | for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) { | |||
7600 | for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) { | |||
7601 | ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size); | |||
7602 | if (!VectorizedStores.count(Slice.front()) && | |||
7603 | !VectorizedStores.count(Slice.back()) && | |||
7604 | vectorizeStoreChain(Slice, R, Cnt)) { | |||
7605 | // Mark the vectorized stores so that we don't vectorize them again. | |||
7606 | VectorizedStores.insert(Slice.begin(), Slice.end()); | |||
7607 | Changed = true; | |||
7608 | // If we vectorized the initial block, there is no need to try to | |||
7609 | // vectorize it again. | |||
7610 | if (Cnt == StartIdx) | |||
7611 | StartIdx += Size; | |||
7612 | Cnt += Size; | |||
7613 | continue; | |||
7614 | } | |||
7615 | ++Cnt; | |||
7616 | } | |||
7617 | // Check if the whole array was vectorized already - exit. | |||
7618 | if (StartIdx >= Operands.size()) | |||
7619 | break; | |||
7620 | } | |||
7621 | } | |||
7622 | ||||
7623 | return Changed; | |||
7624 | } | |||
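// Illustrative sketch (not part of the pass): the shrinking power-of-two
// window search used at the end of vectorizeStores(), shown over plain
// indices. "TryWindow" is a placeholder for vectorizeStoreChain(); MaxVF and
// MinVF mirror the bounds computed above.
static void exampleWindowSearch(unsigned NumStores, unsigned MaxVF,
                                unsigned MinVF,
                                function_ref<bool(unsigned, unsigned)> TryWindow) {
  assert(MinVF >= 2 && isPowerOf2_32(MaxVF) &&
         "Example assumes power-of-two VF bounds");
  unsigned StartIdx = 0;
  for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) {
    for (unsigned Cnt = StartIdx; Cnt + Size <= NumStores;) {
      if (TryWindow(Cnt, Size)) {
        // Success: skip past the vectorized window and remember that the
        // leading part of the chain no longer needs to be revisited.
        if (Cnt == StartIdx)
          StartIdx += Size;
        Cnt += Size;
        continue;
      }
      // Failure: slide the window by a single store and try again.
      ++Cnt;
    }
    if (StartIdx >= NumStores)
      break;
  }
}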
7625 | ||||
7626 | void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { | |||
7627 | // Initialize the collections. We will make a single pass over the block. | |||
7628 | Stores.clear(); | |||
7629 | GEPs.clear(); | |||
7630 | ||||
7631 | // Visit the store and getelementptr instructions in BB and organize them in | |||
7632 | // Stores and GEPs according to the underlying objects of their pointer | |||
7633 | // operands. | |||
7634 | for (Instruction &I : *BB) { | |||
7635 | // Ignore store instructions that are volatile or have a pointer operand | |||
7636 | // that doesn't point to a scalar type. | |||
7637 | if (auto *SI = dyn_cast<StoreInst>(&I)) { | |||
7638 | if (!SI->isSimple()) | |||
7639 | continue; | |||
7640 | if (!isValidElementType(SI->getValueOperand()->getType())) | |||
7641 | continue; | |||
7642 | Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI); | |||
7643 | } | |||
7644 | ||||
7645 | // Ignore getelementptr instructions that have more than one index, a | |||
7646 | // constant index, or a pointer operand that doesn't point to a scalar | |||
7647 | // type. | |||
7648 | else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { | |||
7649 | auto Idx = GEP->idx_begin()->get(); | |||
7650 | if (GEP->getNumIndices() > 1 || isa<Constant>(Idx)) | |||
7651 | continue; | |||
7652 | if (!isValidElementType(Idx->getType())) | |||
7653 | continue; | |||
7654 | if (GEP->getType()->isVectorTy()) | |||
7655 | continue; | |||
7656 | GEPs[GEP->getPointerOperand()].push_back(GEP); | |||
7657 | } | |||
7658 | } | |||
7659 | } | |||
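// Illustrative sketch (not part of the pass): seed stores are bucketed by the
// underlying object of their pointer operand so that only stores which could
// ever form a consecutive chain are compared with each other. A hedged,
// generic version of that bucketing (the map type and helper name here are
// assumptions; the real Stores/GEPs containers are members of
// SLPVectorizerPass).
static void exampleBucketStores(
    BasicBlock &BB, DenseMap<Value *, SmallVector<StoreInst *, 4>> &Buckets) {
  for (Instruction &I : BB)
    if (auto *SI = dyn_cast<StoreInst>(&I))
      if (SI->isSimple())
        Buckets[getUnderlyingObject(SI->getPointerOperand())].push_back(SI);
}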
7660 | ||||
7661 | bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { | |||
7662 | if (!A || !B) | |||
7663 | return false; | |||
7664 | Value *VL[] = {A, B}; | |||
7665 | return tryToVectorizeList(VL, R); | |||
7666 | } | |||
7667 | ||||
7668 | bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R) { | |||
7669 | if (VL.size() < 2) | |||
7670 | return false; | |||
7671 | ||||
7672 | LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " | |||
7673 | << VL.size() << ".\n"); | |||
7674 | ||||
7675 | // Check that all of the parts are instructions of the same type, | |||
7676 | // we permit an alternate opcode via InstructionsState. | |||
7677 | InstructionsState S = getSameOpcode(VL); | |||
7678 | if (!S.getOpcode()) | |||
7679 | return false; | |||
7680 | ||||
7681 | Instruction *I0 = cast<Instruction>(S.OpValue); | |||
7682 | // Make sure invalid types (including vector type) are rejected before | |||
7683 | // determining vectorization factor for scalar instructions. | |||
7684 | for (Value *V : VL) { | |||
7685 | Type *Ty = V->getType(); | |||
7686 | if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) { | |||
7687 | // NOTE: the following will give the user an internal LLVM type name, | |||
7688 | // which may not be useful. | |||
7689 | R.getORE()->emit([&]() { | |||
7690 | std::string type_str; | |||
7691 | llvm::raw_string_ostream rso(type_str); | |||
7692 | Ty->print(rso); | |||
7693 | return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0) | |||
7694 | << "Cannot SLP vectorize list: type " | |||
7695 | << rso.str() + " is unsupported by vectorizer"; | |||
7696 | }); | |||
7697 | return false; | |||
7698 | } | |||
7699 | } | |||
7700 | ||||
7701 | unsigned Sz = R.getVectorElementSize(I0); | |||
7702 | unsigned MinVF = R.getMinVF(Sz); | |||
7703 | unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF); | |||
7704 | MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF); | |||
7705 | if (MaxVF < 2) { | |||
7706 | R.getORE()->emit([&]() { | |||
7707 | return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0) | |||
7708 | << "Cannot SLP vectorize list: vectorization factor " | |||
7709 | << "less than 2 is not supported"; | |||
7710 | }); | |||
7711 | return false; | |||
7712 | } | |||
7713 | ||||
7714 | bool Changed = false; | |||
7715 | bool CandidateFound = false; | |||
7716 | InstructionCost MinCost = SLPCostThreshold.getValue(); | |||
7717 | Type *ScalarTy = VL[0]->getType(); | |||
7718 | if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) | |||
7719 | ScalarTy = IE->getOperand(1)->getType(); | |||
7720 | ||||
7721 | unsigned NextInst = 0, MaxInst = VL.size(); | |||
7722 | for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) { | |||
7723 | // No actual vectorization should happen if the number of parts is the same | |||
7724 | // as the provided vectorization factor (i.e. the scalar type is used for | |||
7725 | // the vector code during codegen). | |||
7726 | auto *VecTy = FixedVectorType::get(ScalarTy, VF); | |||
7727 | if (TTI->getNumberOfParts(VecTy) == VF) | |||
7728 | continue; | |||
7729 | for (unsigned I = NextInst; I < MaxInst; ++I) { | |||
7730 | unsigned OpsWidth = 0; | |||
7731 | ||||
7732 | if (I + VF > MaxInst) | |||
7733 | OpsWidth = MaxInst - I; | |||
7734 | else | |||
7735 | OpsWidth = VF; | |||
7736 | ||||
7737 | if (!isPowerOf2_32(OpsWidth)) | |||
7738 | continue; | |||
7739 | ||||
7740 | if ((VF > MinVF && OpsWidth <= VF / 2) || (VF == MinVF && OpsWidth < 2)) | |||
7741 | break; | |||
7742 | ||||
7743 | ArrayRef<Value *> Ops = VL.slice(I, OpsWidth); | |||
7744 | // Check that a previous iteration of this loop did not delete the Value. | |||
7745 | if (llvm::any_of(Ops, [&R](Value *V) { | |||
7746 | auto *I = dyn_cast<Instruction>(V); | |||
7747 | return I && R.isDeleted(I); | |||
7748 | })) | |||
7749 | continue; | |||
7750 | ||||
7751 | LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " | |||
7752 | << "\n"); | |||
7753 | ||||
7754 | R.buildTree(Ops); | |||
7755 | if (R.isTreeTinyAndNotFullyVectorizable()) | |||
7756 | continue; | |||
7757 | R.reorderTopToBottom(); | |||
7758 | R.reorderBottomToTop(); | |||
7759 | R.buildExternalUses(); | |||
7760 | ||||
7761 | R.computeMinimumValueSizes(); | |||
7762 | InstructionCost Cost = R.getTreeCost(); | |||
7763 | CandidateFound = true; | |||
7764 | MinCost = std::min(MinCost, Cost); | |||
7765 | ||||
7766 | if (Cost < -SLPCostThreshold) { | |||
7767 | LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); | |||
7768 | R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList", | |||
7769 | cast<Instruction>(Ops[0])) | |||
7770 | << "SLP vectorized with cost " << ore::NV("Cost", Cost) | |||
7771 | << " and with tree size " | |||
7772 | << ore::NV("TreeSize", R.getTreeSize())); | |||
7773 | ||||
7774 | R.vectorizeTree(); | |||
7775 | // Move to the next bundle. | |||
7776 | I += VF - 1; | |||
7777 | NextInst = I + 1; | |||
7778 | Changed = true; | |||
7779 | } | |||
7780 | } | |||
7781 | } | |||
7782 | ||||
7783 | if (!Changed && CandidateFound) { | |||
7784 | R.getORE()->emit([&]() { | |||
7785 | return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0) | |||
7786 | << "List vectorization was possible but not beneficial with cost " | |||
7787 | << ore::NV("Cost", MinCost) << " >= " | |||
7788 | << ore::NV("Threshold", -SLPCostThreshold); | |||
7789 | }); | |||
7790 | } else if (!Changed) { | |||
7791 | R.getORE()->emit([&]() { | |||
7792 | return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0) | |||
7793 | << "Cannot SLP vectorize list: vectorization was impossible" | |||
7794 | << " with available vectorization factors"; | |||
7795 | }); | |||
7796 | } | |||
7797 | return Changed; | |||
7798 | } | |||
7799 | ||||
7800 | bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) { | |||
7801 | if (!I) | |||
7802 | return false; | |||
7803 | ||||
7804 | if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I)) | |||
7805 | return false; | |||
7806 | ||||
7807 | Value *P = I->getParent(); | |||
7808 | ||||
7809 | // Vectorize in current basic block only. | |||
7810 | auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); | |||
7811 | auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); | |||
7812 | if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P) | |||
7813 | return false; | |||
7814 | ||||
7815 | // Try to vectorize V. | |||
7816 | if (tryToVectorizePair(Op0, Op1, R)) | |||
7817 | return true; | |||
7818 | ||||
7819 | auto *A = dyn_cast<BinaryOperator>(Op0); | |||
7820 | auto *B = dyn_cast<BinaryOperator>(Op1); | |||
7821 | // Try to skip B. | |||
7822 | if (B && B->hasOneUse()) { | |||
7823 | auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); | |||
7824 | auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); | |||
7825 | if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R)) | |||
7826 | return true; | |||
7827 | if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R)) | |||
7828 | return true; | |||
7829 | } | |||
7830 | ||||
7831 | // Try to skip A. | |||
7832 | if (A && A->hasOneUse()) { | |||
7833 | auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); | |||
7834 | auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); | |||
7835 | if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R)) | |||
7836 | return true; | |||
7837 | if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R)) | |||
7838 | return true; | |||
7839 | } | |||
7840 | return false; | |||
7841 | } | |||
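// Illustrative sketch (not part of the pass): the shape tryToVectorize()
// targets - a binary operator or compare whose two operands are instructions
// in the same block. If pairing Op0/Op1 directly fails, one operand level is
// skipped and that operand's own operands are tried instead. The helper name
// below is hypothetical.
static int exampleTryToVectorizeRoot(int A0, int A1, int B0, int B1) {
  int L = A0 * A1; // Op0
  int R = B0 * B1; // Op1
  return L + R;    // the root instruction handed to tryToVectorize()
}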
7842 | ||||
7843 | namespace { | |||
7844 | ||||
7845 | /// Model horizontal reductions. | |||
7846 | /// | |||
7847 | /// A horizontal reduction is a tree of reduction instructions that has values | |||
7848 | /// that can be put into a vector as its leaves. For example: | |||
7849 | /// | |||
7850 | /// mul mul mul mul | |||
7851 | /// \ / \ / | |||
7852 | /// + + | |||
7853 | /// \ / | |||
7854 | /// + | |||
7855 | /// This tree has "mul" as its leaf values and "+" as its reduction | |||
7856 | /// instructions. A reduction can feed into a store or a binary operation | |||
7857 | /// feeding a phi. | |||
7858 | /// ... | |||
7859 | /// \ / | |||
7860 | /// + | |||
7861 | /// | | |||
7862 | /// phi += | |||
7863 | /// | |||
7864 | /// Or: | |||
7865 | /// ... | |||
7866 | /// \ / | |||
7867 | /// + | |||
7868 | /// | | |||
7869 | /// *p = | |||
7870 | /// | |||
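/// A concrete scalar form of the tree above (illustrative only): the
/// multiplies are the leaf values and the additions into Sum are the
/// reduction operations.
///
/// \code
///   int Sum = 0;
///   Sum += A[0] * B[0];
///   Sum += A[1] * B[1];
///   Sum += A[2] * B[2];
///   Sum += A[3] * B[3];
/// \endcode
///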
7871 | class HorizontalReduction { | |||
7872 | using ReductionOpsType = SmallVector<Value *, 16>; | |||
7873 | using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; | |||
7874 | ReductionOpsListType ReductionOps; | |||
7875 | SmallVector<Value *, 32> ReducedVals; | |||
7876 | // Use map vector to make stable output. | |||
7877 | MapVector<Instruction *, Value *> ExtraArgs; | |||
7878 | WeakTrackingVH ReductionRoot; | |||
7879 | /// The type of reduction operation. | |||
7880 | RecurKind RdxKind; | |||
7881 | ||||
7882 | const unsigned INVALID_OPERAND_INDEX = std::numeric_limits<unsigned>::max(); | |||
7883 | ||||
7884 | static bool isCmpSelMinMax(Instruction *I) { | |||
7885 | return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) && | |||
7886 | RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I)); | |||
7887 | } | |||
7888 | ||||
7889 | // And/or are potentially poison-safe logical patterns like: | |||
7890 | // select x, y, false | |||
7891 | // select x, true, y | |||
7892 | static bool isBoolLogicOp(Instruction *I) { | |||
7893 | return match(I, m_LogicalAnd(m_Value(), m_Value())) || | |||
7894 | match(I, m_LogicalOr(m_Value(), m_Value())); | |||
7895 | } | |||
7896 | ||||
7897 | /// Checks if instruction is associative and can be vectorized. | |||
7898 | static bool isVectorizable(RecurKind Kind, Instruction *I) { | |||
7899 | if (Kind == RecurKind::None) | |||
7900 | return false; | |||
7901 | ||||
7902 | // Integer ops that map to select instructions or intrinsics are fine. | |||
7903 | if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) || | |||
7904 | isBoolLogicOp(I)) | |||
7905 | return true; | |||
7906 | ||||
7907 | if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) { | |||
7908 | // FP min/max are associative except for NaN and -0.0. We do not | |||
7909 | // have to rule out -0.0 here because the intrinsic semantics do not | |||
7910 | // specify a fixed result for it. | |||
7911 | return I->getFastMathFlags().noNaNs(); | |||
7912 | } | |||
7913 | ||||
7914 | return I->isAssociative(); | |||
7915 | } | |||
7916 | ||||
7917 | static Value *getRdxOperand(Instruction *I, unsigned Index) { | |||
7918 | // Poison-safe 'or' takes the form: select X, true, Y | |||
7919 | // To make that work with the normal operand processing, we skip the | |||
7920 | // true value operand. | |||
7921 | // TODO: Change the code and data structures to handle this without a hack. | |||
7922 | if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1) | |||
7923 | return I->getOperand(2); | |||
7924 | return I->getOperand(Index); | |||
7925 | } | |||
7926 | ||||
7927 | /// Checks if the ParentStackElem.first should be marked as a reduction | |||
7928 | /// operation with an extra argument or as extra argument itself. | |||
7929 | void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem, | |||
7930 | Value *ExtraArg) { | |||
7931 | if (ExtraArgs.count(ParentStackElem.first)) { | |||
7932 | ExtraArgs[ParentStackElem.first] = nullptr; | |||
7933 | // We ran into something like: | |||
7934 | // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg. | |||
7935 | // The whole ParentStackElem.first should be considered as an extra value | |||
7936 | // in this case. | |||
7937 | // Do not perform analysis of remaining operands of ParentStackElem.first | |||
7938 | // instruction, this whole instruction is an extra argument. | |||
7939 | ParentStackElem.second = INVALID_OPERAND_INDEX; | |||
7940 | } else { | |||
7941 | // We ran into something like: | |||
7942 | // ParentStackElem.first += ... + ExtraArg + ... | |||
7943 | ExtraArgs[ParentStackElem.first] = ExtraArg; | |||
7944 | } | |||
7945 | } | |||
7946 | ||||
7947 | /// Creates reduction operation with the current opcode. | |||
7948 | static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS, | |||
7949 | Value *RHS, const Twine &Name, bool UseSelect) { | |||
7950 | unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind); | |||
7951 | switch (Kind) { | |||
7952 | case RecurKind::Add: | |||
7953 | case RecurKind::Mul: | |||
7954 | case RecurKind::Or: | |||
7955 | case RecurKind::And: | |||
7956 | case RecurKind::Xor: | |||
7957 | case RecurKind::FAdd: | |||
7958 | case RecurKind::FMul: | |||
7959 | return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, | |||
7960 | Name); | |||
7961 | case RecurKind::FMax: | |||
7962 | return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS); | |||
7963 | case RecurKind::FMin: | |||
7964 | return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS); | |||
7965 | case RecurKind::SMax: | |||
7966 | if (UseSelect) { | |||
7967 | Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name); | |||
7968 | return Builder.CreateSelect(Cmp, LHS, RHS, Name); | |||
7969 | } | |||
7970 | return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS); | |||
7971 | case RecurKind::SMin: | |||
7972 | if (UseSelect) { | |||
7973 | Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name); | |||
7974 | return Builder.CreateSelect(Cmp, LHS, RHS, Name); | |||
7975 | } | |||
7976 | return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS); | |||
7977 | case RecurKind::UMax: | |||
7978 | if (UseSelect) { | |||
7979 | Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name); | |||
7980 | return Builder.CreateSelect(Cmp, LHS, RHS, Name); | |||
7981 | } | |||
7982 | return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS); | |||
7983 | case RecurKind::UMin: | |||
7984 | if (UseSelect) { | |||
7985 | Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name); | |||
7986 | return Builder.CreateSelect(Cmp, LHS, RHS, Name); | |||
7987 | } | |||
7988 | return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS); | |||
7989 | default: | |||
7990 | llvm_unreachable("Unknown reduction operation.")::llvm::llvm_unreachable_internal("Unknown reduction operation." , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp" , 7990); | |||
7991 | } | |||
7992 | } | |||
7993 | ||||
7994 | /// Creates reduction operation with the current opcode with the IR flags | |||
7995 | /// from \p ReductionOps. | |||
7996 | static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, | |||
7997 | Value *RHS, const Twine &Name, | |||
7998 | const ReductionOpsListType &ReductionOps) { | |||
7999 | bool UseSelect = ReductionOps.size() == 2; | |||
8000 | assert((!UseSelect || isa<SelectInst>(ReductionOps[1][0])) && | |||
8001 | "Expected cmp + select pairs for reduction"); | |||
8002 | Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect); | |||
8003 | if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { | |||
8004 | if (auto *Sel = dyn_cast<SelectInst>(Op)) { | |||
8005 | propagateIRFlags(Sel->getCondition(), ReductionOps[0]); | |||
8006 | propagateIRFlags(Op, ReductionOps[1]); | |||
8007 | return Op; | |||
8008 | } | |||
8009 | } | |||
8010 | propagateIRFlags(Op, ReductionOps[0]); | |||
8011 | return Op; | |||
8012 | } | |||
8013 | ||||
8014 | /// Creates reduction operation with the current opcode with the IR flags | |||
8015 | /// from \p I. | |||
8016 | static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, | |||
8017 | Value *RHS, const Twine &Name, Instruction *I) { | |||
8018 | auto *SelI = dyn_cast<SelectInst>(I); | |||
8019 | Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, SelI != nullptr); | |||
8020 | if (SelI && RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { | |||
8021 | if (auto *Sel = dyn_cast<SelectInst>(Op)) | |||
8022 | propagateIRFlags(Sel->getCondition(), SelI->getCondition()); | |||
8023 | } | |||
8024 | propagateIRFlags(Op, I); | |||
8025 | return Op; | |||
8026 | } | |||
8027 | ||||
8028 | static RecurKind getRdxKind(Instruction *I) { | |||
8029 | assert(I && "Expected instruction for reduction matching")(static_cast <bool> (I && "Expected instruction for reduction matching" ) ? void (0) : __assert_fail ("I && \"Expected instruction for reduction matching\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp" , 8029, __extension__ __PRETTY_FUNCTION__)); | |||
8030 | TargetTransformInfo::ReductionFlags RdxFlags; | |||
8031 | if (match(I, m_Add(m_Value(), m_Value()))) | |||
8032 | return RecurKind::Add; | |||
8033 | if (match(I, m_Mul(m_Value(), m_Value()))) | |||
8034 | return RecurKind::Mul; | |||
8035 | if (match(I, m_And(m_Value(), m_Value())) || | |||
8036 | match(I, m_LogicalAnd(m_Value(), m_Value()))) | |||
8037 | return RecurKind::And; | |||
8038 | if (match(I, m_Or(m_Value(), m_Value())) || | |||
8039 | match(I, m_LogicalOr(m_Value(), m_Value()))) | |||
8040 | return RecurKind::Or; | |||
8041 | if (match(I, m_Xor(m_Value(), m_Value()))) | |||
8042 | return RecurKind::Xor; | |||
8043 | if (match(I, m_FAdd(m_Value(), m_Value()))) | |||
8044 | return RecurKind::FAdd; | |||
8045 | if (match(I, m_FMul(m_Value(), m_Value()))) | |||
8046 | return RecurKind::FMul; | |||
8047 | ||||
8048 | if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value()))) | |||
8049 | return RecurKind::FMax; | |||
8050 | if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value()))) | |||
8051 | return RecurKind::FMin; | |||
8052 | ||||
8053 | // This matches either cmp+select or intrinsics. SLP is expected to handle | |||
8054 | // either form. | |||
8055 | // TODO: If we are canonicalizing to intrinsics, we can remove several | |||
8056 | // special-case paths that deal with selects. | |||
8057 | if (match(I, m_SMax(m_Value(), m_Value()))) | |||
8058 | return RecurKind::SMax; | |||
8059 | if (match(I, m_SMin(m_Value(), m_Value()))) | |||
8060 | return RecurKind::SMin; | |||
8061 | if (match(I, m_UMax(m_Value(), m_Value()))) | |||
8062 | return RecurKind::UMax; | |||
8063 | if (match(I, m_UMin(m_Value(), m_Value()))) | |||
8064 | return RecurKind::UMin; | |||
8065 | ||||
8066 | if (auto *Select = dyn_cast<SelectInst>(I)) { | |||
8067 | // Try harder: look for min/max pattern based on instructions producing | |||
8068 | // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). | |||
8069 | // During the intermediate stages of SLP, it's very common to have | |||
8070 | // pattern like this (since optimizeGatherSequence is run only once | |||
8071 | // at the end): | |||
8072 | // %1 = extractelement <2 x i32> %a, i32 0 | |||
8073 | // %2 = extractelement <2 x i32> %a, i32 1 | |||
8074 | // %cond = icmp sgt i32 %1, %2 | |||
8075 | // %3 = extractelement <2 x i32> %a, i32 0 | |||
8076 | // %4 = extractelement <2 x i32> %a, i32 1 | |||
8077 | // %select = select i1 %cond, i32 %3, i32 %4 | |||
8078 | CmpInst::Predicate Pred; | |||
8079 | Instruction *L1; | |||
8080 | Instruction *L2; | |||
8081 | ||||
8082 | Value *LHS = Select->getTrueValue(); | |||
8083 | Value *RHS = Select->getFalseValue(); | |||
8084 | Value *Cond = Select->getCondition(); | |||
8085 | ||||
8086 | // TODO: Support inverse predicates. | |||
8087 | if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) { | |||
8088 | if (!isa<ExtractElementInst>(RHS) || | |||
8089 | !L2->isIdenticalTo(cast<Instruction>(RHS))) | |||
8090 | return RecurKind::None; | |||
8091 | } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) { | |||
8092 | if (!isa<ExtractElementInst>(LHS) || | |||
8093 | !L1->isIdenticalTo(cast<Instruction>(LHS))) | |||
8094 | return RecurKind::None; | |||
8095 | } else { | |||
8096 | if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS)) | |||
8097 | return RecurKind::None; | |||
8098 | if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) || | |||
8099 | !L1->isIdenticalTo(cast<Instruction>(LHS)) || | |||
8100 | !L2->isIdenticalTo(cast<Instruction>(RHS))) | |||
8101 | return RecurKind::None; | |||
8102 | } | |||
8103 | ||||
8104 | TargetTransformInfo::ReductionFlags RdxFlags; | |||
8105 | switch (Pred) { | |||
8106 | default: | |||
8107 | return RecurKind::None; | |||
8108 | case CmpInst::ICMP_SGT: | |||
8109 | case CmpInst::ICMP_SGE: | |||
8110 | return RecurKind::SMax; | |||
8111 | case CmpInst::ICMP_SLT: | |||
8112 | case CmpInst::ICMP_SLE: | |||
8113 | return RecurKind::SMin; | |||
8114 | case CmpInst::ICMP_UGT: | |||
8115 | case CmpInst::ICMP_UGE: | |||
8116 | return RecurKind::UMax; | |||
8117 | case CmpInst::ICMP_ULT: | |||
8118 | case CmpInst::ICMP_ULE: | |||
8119 | return RecurKind::UMin; | |||
8120 | } | |||
8121 | } | |||
8122 | return RecurKind::None; | |||
8123 | } | |||
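  /// Illustrative sketch (not part of getRdxKind()): the cmp+select form of a
  /// signed-max recurrence that the matcher above recognizes alongside the
  /// llvm.smax intrinsic form. The helper name is hypothetical.
  static int exampleSMaxSelectForm(int A, int B) {
    bool Cond = A > B;   // the compare feeding the select
    return Cond ? A : B; // select(Cond, A, B) maps to RecurKind::SMax
  }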
8124 | ||||
8125 | /// Get the index of the first operand. | |||
8126 | static unsigned getFirstOperandIndex(Instruction *I) { | |||
8127 | return isCmpSelMinMax(I) ? 1 : 0; | |||
8128 | } | |||
8129 | ||||
8130 | /// Total number of operands in the reduction operation. | |||
8131 | static unsigned getNumberOfOperands(Instruction *I) { | |||
8132 | return isCmpSelMinMax(I) ? 3 : 2; | |||
8133 | } | |||
8134 | ||||
8135 | /// Checks if the instruction is in basic block \p BB. | |||
8136 | /// For a cmp+sel min/max reduction check that both ops are in \p BB. | |||
8137 | static bool hasSameParent(Instruction *I, BasicBlock *BB) { | |||
8138 | if (isCmpSelMinMax(I)) { | |||
8139 | auto *Sel = cast<SelectInst>(I); | |||
8140 | auto *Cmp = cast<Instruction>(Sel->getCondition()); | |||
8141 | return Sel->getParent() == BB && Cmp->getParent() == BB; | |||
8142 | } | |||
8143 | return I->getParent() == BB; | |||
8144 | } | |||
8145 | ||||
8146 | /// Expected number of uses for reduction operations/reduced values. | |||
8147 | static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) { | |||
8148 | if (IsCmpSelMinMax) { | |||
8149 | // SelectInst must be used twice while the condition op must have single | |||
8150 | // use only. | |||
8151 | if (auto *Sel = dyn_cast<SelectInst>(I)) | |||
8152 | return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse(); | |||
8153 | return I->hasNUses(2); | |||
8154 | } | |||
8155 | ||||
8156 | // Arithmetic reduction operation must be used once only. | |||
8157 | return I->hasOneUse(); | |||
8158 | } | |||
8159 | ||||
8160 | /// Initializes the list of reduction operations. | |||
8161 | void initReductionOps(Instruction *I) { | |||
8162 | if (isCmpSelMinMax(I)) | |||
8163 | ReductionOps.assign(2, ReductionOpsType()); | |||
8164 | else | |||
8165 | ReductionOps.assign(1, ReductionOpsType()); | |||
8166 | } | |||
8167 | ||||
8168 | /// Add all reduction operations for the reduction instruction \p I. | |||
8169 | void addReductionOps(Instruction *I) { | |||
8170 | if (isCmpSelMinMax(I)) { | |||
8171 | ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition()); | |||
8172 | ReductionOps[1].emplace_back(I); | |||
8173 | } else { | |||
8174 | ReductionOps[0].emplace_back(I); | |||
8175 | } | |||
8176 | } | |||
8177 | ||||
8178 | static Value *getLHS(RecurKind Kind, Instruction *I) { | |||
8179 | if (Kind == RecurKind::None) | |||
8180 | return nullptr; | |||
8181 | return I->getOperand(getFirstOperandIndex(I)); | |||
8182 | } | |||
8183 | static Value *getRHS(RecurKind Kind, Instruction *I) { | |||
8184 | if (Kind == RecurKind::None) | |||
8185 | return nullptr; | |||
8186 | return I->getOperand(getFirstOperandIndex(I) + 1); | |||
8187 | } | |||
8188 | ||||
8189 | public: | |||
8190 | HorizontalReduction() = default; | |||
8191 | ||||
8192 | /// Try to find a reduction tree. | |||
8193 | bool matchAssociativeReduction(PHINode *Phi, Instruction *Inst) { | |||
8194 | assert((!Phi || is_contained(Phi->operands(), Inst)) && | |||
8195 | "Phi needs to use the binary operator"); | |||
8196 | assert((isa<BinaryOperator>(Inst) || isa<SelectInst>(Inst) || | |||
8197 | isa<IntrinsicInst>(Inst)) && | |||
8198 | "Expected binop, select, or intrinsic for reduction matching"); | |||
8199 | RdxKind = getRdxKind(Inst); | |||
8200 | ||||
8201 | // We could have an initial reduction that is not an add. | |||
8202 | // r *= v1 + v2 + v3 + v4 | |||
8203 | // In such a case start looking for a tree rooted in the first '+'. | |||
8204 | if (Phi) { | |||
8205 | if (getLHS(RdxKind, Inst) == Phi) { | |||
8206 | Phi = nullptr; | |||
8207 | Inst = dyn_cast<Instruction>(getRHS(RdxKind, Inst)); | |||
8208 | if (!Inst) | |||
8209 | return false; | |||
8210 | RdxKind = getRdxKind(Inst); | |||
8211 | } else if (getRHS(RdxKind, Inst) == Phi) { | |||
8212 | Phi = nullptr; | |||
8213 | Inst = dyn_cast<Instruction>(getLHS(RdxKind, Inst)); | |||
8214 | if (!Inst) | |||
8215 | return false; | |||
8216 | RdxKind = getRdxKind(Inst); | |||
8217 | } | |||
8218 | } | |||
8219 | ||||
8220 | if (!isVectorizable(RdxKind, Inst)) | |||
8221 | return false; | |||
8222 | ||||
8223 | // Analyze "regular" integer/FP types for reductions - no target-specific | |||
8224 | // types or pointers. | |||
8225 | Type *Ty = Inst->getType(); | |||
8226 | if (!isValidElementType(Ty) || Ty->isPointerTy()) | |||
8227 | return false; | |||
8228 | ||||
8229 | // Though the ultimate reduction may have multiple uses, its condition must | |||
8230 | // have only a single use. | |||
8231 | if (auto *Sel = dyn_cast<SelectInst>(Inst)) | |||
8232 | if (!Sel->getCondition()->hasOneUse()) | |||
8233 | return false; | |||
8234 | ||||
8235 | ReductionRoot = Inst; | |||
8236 | ||||
8237 | // The opcode for leaf values that we perform a reduction on. | |||
8238 | // For example: load(x) + load(y) + load(z) + fptoui(w) | |||
8239 | // The leaf opcode for 'w' does not match, so we don't include it as a | |||
8240 | // potential candidate for the reduction. | |||
8241 | unsigned LeafOpcode = 0; | |||
8242 | ||||
8243 | // Post-order traverse the reduction tree starting at Inst. We only handle | |||
8244 | // true trees containing binary operators or selects. | |||
8245 | SmallVector<std::pair<Instruction *, unsigned>, 32> Stack; | |||
8246 | Stack.push_back(std::make_pair(Inst, getFirstOperandIndex(Inst))); | |||
8247 | initReductionOps(Inst); | |||
8248 | while (!Stack.empty()) { | |||
8249 | Instruction *TreeN = Stack.back().first; | |||
8250 | unsigned EdgeToVisit = Stack.back().second++; | |||
8251 | const RecurKind TreeRdxKind = getRdxKind(TreeN); | |||
8252 | bool IsReducedValue = TreeRdxKind != RdxKind; | |||
8253 | ||||
8254 | // Postorder visit. | |||
8255 | if (IsReducedValue || EdgeToVisit >= getNumberOfOperands(TreeN)) { | |||
8256 | if (IsReducedValue) | |||
8257 | ReducedVals.push_back(TreeN); | |||
8258 | else { | |||
8259 | auto ExtraArgsIter = ExtraArgs.find(TreeN); | |||
8260 | if (ExtraArgsIter != ExtraArgs.end() && !ExtraArgsIter->second) { | |||
8261 | // Check if TreeN is an extra argument of its parent operation. | |||
8262 | if (Stack.size() <= 1) { | |||
8263 | // TreeN can't be an extra argument as it is a root reduction | |||
8264 | // operation. | |||
8265 | return false; | |||
8266 | } | |||
8267 | // Yes, TreeN is an extra argument, do not add it to a list of | |||
8268 | // reduction operations. | |||
8269 | // Stack[Stack.size() - 2] always points to the parent operation. | |||
8270 | markExtraArg(Stack[Stack.size() - 2], TreeN); | |||
8271 | ExtraArgs.erase(TreeN); | |||
8272 | } else | |||
8273 | addReductionOps(TreeN); | |||
8274 | } | |||
8275 | // Retract. | |||
8276 | Stack.pop_back(); | |||
8277 | continue; | |||
8278 | } | |||
8279 | ||||
8280 | // Visit operands. | |||
8281 | Value *EdgeVal = getRdxOperand(TreeN, EdgeToVisit); | |||
8282 | auto *EdgeInst = dyn_cast<Instruction>(EdgeVal); | |||
8283 | if (!EdgeInst) { | |||
8284 | // Edge value is not a reduction instruction or a leaf instruction. | |||
8285 | // (It may be a constant, function argument, or something else.) | |||
8286 | markExtraArg(Stack.back(), EdgeVal); | |||
8287 | continue; | |||
8288 | } | |||
8289 | RecurKind EdgeRdxKind = getRdxKind(EdgeInst); | |||
8290 | // Continue analysis if the next operand is a reduction operation or | |||
8291 | // (possibly) a leaf value. If the leaf value opcode is not set, | |||
8292 | // the first non-reduction operation encountered is taken as the | |||
8293 | // leaf opcode. | |||
8294 | // Only handle trees in the current basic block. | |||
8295 | // Each tree node needs to have minimal number of users except for the | |||
8296 | // ultimate reduction. | |||
8297 | const bool IsRdxInst = EdgeRdxKind == RdxKind; | |||
8298 | if (EdgeInst != Phi && EdgeInst != Inst && | |||
8299 | hasSameParent(EdgeInst, Inst->getParent()) && | |||
8300 | hasRequiredNumberOfUses(isCmpSelMinMax(Inst), EdgeInst) && | |||
8301 | (!LeafOpcode || LeafOpcode == EdgeInst->getOpcode() || IsRdxInst)) { | |||
8302 | if (IsRdxInst) { | |||
8303 | // We need to be able to reassociate the reduction operations. | |||
8304 | if (!isVectorizable(EdgeRdxKind, EdgeInst)) { | |||
8305 | // I is an extra argument for TreeN (its parent operation). | |||
8306 | markExtraArg(Stack.back(), EdgeInst); | |||
8307 | continue; | |||
8308 | } | |||
8309 | } else if (!LeafOpcode) { | |||
8310 | LeafOpcode = EdgeInst->getOpcode(); | |||
8311 | } | |||
8312 | Stack.push_back( | |||
8313 | std::make_pair(EdgeInst, getFirstOperandIndex(EdgeInst))); | |||
8314 | continue; | |||
8315 | } | |||
8316 | // I is an extra argument for TreeN (its parent operation). | |||
8317 | markExtraArg(Stack.back(), EdgeInst); | |||
8318 | } | |||
8319 | return true; | |||
8320 | } | |||
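  /// Illustrative sketch (not part of the matcher above): the same
  /// explicit-stack, post-order walk over instruction operands, reduced to
  /// its core. "Visit" is a placeholder callback; the real matcher also
  /// classifies every operand as a reduction op, a reduced value, or an
  /// extra argument, and further bounds the walk by opcode and use counts.
  static void walkOperandsPostOrderExample(
      Instruction *Root, function_ref<void(Instruction *)> Visit) {
    SmallPtrSet<Instruction *, 16> Visited;
    SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
    Stack.emplace_back(Root, 0);
    Visited.insert(Root);
    while (!Stack.empty()) {
      Instruction *N = Stack.back().first;
      unsigned OpIdx = Stack.back().second++;
      if (OpIdx >= N->getNumOperands()) {
        // All operands handled: this is the post-order visit of N.
        Visit(N);
        Stack.pop_back();
        continue;
      }
      // Descend only into not-yet-seen instructions in the same block.
      auto *OpI = dyn_cast<Instruction>(N->getOperand(OpIdx));
      if (OpI && OpI->getParent() == Root->getParent() &&
          Visited.insert(OpI).second)
        Stack.emplace_back(OpI, 0);
    }
  }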
8321 | ||||
8322 | /// Attempt to vectorize the tree found by matchAssociativeReduction. | |||
8323 | bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { | |||
8324 | // If there are a sufficient number of reduction values, reduce | |||
8325 | // to a nearby power-of-2. We can safely generate oversized | |||
8326 | // vectors and rely on the backend to split them to legal sizes. | |||
8327 | unsigned NumReducedVals = ReducedVals.size(); | |||
8328 | if (NumReducedVals < 4) | |||
8329 | return false; | |||
8330 | ||||
8331 | // Intersect the fast-math-flags from all reduction operations. | |||
8332 | FastMathFlags RdxFMF; | |||
8333 | RdxFMF.set(); | |||
8334 | for (ReductionOpsType &RdxOp : ReductionOps) { | |||
8335 | for (Value *RdxVal : RdxOp) { | |||
8336 | if (auto *FPMO = dyn_cast<FPMathOperator>(RdxVal)) | |||
8337 | RdxFMF &= FPMO->getFastMathFlags(); | |||
8338 | } | |||
8339 | } | |||
8340 | ||||
8341 | IRBuilder<> Builder(cast<Instruction>(ReductionRoot)); | |||
8342 | Builder.setFastMathFlags(RdxFMF); | |||
8343 | ||||
8344 | BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; | |||
8345 | // The same extra argument may be used several times, so log each attempt | |||
8346 | // to use it. | |||
8347 | for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) { | |||
8348 | assert(Pair.first && "DebugLoc must be set.")(static_cast <bool> (Pair.first && "DebugLoc must be set." ) ? void (0) : __assert_fail ("Pair.first && \"DebugLoc must be set.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp" , 8348, __extension__ __PRETTY_FUNCTION__)); | |||
8349 | ExternallyUsedValues[Pair.second].push_back(Pair.first); | |||
8350 | } | |||
8351 | ||||
8352 | // The compare instruction of a min/max is the insertion point for new | |||
8353 | // instructions and may be replaced with a new compare instruction. | |||
8354 | auto getCmpForMinMaxReduction = [](Instruction *RdxRootInst) { | |||
8355 | assert(isa<SelectInst>(RdxRootInst) && | |||
8356 | "Expected min/max reduction to have select root instruction"); | |||
8357 | Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition(); | |||
8358 | assert(isa<Instruction>(ScalarCond) && | |||
8359 | "Expected min/max reduction to have compare condition"); | |||
8360 | return cast<Instruction>(ScalarCond); | |||
8361 | }; | |||
8362 | ||||
8363 | // The reduction root is used as the insertion point for new instructions, | |||
8364 | // so set it as externally used to prevent it from being deleted. | |||
8365 | ExternallyUsedValues[ReductionRoot]; | |||
8366 | SmallVector<Value *, 16> IgnoreList; | |||
8367 | for (ReductionOpsType &RdxOp : ReductionOps) | |||
8368 | IgnoreList.append(RdxOp.begin(), RdxOp.end()); | |||
8369 | ||||
8370 | unsigned ReduxWidth = PowerOf2Floor(NumReducedVals); | |||
8371 | if (NumReducedVals > ReduxWidth) { | |||
8372 | // In the loop below, we are building a tree based on a window of | |||
8373 | // 'ReduxWidth' values. | |||
8374 | // If the operands of those values have common traits (compare predicate, | |||
8375 | // constant operand, etc), then we want to group those together to | |||
8376 | // minimize the cost of the reduction. | |||
8377 | ||||
8378 | // TODO: This should be extended to count common operands for | |||
8379 | // compares and binops. | |||
8380 | ||||
8381 | // Step 1: Count the number of times each compare predicate occurs. | |||
8382 | SmallDenseMap<unsigned, unsigned> PredCountMap; | |||
8383 | for (Value *RdxVal : ReducedVals) { | |||
8384 | CmpInst::Predicate Pred; | |||
8385 | if (match(RdxVal, m_Cmp(Pred, m_Value(), m_Value()))) | |||
8386 | ++PredCountMap[Pred]; | |||
8387 | } | |||
8388 | // Step 2: Sort the values so the most common predicates come first. | |||
8389 | stable_sort(ReducedVals, [&PredCountMap](Value *A, Value *B) { | |||
8390 | CmpInst::Predicate PredA, PredB; | |||
8391 | if (match(A, m_Cmp(PredA, m_Value(), m_Value())) && | |||
8392 | match(B, m_Cmp(PredB, m_Value(), m_Value()))) { | |||
8393 | return PredCountMap[PredA] > PredCountMap[PredB]; | |||
8394 | } | |||
8395 | return false; | |||
8396 | }); | |||
8397 | } | |||
8398 | ||||
8399 | Value *VectorizedTree = nullptr; | |||
8400 | unsigned i = 0; | |||
8401 | while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) { | |||
8402 | ArrayRef<Value *> VL(&ReducedVals[i], ReduxWidth); | |||
8403 | V.buildTree(VL, IgnoreList); | |||
8404 | if (V.isTreeTinyAndNotFullyVectorizable()) | |||
8405 | break; | |||
8406 | if (V.isLoadCombineReductionCandidate(RdxKind)) | |||
8407 | break; | |||
8408 | V.reorderTopToBottom(); | |||
8409 | V.reorderBottomToTop(); | |||
8410 | V.buildExternalUses(ExternallyUsedValues); | |||
8411 | ||||
8412 | // For a poison-safe boolean logic reduction, do not replace select | |||
8413 | // instructions with logic ops. All reduced values will be frozen (see | |||
8414 | // below) to prevent leaking poison. | |||
8415 | if (isa<SelectInst>(ReductionRoot) && | |||
8416 | isBoolLogicOp(cast<Instruction>(ReductionRoot)) && | |||
8417 | NumReducedVals != ReduxWidth) | |||
8418 | break; | |||
8419 | ||||
8420 | V.computeMinimumValueSizes(); | |||
8421 | ||||
8422 | // Estimate cost. | |||
8423 | InstructionCost TreeCost = | |||
8424 | V.getTreeCost(makeArrayRef(&ReducedVals[i], ReduxWidth)); | |||
8425 | InstructionCost ReductionCost = | |||
8426 | getReductionCost(TTI, ReducedVals[i], ReduxWidth, RdxFMF); | |||
8427 | InstructionCost Cost = TreeCost + ReductionCost; | |||
8428 | if (!Cost.isValid()) { | |||
8429 | LLVM_DEBUG(dbgs() << "Encountered invalid baseline cost.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("SLP")) { dbgs() << "Encountered invalid baseline cost.\n" ; } } while (false); | |||
8430 | return false; | |||
8431 | } | |||
8432 | if (Cost >= -SLPCostThreshold) { | |||
8433 | V.getORE()->emit([&]() { | |||
8434 | return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial", | |||
8435 | cast<Instruction>(VL[0])) | |||
8436 | << "Vectorizing horizontal reduction is possible " | |||
8437 | << "but not beneficial with cost " << ore::NV("Cost", Cost) | |||
8438 | << " and threshold " | |||
8439 | << ore::NV("Threshold", -SLPCostThreshold); | |||
8440 | }); | |||
8441 | break; | |||
8442 | } | |||
8443 | ||||
8444 | LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" | |||
8445 | << Cost << ". (HorRdx)\n"); | |||
8446 | V.getORE()->emit([&]() { | |||
8447 | return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction", | |||
8448 | cast<Instruction>(VL[0])) | |||
8449 | << "Vectorized horizontal reduction with cost " | |||
8450 | << ore::NV("Cost", Cost) << " and with tree size " | |||
8451 | << ore::NV("TreeSize", V.getTreeSize()); | |||
8452 | }); | |||
8453 | ||||
8454 | // Vectorize a tree. | |||
8455 | DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc(); | |||
8456 | Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues); | |||
8457 | ||||
8458 | // Emit a reduction. If the root is a select (min/max idiom), the insert | |||
8459 | // point is the compare condition of that select. | |||
8460 | Instruction *RdxRootInst = cast<Instruction>(ReductionRoot); | |||
8461 | if (isCmpSelMinMax(RdxRootInst)) | |||
8462 | Builder.SetInsertPoint(getCmpForMinMaxReduction(RdxRootInst)); | |||
8463 | else | |||
8464 | Builder.SetInsertPoint(RdxRootInst); | |||
8465 | ||||
8466 | // To prevent poison from leaking across what used to be sequential, safe, | |||
8467 | // scalar boolean logic operations, the reduction operand must be frozen. | |||
8468 | if (isa<SelectInst>(RdxRootInst) && isBoolLogicOp(RdxRootInst)) | |||
8469 | VectorizedRoot = Builder.CreateFreeze(VectorizedRoot); | |||
8470 | ||||
8471 | Value *ReducedSubTree = | |||
8472 | emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI); | |||
8473 | ||||
8474 | if (!VectorizedTree) { | |||
8475 | // Initialize the final value in the reduction. | |||
8476 | VectorizedTree = ReducedSubTree; | |||
8477 | } else { | |||
8478 | // Update the final value in the reduction. | |||
8479 | Builder.SetCurrentDebugLocation(Loc); | |||
8480 | VectorizedTree = createOp(Builder, RdxKind, VectorizedTree, | |||
8481 | ReducedSubTree, "op.rdx", ReductionOps); | |||
8482 | } | |||
8483 | i += ReduxWidth; | |||
8484 | ReduxWidth = PowerOf2Floor(NumReducedVals - i); | |||
8485 | } | |||
8486 | ||||
8487 | if (VectorizedTree) { | |||
8488 | // Finish the reduction. | |||
8489 | for (; i < NumReducedVals; ++i) { | |||
8490 | auto *I = cast<Instruction>(ReducedVals[i]); | |||
8491 | Builder.SetCurrentDebugLocation(I->getDebugLoc()); | |||
8492 | VectorizedTree = | |||
8493 | createOp(Builder, RdxKind, VectorizedTree, I, "", ReductionOps); | |||
8494 | } | |||
8495 | for (auto &Pair : ExternallyUsedValues) { | |||
8496 | // Add each externally used value to the final reduction. | |||
8497 | for (auto *I : Pair.second) { | |||
8498 | Builder.SetCurrentDebugLocation(I->getDebugLoc()); | |||
8499 | VectorizedTree = createOp(Builder, RdxKind, VectorizedTree, | |||
8500 | Pair.first, "op.extra", I); | |||
8501 | } | |||
8502 | } | |||
8503 | ||||
8504 | ReductionRoot->replaceAllUsesWith(VectorizedTree); | |||
8505 | ||||
8506 | // Mark all scalar reduction ops for deletion, they are replaced by the | |||
8507 | // vector reductions. | |||
8508 | V.eraseInstructions(IgnoreList); | |||
8509 | } | |||
8510 | return VectorizedTree != nullptr; | |||
8511 | } | |||
8512 | ||||
8513 | unsigned numReductionValues() const { return ReducedVals.size(); } | |||
8514 | ||||
8515 | private: | |||
8516 | /// Calculate the cost of a reduction. | |||
8517 | InstructionCost getReductionCost(TargetTransformInfo *TTI, | |||
8518 | Value *FirstReducedVal, unsigned ReduxWidth, | |||
8519 | FastMathFlags FMF) { | |||
8520 | Type *ScalarTy = FirstReducedVal->getType(); | |||
8521 | FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth); | |||
8522 | InstructionCost VectorCost, ScalarCost; | |||
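     | // The returned cost is the cost of one vector reduction of ReduxWidth | |||
     | // elements minus the cost of the ReduxWidth - 1 scalar operations it | |||
     | // replaces, so a more negative result means a more profitable reduction. | |||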
8523 | switch (RdxKind) { | |||
8524 | case RecurKind::Add: | |||
8525 | case RecurKind::Mul: | |||
8526 | case RecurKind::Or: | |||
8527 | case RecurKind::And: | |||
8528 | case RecurKind::Xor: | |||
8529 | case RecurKind::FAdd: | |||
8530 | case RecurKind::FMul: { | |||
8531 | unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind); | |||
8532 | VectorCost = TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF); | |||
8533 | ScalarCost = TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy); | |||
8534 | break; | |||
8535 | } | |||
8536 | case RecurKind::FMax: | |||
8537 | case RecurKind::FMin: { | |||
8538 | auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); | |||
8539 | VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, | |||
8540 | /*unsigned=*/false); | |||
8541 | ScalarCost = | |||
8542 | TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy) + | |||
8543 | TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, | |||
8544 | CmpInst::makeCmpResultType(ScalarTy)); | |||
8545 | break; | |||
8546 | } | |||
8547 | case RecurKind::SMax: | |||
8548 | case RecurKind::SMin: | |||
8549 | case RecurKind::UMax: | |||
8550 | case RecurKind::UMin: { | |||
8551 | auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); | |||
8552 | bool IsUnsigned = | |||
8553 | RdxKind == RecurKind::UMax || RdxKind == RecurKind::UMin; | |||
8554 | VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, IsUnsigned); | |||
8555 | ScalarCost = | |||
8556 | TTI->getCmpSelInstrCost(Instruction::ICmp, ScalarTy) + | |||
8557 | TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, | |||
8558 | CmpInst::makeCmpResultType(ScalarTy)); | |||
8559 | break; | |||
8560 | } | |||
8561 | default: | |||
8562 | llvm_unreachable("Expected arithmetic or min/max reduction operation"); | |||
8563 | } | |||
8564 | ||||
8565 | // Scalar cost is repeated for N-1 elements. | |||
8566 | ScalarCost *= (ReduxWidth - 1); | |||
8567 | LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost | |||
8568 | << " for reduction that starts with " << *FirstReducedVal | |||
8569 | << " (It is a splitting reduction)\n"); | |||
8570 | return VectorCost - ScalarCost; | |||
8571 | } | |||
8572 | ||||
8573 | /// Emit a horizontal reduction of the vectorized value. | |||
8574 | Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, | |||
8575 | unsigned ReduxWidth, const TargetTransformInfo *TTI) { | |||
8576 | assert(VectorizedValue && "Need to have a vectorized tree node"); | |||
8577 | assert(isPowerOf2_32(ReduxWidth) && | |||
8578 | "We only handle power-of-two reductions for now"); | |||
8579 | ||||
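     | // createSimpleTargetReduction emits a single llvm.vector.reduce.* intrinsic | |||
     | // for RdxKind, which the target is expected to lower efficiently. | |||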
8580 | return createSimpleTargetReduction(Builder, TTI, VectorizedValue, RdxKind, | |||
8581 | ReductionOps.back()); | |||
8582 | } | |||
8583 | }; | |||
8584 | ||||
8585 | } // end anonymous namespace | |||
8586 | ||||
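     | /// Return the total number of scalar elements in the vector or homogeneous | |||
     | /// aggregate produced by \p InsertInst, or None if the aggregate is not | |||
     | /// homogeneous. For example, {<2 x float>, <2 x float>} and | |||
     | /// [2 x {float, float}] both yield 4, while {float, i32} is rejected. | |||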
8587 | static Optional<unsigned> getAggregateSize(Instruction *InsertInst) { | |||
8588 | if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) | |||
8589 | return cast<FixedVectorType>(IE->getType())->getNumElements(); | |||
8590 | ||||
8591 | unsigned AggregateSize = 1; | |||
8592 | auto *IV = cast<InsertValueInst>(InsertInst); | |||
8593 | Type *CurrentType = IV->getType(); | |||
8594 | do { | |||
8595 | if (auto *ST = dyn_cast<StructType>(CurrentType)) { | |||
8596 | for (auto *Elt : ST->elements()) | |||
8597 | if (Elt != ST->getElementType(0)) // check homogeneity | |||
8598 | return None; | |||
8599 | AggregateSize *= ST->getNumElements(); | |||
8600 | CurrentType = ST->getElementType(0); | |||
8601 | } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { | |||
8602 | AggregateSize *= AT->getNumElements(); | |||
8603 | CurrentType = AT->getElementType(); | |||
8604 | } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) { | |||
8605 | AggregateSize *= VT->getNumElements(); | |||
8606 | return AggregateSize; | |||
8607 | } else if (CurrentType->isSingleValueType()) { | |||
8608 | return AggregateSize; | |||
8609 | } else { | |||
8610 | return None; | |||
8611 | } | |||
8612 | } while (true); | |||
8613 | } | |||
8614 | ||||
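     | /// Helper for findBuildAggregate: walks a chain of single-use | |||
     | /// insertelement/insertvalue instructions from the last insert upwards, | |||
     | /// recording each inserted scalar and its insert instruction at the slot | |||
     | /// computed from the (possibly nested) insert index plus \p OperandOffset. | |||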
8615 | static bool findBuildAggregate_rec(Instruction *LastInsertInst, | |||
8616 | TargetTransformInfo *TTI, | |||
8617 | SmallVectorImpl<Value *> &BuildVectorOpds, | |||
8618 | SmallVectorImpl<Value *> &InsertElts, | |||
8619 | unsigned OperandOffset) { | |||
8620 | do { | |||
8621 | Value *InsertedOperand = LastInsertInst->getOperand(1); | |||
8622 | Optional<int> OperandIndex = getInsertIndex(LastInsertInst, OperandOffset); | |||
8623 | if (!OperandIndex) | |||
8624 | return false; | |||
8625 | if (isa<InsertElementInst>(InsertedOperand) || | |||
8626 | isa<InsertValueInst>(InsertedOperand)) { | |||
8627 | if (!findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI, | |||
8628 | BuildVectorOpds, InsertElts, *OperandIndex)) | |||
8629 | return false; | |||
8630 | } else { | |||
8631 | BuildVectorOpds[*OperandIndex] = InsertedOperand; | |||
8632 | InsertElts[*OperandIndex] = LastInsertInst; | |||
8633 | } | |||
8634 | LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0)); | |||
8635 | } while (LastInsertInst != nullptr && | |||
8636 | (isa<InsertValueInst>(LastInsertInst) || | |||
8637 | isa<InsertElementInst>(LastInsertInst)) && | |||
8638 | LastInsertInst->hasOneUse()); | |||
8639 | return true; | |||
8640 | } | |||
8641 | ||||
8642 | /// Recognize construction of vectors like | |||
8643 | /// %ra = insertelement <4 x float> poison, float %s0, i32 0 | |||
8644 | /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 | |||
8645 | /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 | |||
8646 | /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 | |||
8647 | /// starting from the last insertelement or insertvalue instruction. | |||
8648 | /// | |||
8649 | /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>}, | |||
8650 | /// {{float, float}, {float, float}}, [2 x {float, float}] and so on. | |||
8651 | /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples. | |||
8652 | /// | |||
8653 | /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type. | |||
8654 | /// | |||
8655 | /// \return true if it matches. | |||
8656 | static bool findBuildAggregate(Instruction *LastInsertInst, | |||
8657 | TargetTransformInfo *TTI, | |||
8658 | SmallVectorImpl<Value *> &BuildVectorOpds, | |||
8659 | SmallVectorImpl<Value *> &InsertElts) { | |||
8660 | ||||
8661 | assert((isa<InsertElementInst>(LastInsertInst) || | |||
8662 | isa<InsertValueInst>(LastInsertInst)) && | |||
8663 | "Expected insertelement or insertvalue instruction!"); | |||
8664 | ||||
8665 | assert((BuildVectorOpds.empty() && InsertElts.empty()) && | |||
8666 | "Expected empty result vectors!"); | |||
8667 | ||||
8668 | Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst); | |||
8669 | if (!AggregateSize) | |||
8670 | return false; | |||
8671 | BuildVectorOpds.resize(*AggregateSize); | |||
8672 | InsertElts.resize(*AggregateSize); | |||
8673 | ||||
8674 | if (findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, | |||
8675 | 0)) { | |||
8676 | llvm::erase_value(BuildVectorOpds, nullptr); | |||
8677 | llvm::erase_value(InsertElts, nullptr); | |||
8678 | if (BuildVectorOpds.size() >= 2) | |||
8679 | return true; | |||
8680 | } | |||
8681 | ||||
8682 | return false; | |||
8683 | } | |||
8684 | ||||
8685 | /// Try and get a reduction value from a phi node. | |||
8686 | /// | |||
8687 | /// Given a phi node \p P in a block \p ParentBB, consider possible reductions | |||
8688 | /// if they come from either \p ParentBB or a containing loop latch. | |||
8689 | /// | |||
8690 | /// \returns A candidate reduction value if possible, or \code nullptr \endcode | |||
8691 | /// if not possible. | |||
8692 | static Value *getReductionValue(const DominatorTree *DT, PHINode *P, | |||
8693 | BasicBlock *ParentBB, LoopInfo *LI) { | |||
8694 | // There are situations where the reduction value is not dominated by the | |||
8695 | // reduction phi. Vectorizing such cases has been reported to cause | |||
8696 | // miscompiles. See PR25787. | |||
8697 | auto DominatedReduxValue = [&](Value *R) { | |||
8698 | return isa<Instruction>(R) && | |||
8699 | DT->dominates(P->getParent(), cast<Instruction>(R)->getParent()); | |||
8700 | }; | |||
8701 | ||||
8702 | Value *Rdx = nullptr; | |||
8703 | ||||
8704 | // Return the incoming value if it comes from the same BB as the phi node. | |||
8705 | if (P->getIncomingBlock(0) == ParentBB) { | |||
8706 | Rdx = P->getIncomingValue(0); | |||
8707 | } else if (P->getIncomingBlock(1) == ParentBB) { | |||
8708 | Rdx = P->getIncomingValue(1); | |||
8709 | } | |||
8710 | ||||
8711 | if (Rdx && DominatedReduxValue(Rdx)) | |||
8712 | return Rdx; | |||
8713 | ||||
8714 | // Otherwise, check whether we have a loop latch to look at. | |||
8715 | Loop *BBL = LI->getLoopFor(ParentBB); | |||
8716 | if (!BBL) | |||
8717 | return nullptr; | |||
8718 | BasicBlock *BBLatch = BBL->getLoopLatch(); | |||
8719 | if (!BBLatch) | |||
8720 | return nullptr; | |||
8721 | ||||
8722 | // There is a loop latch, return the incoming value if it comes from | |||
8723 | // that. This reduction pattern occasionally turns up. | |||
8724 | if (P->getIncomingBlock(0) == BBLatch) { | |||
8725 | Rdx = P->getIncomingValue(0); | |||
8726 | } else if (P->getIncomingBlock(1) == BBLatch) { | |||
8727 | Rdx = P->getIncomingValue(1); | |||
8728 | } | |||
8729 | ||||
8730 | if (Rdx && DominatedReduxValue(Rdx)) | |||
8731 | return Rdx; | |||
8732 | ||||
8733 | return nullptr; | |||
8734 | } | |||
8735 | ||||
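     | /// Match a reduction-compatible binary operation: either a plain binary | |||
     | /// operator or one of the maxnum/minnum/smax/smin/umax/umin intrinsics. | |||
     | /// On success, \p V0 and \p V1 are set to its two operands. | |||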
8736 | static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) { | |||
8737 | if (match(I, m_BinOp(m_Value(V0), m_Value(V1)))) | |||
8738 | return true; | |||
8739 | if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1)))) | |||
8740 | return true; | |||
8741 | if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1)))) | |||
8742 | return true; | |||
8743 | if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1)))) | |||
8744 | return true; | |||
8745 | if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1)))) | |||
8746 | return true; | |||
8747 | if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1)))) | |||
8748 | return true; | |||
8749 | if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1)))) | |||
8750 | return true; | |||
8751 | return false; | |||
8752 | } | |||
8753 | ||||
8754 | /// Attempt to reduce a horizontal reduction. | |||
8755 | /// If it is legal to match a horizontal reduction feeding the phi node \a P | |||
8756 | /// with reduction operators \a Root (or one of its operands) in a basic block | |||
8757 | /// \a BB, then check if it can be done. If a horizontal reduction is not | |||
8758 | /// found and the root instruction is a binary operation, vectorization of | |||
8759 | /// its operands is attempted. | |||
8760 | /// \returns true if a horizontal reduction was matched and reduced, or the | |||
8761 | /// operands of one of the binary instructions were vectorized. | |||
8762 | /// \returns false if a horizontal reduction was not matched (or not possible) | |||
8763 | /// or no vectorization of any binary operation feeding \a Root instruction was | |||
8764 | /// performed. | |||
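     | /// | |||
     | /// A typical candidate is a chain of associative operations such as | |||
     | ///   %op0 = fadd fast float %a, %b | |||
     | ///   %op1 = fadd fast float %op0, %c | |||
     | ///   %op2 = fadd fast float %op1, %d | |||
     | /// feeding \a Root (or the phi node \a P). | |||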
8765 | static bool tryToVectorizeHorReductionOrInstOperands( | |||
8766 | PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R, | |||
8767 | TargetTransformInfo *TTI, | |||
8768 | const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) { | |||
8769 | if (!ShouldVectorizeHor) | |||
8770 | return false; | |||
8771 | ||||
8772 | if (!Root) | |||
8773 | return false; | |||
8774 | ||||
8775 | if (Root->getParent() != BB || isa<PHINode>(Root)) | |||
8776 | return false; | |||
8777 | // Start analysis starting from Root instruction. If horizontal reduction is | |||
8778 | // found, try to vectorize it. If it is not a horizontal reduction or | |||
8779 | // vectorization is not possible or not effective, and currently analyzed | |||
8780 | // instruction is a binary operation, try to vectorize the operands, using | |||
8781 | // pre-order DFS traversal order. If the operands were not vectorized, repeat | |||
8782 | // the same procedure considering each operand as a possible root of the | |||
8783 | // horizontal reduction. | |||
8784 | // Interrupt the process if the Root instruction itself was vectorized or all | |||
8785 | // sub-trees not higher that RecursionMaxDepth were analyzed/vectorized. | |||
8786 | // Skip the analysis of CmpInsts.Compiler implements postanalysis of the | |||
8787 | // CmpInsts so we can skip extra attempts in | |||
8788 | // tryToVectorizeHorReductionOrInstOperands and save compile time. | |||
8789 | SmallVector<std::pair<Instruction *, unsigned>, 8> Stack(1, {Root, 0}); | |||
8790 | SmallPtrSet<Value *, 8> VisitedInstrs; | |||
8791 | bool Res = false; | |||
8792 | while (!Stack.empty()) { | |||
8793 | Instruction *Inst; | |||
8794 | unsigned Level; | |||
8795 | std::tie(Inst, Level) = Stack.pop_back_val(); | |||
8796 | // Do not try to analyze instruction that has already been vectorized. | |||
8797 | // This may happen when we vectorize instruction operands on a previous | |||
8798 | // iteration while stack was populated before that happened. | |||
8799 | if (R.isDeleted(Inst)) | |||
8800 | continue; | |||
8801 | Value *B0, *B1; | |||
8802 | bool IsBinop = matchRdxBop(Inst, B0, B1); | |||
8803 | bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value())); | |||
8804 | if (IsBinop || IsSelect) { | |||
8805 | HorizontalReduction HorRdx; | |||
8806 | if (HorRdx.matchAssociativeReduction(P, Inst)) { | |||
8807 | if (HorRdx.tryToReduce(R, TTI)) { | |||
8808 | Res = true; | |||
8809 | // Set P to nullptr to avoid re-analysis of phi node in | |||
8810 | // matchAssociativeReduction function unless this is the root node. | |||
8811 | P = nullptr; | |||
8812 | continue; | |||
8813 | } | |||
8814 | } | |||
8815 | if (P && IsBinop) { | |||
8816 | Inst = dyn_cast<Instruction>(B0); | |||
8817 | if (Inst == P) | |||
8818 | Inst = dyn_cast<Instruction>(B1); | |||
8819 | if (!Inst) { | |||
8820 | // Set P to nullptr to avoid re-analysis of phi node in | |||
8821 | // matchAssociativeReduction function unless this is the root node. | |||
8822 | P = nullptr; | |||
8823 | continue; | |||
8824 | } | |||
8825 | } | |||
8826 | } | |||
8827 | // Set P to nullptr to avoid re-analysis of phi node in | |||
8828 | // matchAssociativeReduction function unless this is the root node. | |||
8829 | P = nullptr; | |||
8830 | // Do not try to vectorize CmpInst operands, this is done separately. | |||
8831 | if (!isa<CmpInst>(Inst) && Vectorize(Inst, R)) { | |||
8832 | Res = true; | |||
8833 | continue; | |||
8834 | } | |||
8835 | ||||
8836 | // Try to vectorize operands. | |||
8837 | // Continue analysis for the instruction from the same basic block only to | |||
8838 | // save compile time. | |||
8839 | if (++Level < RecursionMaxDepth) | |||
8840 | for (auto *Op : Inst->operand_values()) | |||
8841 | if (VisitedInstrs.insert(Op).second) | |||
8842 | if (auto *I = dyn_cast<Instruction>(Op)) | |||
8843 | // Do not try to vectorize CmpInst operands, this is done | |||
8844 | // separately. | |||
8845 | if (!isa<PHINode>(I) && !isa<CmpInst>(I) && !R.isDeleted(I) && | |||
8846 | I->getParent() == BB) | |||
8847 | Stack.emplace_back(I, Level); | |||
8848 | } | |||
8849 | return Res; | |||
8850 | } | |||
8851 | ||||
8852 | bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V, | |||
8853 | BasicBlock *BB, BoUpSLP &R, | |||
8854 | TargetTransformInfo *TTI) { | |||
8855 | auto *I = dyn_cast_or_null<Instruction>(V); | |||
8856 | if (!I) | |||
8857 | return false; | |||
8858 | ||||
8859 | if (!isa<BinaryOperator>(I)) | |||
8860 | P = nullptr; | |||
8861 | // Try to match and vectorize a horizontal reduction. | |||
8862 | auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool { | |||
8863 | return tryToVectorize(I, R); | |||
8864 | }; | |||
8865 | return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI, | |||
8866 | ExtraVectorization); | |||
8867 | } | |||
8868 | ||||
8869 | bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI, | |||
8870 | BasicBlock *BB, BoUpSLP &R) { | |||
8871 | const DataLayout &DL = BB->getModule()->getDataLayout(); | |||
8872 | if (!R.canMapToVector(IVI->getType(), DL)) | |||
8873 | return false; | |||
8874 | ||||
8875 | SmallVector<Value *, 16> BuildVectorOpds; | |||
8876 | SmallVector<Value *, 16> BuildVectorInsts; | |||
8877 | if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts)) | |||
8878 | return false; | |||
8879 | ||||
8880 | LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); | |||
8881 | // An aggregate value is unlikely to be processed in a vector register; we | |||
8882 | // need to extract the scalars into scalar registers before vectorizing. | |||
8883 | return tryToVectorizeList(BuildVectorOpds, R); | |||
8884 | } | |||
8885 | ||||
8886 | bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI, | |||
8887 | BasicBlock *BB, BoUpSLP &R) { | |||
8888 | SmallVector<Value *, 16> BuildVectorInsts; | |||
8889 | SmallVector<Value *, 16> BuildVectorOpds; | |||
8890 | SmallVector<int> Mask; | |||
8891 | if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) || | |||
8892 | (llvm::all_of(BuildVectorOpds, | |||
8893 | [](Value *V) { return isa<ExtractElementInst>(V); }) && | |||
8894 | isShuffle(BuildVectorOpds, Mask))) | |||
8895 | return false; | |||
8896 | ||||
8897 | LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n"); | |||
8898 | return tryToVectorizeList(BuildVectorInsts, R); | |||
8899 | } | |||
8900 | ||||
8901 | bool SLPVectorizerPass::vectorizeSimpleInstructions( | |||
8902 | SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R, | |||
8903 | bool AtTerminator) { | |||
8904 | bool OpsChanged = false; | |||
8905 | SmallVector<Instruction *, 4> PostponedCmps; | |||
8906 | for (auto *I : reverse(Instructions)) { | |||
8907 | if (R.isDeleted(I)) | |||
8908 | continue; | |||
8909 | if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) | |||
8910 | OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R); | |||
8911 | else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) | |||
8912 | OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R); | |||
8913 | else if (isa<CmpInst>(I)) | |||
8914 | PostponedCmps.push_back(I); | |||
8915 | } | |||
8916 | if (AtTerminator) { | |||
8917 | // Try to find reductions first. | |||
8918 | for (Instruction *I : PostponedCmps) { | |||
8919 | if (R.isDeleted(I)) | |||
8920 | continue; | |||
8921 | for (Value *Op : I->operands()) | |||
8922 | OpsChanged |= vectorizeRootInstruction(nullptr, Op, BB, R, TTI); | |||
8923 | } | |||
8924 | // Try to vectorize operands as vector bundles. | |||
8925 | for (Instruction *I : PostponedCmps) { | |||
8926 | if (R.isDeleted(I)) | |||
8927 | continue; | |||
8928 | OpsChanged |= tryToVectorize(I, R); | |||
8929 | } | |||
8930 | Instructions.clear(); | |||
8931 | } else { | |||
8932 | // Insert in reverse order since the PostponedCmps vector was filled in | |||
8933 | // reverse order. | |||
8934 | Instructions.assign(PostponedCmps.rbegin(), PostponedCmps.rend()); | |||
8935 | } | |||
8936 | return OpsChanged; | |||
8937 | } | |||
8938 | ||||
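     | // First, repeatedly group compatible PHI nodes and try to vectorize them; | |||
     | // then scan the block for reduction roots (PHIs, stores, calls with unused | |||
     | // results, terminators) and postponed insert/compare instructions. | |||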
8939 | bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { | |||
8940 | bool Changed = false; | |||
8941 | SmallVector<Value *, 4> Incoming; | |||
8942 | SmallPtrSet<Value *, 16> VisitedInstrs; | |||
8943 | // Maps phi nodes to the non-phi nodes found in the use tree for each phi | |||
8944 | // node. This makes it easier to identify the chains that can be vectorized | |||
8945 | // most profitably. | |||
8946 | DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes; | |||
8947 | ||||
8948 | bool HaveVectorizedPhiNodes = true; | |||
8949 | while (HaveVectorizedPhiNodes) { | |||
8950 | HaveVectorizedPhiNodes = false; | |||
8951 | ||||
8952 | // Collect the incoming values from the PHIs. | |||
8953 | Incoming.clear(); | |||
8954 | for (Instruction &I : *BB) { | |||
8955 | PHINode *P = dyn_cast<PHINode>(&I); | |||
8956 | if (!P) | |||
8957 | break; | |||
8958 | ||||
8959 | // No need to analyze deleted, vectorized and non-vectorizable | |||
8960 | // instructions. | |||
8961 | if (!VisitedInstrs.count(P) && !R.isDeleted(P) && | |||
8962 | isValidElementType(P->getType())) | |||
8963 | Incoming.push_back(P); | |||
8964 | } | |||
8965 | ||||
8966 | // Find the corresponding non-phi nodes for better matching when trying to | |||
8967 | // build the tree. | |||
8968 | for (Value *V : Incoming) { | |||
8969 | SmallVectorImpl<Value *> &Opcodes = | |||
8970 | PHIToOpcodes.try_emplace(V).first->getSecond(); | |||
8971 | if (!Opcodes.empty()) | |||
8972 | continue; | |||
8973 | SmallVector<Value *, 4> Nodes(1, V); | |||
8974 | SmallPtrSet<Value *, 4> Visited; | |||
8975 | while (!Nodes.empty()) { | |||
8976 | auto *PHI = cast<PHINode>(Nodes.pop_back_val()); | |||
8977 | if (!Visited.insert(PHI).second) | |||
8978 | continue; | |||
8979 | for (Value *V : PHI->incoming_values()) { | |||
8980 | if (auto *PHI1 = dyn_cast<PHINode>((V))) { | |||
8981 | Nodes.push_back(PHI1); | |||
8982 | continue; | |||
8983 | } | |||
8984 | Opcodes.emplace_back(V); | |||
8985 | } | |||
8986 | } | |||
8987 | } | |||
8988 | ||||
8989 | // Sort by type, parent, operands. | |||
8990 | stable_sort(Incoming, [this, &PHIToOpcodes](Value *V1, Value *V2) { | |||
8991 | assert(isValidElementType(V1->getType()) && | |||
8992 | isValidElementType(V2->getType()) && | |||
8993 | "Expected vectorizable types only."); | |||
8994 | // It is fine to compare type IDs here, since we expect only vectorizable | |||
8995 | // types, like ints, floats and pointers; we don't care about other types. | |||
8996 | if (V1->getType()->getTypeID() < V2->getType()->getTypeID()) | |||
8997 | return true; | |||
8998 | if (V1->getType()->getTypeID() > V2->getType()->getTypeID()) | |||
8999 | return false; | |||
9000 | ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; | |||
9001 | ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; | |||
9002 | if (Opcodes1.size() < Opcodes2.size()) | |||
9003 | return true; | |||
9004 | if (Opcodes1.size() > Opcodes2.size()) | |||
9005 | return false; | |||
9006 | for (int I = 0, E = Opcodes1.size(); I < E; ++I) { | |||
9007 | // Undefs are compatible with any other value. | |||
9008 | if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) | |||
9009 | continue; | |||
9010 | if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) | |||
9011 | if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { | |||
9012 | DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent()); | |||
9013 | DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent()); | |||
9014 | if (!NodeI1) | |||
9015 | return NodeI2 != nullptr; | |||
9016 | if (!NodeI2) | |||
9017 | return false; | |||
9018 | assert((NodeI1 == NodeI2) == | |||
9019 | (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && | |||
9020 | "Different nodes should have different DFS numbers"); | |||
9021 | if (NodeI1 != NodeI2) | |||
9022 | return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); | |||
9023 | InstructionsState S = getSameOpcode({I1, I2}); | |||
9024 | if (S.getOpcode()) | |||
9025 | continue; | |||
9026 | return I1->getOpcode() < I2->getOpcode(); | |||
9027 | } | |||
9028 | if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) | |||
9029 | continue; | |||
9030 | if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID()) | |||
9031 | return true; | |||
9032 | if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID()) | |||
9033 | return false; | |||
9034 | } | |||
9035 | return false; | |||
9036 | }); | |||
9037 | ||||
9038 | auto &&AreCompatiblePHIs = [&PHIToOpcodes](Value *V1, Value *V2) { | |||
9039 | if (V1 == V2) | |||
9040 | return true; | |||
9041 | if (V1->getType() != V2->getType()) | |||
9042 | return false; | |||
9043 | ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; | |||
9044 | ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; | |||
9045 | if (Opcodes1.size() != Opcodes2.size()) | |||
9046 | return false; | |||
9047 | for (int I = 0, E = Opcodes1.size(); I < E; ++I) { | |||
9048 | // Undefs are compatible with any other value. | |||
9049 | if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) | |||
9050 | continue; | |||
9051 | if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) | |||
9052 | if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { | |||
9053 | if (I1->getParent() != I2->getParent()) | |||
9054 | return false; | |||
9055 | InstructionsState S = getSameOpcode({I1, I2}); | |||
9056 | if (S.getOpcode()) | |||
9057 | continue; | |||
9058 | return false; | |||
9059 | } | |||
9060 | if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) | |||
9061 | continue; | |||
9062 | if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID()) | |||
9063 | return false; | |||
9064 | } | |||
9065 | return true; | |||
9066 | }; | |||
9067 | ||||
9068 | // Try to vectorize elements based on their type. | |||
9069 | SmallVector<Value *, 4> Candidates; | |||
9070 | for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(), | |||
9071 | E = Incoming.end(); | |||
9072 | IncIt != E;) { | |||
9073 | ||||
9074 | // Look for the next elements with the same type, parent and operand | |||
9075 | // kinds. | |||
9076 | SmallVector<Value *, 4>::iterator SameTypeIt = IncIt; | |||
9077 | while (SameTypeIt != E && AreCompatiblePHIs(*SameTypeIt, *IncIt)) { | |||
9078 | VisitedInstrs.insert(*SameTypeIt); | |||
9079 | ++SameTypeIt; | |||
9080 | } | |||
9081 | ||||
9082 | // Try to vectorize them. | |||
9083 | unsigned NumElts = (SameTypeIt - IncIt); | |||
9084 | LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs (" | |||
9085 | << NumElts << ")\n"); | |||
9086 | // The order in which the phi nodes appear in the program does not matter. | |||
9087 | // So allow tryToVectorizeList to reorder them if it is beneficial. This | |||
9088 | // is done when there are exactly two elements since tryToVectorizeList | |||
9089 | // asserts that there are only two values when AllowReorder is true. | |||
9090 | if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R)) { | |||
9091 | // Success, start over because instructions might have been changed. | |||
9092 | HaveVectorizedPhiNodes = true; | |||
9093 | Changed = true; | |||
9094 | } else if (NumElts < 4 && | |||
9095 | (Candidates.empty() || | |||
9096 | Candidates.front()->getType() == (*IncIt)->getType())) { | |||
9097 | Candidates.append(IncIt, std::next(IncIt, NumElts)); | |||
9098 | } | |||
9099 | // Final attempt to vectorize phis with the same types. | |||
9100 | if (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType()) { | |||
9101 | if (Candidates.size() > 1 && tryToVectorizeList(Candidates, R)) { | |||
9102 | // Success, start over because instructions might have been changed. | |||
9103 | HaveVectorizedPhiNodes = true; | |||
9104 | Changed = true; | |||
9105 | } | |||
9106 | Candidates.clear(); | |||
9107 | } | |||
9108 | ||||
9109 | // Start over at the next instruction of a different type (or the end). | |||
9110 | IncIt = SameTypeIt; | |||
9111 | } | |||
9112 | } | |||
9113 | ||||
9114 | VisitedInstrs.clear(); | |||
9115 | ||||
9116 | SmallVector<Instruction *, 8> PostProcessInstructions; | |||
9117 | SmallDenseSet<Instruction *, 4> KeyNodes; | |||
9118 | for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { | |||
9119 | // Skip instructions with scalable types. The number of elements is unknown | |||
9120 | // at compile time for scalable types. | |||
9121 | if (isa<ScalableVectorType>(it->getType())) | |||
9122 | continue; | |||
9123 | ||||
9124 | // Skip instructions marked for deletion. | |||
9125 | if (R.isDeleted(&*it)) | |||
9126 | continue; | |||
9127 | // We may go through BB multiple times, so skip instructions already checked. | |||
9128 | if (!VisitedInstrs.insert(&*it).second) { | |||
9129 | if (it->use_empty() && KeyNodes.contains(&*it) && | |||
9130 | vectorizeSimpleInstructions(PostProcessInstructions, BB, R, | |||
9131 | it->isTerminator())) { | |||
9132 | // We would like to start over since some instructions are deleted | |||
9133 | // and the iterator may become invalid. | |||
9134 | Changed = true; | |||
9135 | it = BB->begin(); | |||
9136 | e = BB->end(); | |||
9137 | } | |||
9138 | continue; | |||
9139 | } | |||
9140 | ||||
9141 | if (isa<DbgInfoIntrinsic>(it)) | |||
9142 | continue; | |||
9143 | ||||
9144 | // Try to vectorize reductions that use PHINodes. | |||
9145 | if (PHINode *P = dyn_cast<PHINode>(it)) { | |||
9146 | // Check that the PHI is a reduction PHI. | |||
9147 | if (P->getNumIncomingValues() == 2) { | |||
9148 | // Try to match and vectorize a horizontal reduction. | |||
9149 | if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R, | |||
9150 | TTI)) { | |||
9151 | Changed = true; | |||
9152 | it = BB->begin(); | |||
9153 | e = BB->end(); | |||
9154 | continue; | |||
9155 | } | |||
9156 | } | |||
9157 | // Try to vectorize the incoming values of the PHI, to catch reductions | |||
9158 | // that feed into PHIs. | |||
9159 | for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) { | |||
9160 | // Skip if the incoming block is the current BB for now. Also, bypass | |||
9161 | // unreachable IR for efficiency and to avoid crashing. | |||
9162 | // TODO: Collect the skipped incoming values and try to vectorize them | |||
9163 | // after processing BB. | |||
9164 | if (BB == P->getIncomingBlock(I) || | |||
9165 | !DT->isReachableFromEntry(P->getIncomingBlock(I))) | |||
9166 | continue; | |||
9167 | ||||
9168 | Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I), | |||
9169 | P->getIncomingBlock(I), R, TTI); | |||
9170 | } | |||
9171 | continue; | |||
9172 | } | |||
9173 | ||||
9174 | // Ran into an instruction without users, such as a terminator, a store, or | |||
9175 | // a function call with an ignored return value. Ignore unused instructions | |||
9176 | // (based on the instruction type, except for CallInst and InvokeInst). | |||
9177 | if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) || | |||
9178 | isa<InvokeInst>(it))) { | |||
9179 | KeyNodes.insert(&*it); | |||
9180 | bool OpsChanged = false; | |||
9181 | if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) { | |||
9182 | for (auto *V : it->operand_values()) { | |||
9183 | // Try to match and vectorize a horizontal reduction. | |||
9184 | OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI); | |||
9185 | } | |||
9186 | } | |||
9187 | // Start vectorization of post-process list of instructions from the | |||
9188 | // top-tree instructions to try to vectorize as many instructions as | |||
9189 | // possible. | |||
9190 | OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R, | |||
9191 | it->isTerminator()); | |||
9192 | if (OpsChanged) { | |||
9193 | // We would like to start over since some instructions are deleted | |||
9194 | // and the iterator may become invalid. | |||
9195 | Changed = true; | |||
9196 | it = BB->begin(); | |||
9197 | e = BB->end(); | |||
9198 | continue; | |||
9199 | } | |||
9200 | } | |||
9201 | ||||
9202 | if (isa<InsertElementInst>(it) || isa<CmpInst>(it) || | |||
9203 | isa<InsertValueInst>(it)) | |||
9204 | PostProcessInstructions.push_back(&*it); | |||
9205 | } | |||
9206 | ||||
9207 | return Changed; | |||
9208 | } | |||
9209 | ||||
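     | // For each per-block list of collected getelementptrs, try to vectorize the | |||
     | // single, non-constant index computations in chunks that fit the target's | |||
     | // maximum vector register size. | |||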
9210 | bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) { | |||
9211 | auto Changed = false; | |||
9212 | for (auto &Entry : GEPs) { | |||
9213 | // If the getelementptr list has fewer than two elements, there's nothing | |||
9214 | // to do. | |||
9215 | if (Entry.second.size() < 2) | |||
9216 | continue; | |||
9217 | ||||
9218 | LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length " | |||
9219 | << Entry.second.size() << ".\n"); | |||
9220 | ||||
9221 | // Process the GEP list in chunks suitable for the target's supported | |||
9222 | // vector size. If a vector register can't hold 1 element, we are done. We | |||
9223 | // are trying to vectorize the index computations, so the maximum number of | |||
9224 | // elements is based on the size of the index expression, rather than the | |||
9225 | // size of the GEP itself (the target's pointer size). | |||
9226 | unsigned MaxVecRegSize = R.getMaxVecRegSize(); | |||
9227 | unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin()); | |||
9228 | if (MaxVecRegSize < EltSize) | |||
9229 | continue; | |||
9230 | ||||
9231 | unsigned MaxElts = MaxVecRegSize / EltSize; | |||
9232 | for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) { | |||
9233 | auto Len = std::min<unsigned>(BE - BI, MaxElts); | |||
9234 | ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len); | |||
9235 | ||||
9236 | // Initialize a set of candidate getelementptrs. Note that we use a | |||
9237 | // SetVector here to preserve program order. If the index computations | |||
9238 | // are vectorizable and begin with loads, we want to minimize the chance | |||
9239 | // of having to reorder them later. | |||
9240 | SetVector<Value *> Candidates(GEPList.begin(), GEPList.end()); | |||
9241 | ||||
9242 | // Some of the candidates may have already been vectorized after we | |||
9243 | // initially collected them. If so, they are marked as deleted, so remove | |||
9244 | // them from the set of candidates. | |||
9245 | Candidates.remove_if( | |||
9246 | [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); }); | |||
9247 | ||||
9248 | // Remove from the set of candidates all pairs of getelementptrs with | |||
9249 | // constant differences. Such getelementptrs are likely not good | |||
9250 | // candidates for vectorization in a bottom-up phase since one can be | |||
9251 | // computed from the other. We also ensure all candidate getelementptr | |||
9252 | // indices are unique. | |||
9253 | for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) { | |||
9254 | auto *GEPI = GEPList[I]; | |||
9255 | if (!Candidates.count(GEPI)) | |||
9256 | continue; | |||
9257 | auto *SCEVI = SE->getSCEV(GEPList[I]); | |||
9258 | for (int J = I + 1; J < E && Candidates.size() > 1; ++J) { | |||
9259 | auto *GEPJ = GEPList[J]; | |||
9260 | auto *SCEVJ = SE->getSCEV(GEPList[J]); | |||
9261 | if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) { | |||
9262 | Candidates.remove(GEPI); | |||
9263 | Candidates.remove(GEPJ); | |||
9264 | } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) { | |||
9265 | Candidates.remove(GEPJ); | |||
9266 | } | |||
9267 | } | |||
9268 | } | |||
9269 | ||||
9270 | // We break out of the above computation as soon as we know there are | |||
9271 | // fewer than two candidates remaining. | |||
9272 | if (Candidates.size() < 2) | |||
9273 | continue; | |||
9274 | ||||
9275 | // Add the single, non-constant index of each candidate to the bundle. We | |||
9276 | // ensured the indices met these constraints when we originally collected | |||
9277 | // the getelementptrs. | |||
9278 | SmallVector<Value *, 16> Bundle(Candidates.size()); | |||
9279 | auto BundleIndex = 0u; | |||
9280 | for (auto *V : Candidates) { | |||
9281 | auto *GEP = cast<GetElementPtrInst>(V); | |||
9282 | auto *GEPIdx = GEP->idx_begin()->get(); | |||
9283 | assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx)); | |||
9284 | Bundle[BundleIndex++] = GEPIdx; | |||
9285 | } | |||
9286 | ||||
9287 | // Try and vectorize the indices. We are currently only interested in | |||
9288 | // gather-like cases of the form: | |||
9289 | // | |||
9290 | // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ... | |||
9291 | // | |||
9292 | // where the loads of "a", the loads of "b", and the subtractions can be | |||
9293 | // performed in parallel. It's likely that detecting this pattern in a | |||
9294 | // bottom-up phase will be simpler and less costly than building a | |||
9295 | // full-blown top-down phase beginning at the consecutive loads. | |||
9296 | Changed |= tryToVectorizeList(Bundle, R); | |||
9297 | } | |||
9298 | } | |||
9299 | return Changed; | |||
9300 | } | |||
9301 | ||||
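     | // Sort each collected store group so that stores with compatible pointer | |||
     | // types and value operands are adjacent, then hand each compatible run to | |||
     | // vectorizeStores. | |||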
9302 | bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) { | |||
9303 | bool Changed = false; | |||
9304 | // Sort by type, base pointers and value operands. Value operands must be | |||
9305 | // compatible (have the same opcode, same parent), otherwise it is | |||
9306 | // definitely not profitable to try to vectorize them. | |||
9307 | auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) { | |||
9308 | if (V->getPointerOperandType()->getTypeID() < | |||
9309 | V2->getPointerOperandType()->getTypeID()) | |||
9310 | return true; | |||
9311 | if (V->getPointerOperandType()->getTypeID() > | |||
9312 | V2->getPointerOperandType()->getTypeID()) | |||
9313 | return false; | |||
9314 | // UndefValues are compatible with all other values. | |||
9315 | if (isa<UndefValue>(V->getValueOperand()) || | |||
9316 | isa<UndefValue>(V2->getValueOperand())) | |||
9317 | return false; | |||
9318 | if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand())) | |||
9319 | if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) { | |||
9320 | DomTreeNodeBase<llvm::BasicBlock> *NodeI1 = | |||
9321 | DT->getNode(I1->getParent()); | |||
9322 | DomTreeNodeBase<llvm::BasicBlock> *NodeI2 = | |||
9323 | DT->getNode(I2->getParent()); | |||
9324 | assert(NodeI1 && "Should only process reachable instructions"); | |||
9325 | assert(NodeI2 && "Should only process reachable instructions"); | |||
9326 | assert((NodeI1 == NodeI2) == | |||
9327 | (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && | |||
9328 | "Different nodes should have different DFS numbers"); | |||
9329 | if (NodeI1 != NodeI2) | |||
9330 | return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); | |||
9331 | InstructionsState S = getSameOpcode({I1, I2}); | |||
9332 | if (S.getOpcode()) | |||
9333 | return false; | |||
9334 | return I1->getOpcode() < I2->getOpcode(); | |||
9335 | } | |||
9336 | if (isa<Constant>(V->getValueOperand()) && | |||
9337 | isa<Constant>(V2->getValueOperand())) | |||
9338 | return false; | |||
9339 | return V->getValueOperand()->getValueID() < | |||
9340 | V2->getValueOperand()->getValueID(); | |||
9341 | }; | |||
9342 | ||||
9343 | auto &&AreCompatibleStores = [](StoreInst *V1, StoreInst *V2) { | |||
9344 | if (V1 == V2) | |||
9345 | return true; | |||
9346 | if (V1->getPointerOperandType() != V2->getPointerOperandType()) | |||
9347 | return false; | |||
9348 | // Undefs are compatible with any other value. | |||
9349 | if (isa<UndefValue>(V1->getValueOperand()) || | |||
9350 | isa<UndefValue>(V2->getValueOperand())) | |||
9351 | return true; | |||
9352 | if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand())) | |||
9353 | if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) { | |||
9354 | if (I1->getParent() != I2->getParent()) | |||
9355 | return false; | |||
9356 | InstructionsState S = getSameOpcode({I1, I2}); | |||
9357 | return S.getOpcode() > 0; | |||
9358 | } | |||
9359 | if (isa<Constant>(V1->getValueOperand()) && | |||
9360 | isa<Constant>(V2->getValueOperand())) | |||
9361 | return true; | |||
9362 | return V1->getValueOperand()->getValueID() == | |||
9363 | V2->getValueOperand()->getValueID(); | |||
9364 | }; | |||
9365 | ||||
9366 | // Attempt to sort and vectorize each of the store-groups. | |||
9367 | for (auto &Pair : Stores) { | |||
9368 | if (Pair.second.size() < 2) | |||
9369 | continue; | |||
9370 | ||||
9371 | LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " | |||
9372 | << Pair.second.size() << ".\n"); | |||
9373 | ||||
9374 | stable_sort(Pair.second, StoreSorter); | |||
9375 | ||||
9376 | // Try to vectorize elements based on their compatibility. | |||
9377 | for (ArrayRef<StoreInst *>::iterator IncIt = Pair.second.begin(), | |||
9378 | E = Pair.second.end(); | |||
9379 | IncIt != E;) { | |||
9380 | ||||
9381 | // Look for the next elements with the same type. | |||
9382 | ArrayRef<StoreInst *>::iterator SameTypeIt = IncIt; | |||
9383 | Type *EltTy = (*IncIt)->getPointerOperand()->getType(); | |||
9384 | ||||
9385 | while (SameTypeIt != E && AreCompatibleStores(*SameTypeIt, *IncIt)) | |||
9386 | ++SameTypeIt; | |||
9387 | ||||
9388 | // Try to vectorize them. | |||
9389 | unsigned NumElts = (SameTypeIt - IncIt); | |||
9390 | LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at stores (" | |||
9391 | << NumElts << ")\n"); | |||
9392 | if (NumElts > 1 && !EltTy->getPointerElementType()->isVectorTy() && | |||
9393 | vectorizeStores(makeArrayRef(IncIt, NumElts), R)) { | |||
9394 | // Success, start over because instructions might have been changed. | |||
9395 | Changed = true; | |||
9396 | } | |||
9397 | ||||
9398 | // Start over at the next instruction of a different type (or the end). | |||
9399 | IncIt = SameTypeIt; | |||
9400 | } | |||
9401 | } | |||
9402 | return Changed; | |||
9403 | } | |||
9404 | ||||
9405 | char SLPVectorizer::ID = 0; | |||
9406 | ||||
9407 | static const char lv_name[] = "SLP Vectorizer"; | |||
9408 | ||||
9409 | INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false) | |||
9410 | INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) | |||
9411 | INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) | |||
9412 | INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) | |||
9413 | INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) | |||
9414 | INITIALIZE_PASS_DEPENDENCY(LoopSimplify) | |||
9415 | INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) | |||
9416 | INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) | |||
9417 | INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) | |||
9418 | INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false) | |||
9419 | ||||
9420 | Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); } |