Bug Summary

File: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
Warning: line 14115, column 14
3rd function call argument is an uninitialized value
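
For reference, a minimal sketch of the pattern this checker reports (purely
illustrative; it is not the code at line 14115): a local variable is left
uninitialized on some control-flow path and later passed as the third
argument of a call.

    void use(int a, int b, int c);

    void example(bool flag) {
      int val;         // no initializer
      if (flag)
        val = 1;       // 'val' stays uninitialized when flag is false
      use(0, 0, val);  // 3rd function call argument is an uninitialized value
    }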

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name DAGCombiner.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/CodeGen/SelectionDAG -I /build/llvm-toolchain-snapshot-7~svn338205/lib/CodeGen/SelectionDAG -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn338205/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/lib/gcc/x86_64-linux-gnu/8/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/CodeGen/SelectionDAG -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-07-29-043837-17923-1 -x c++ /build/llvm-toolchain-snapshot-7~svn338205/lib/CodeGen/SelectionDAG/DAGCombiner.cpp -faddrsig
1//===- DAGCombiner.cpp - Implement a DAG node combiner --------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This pass combines dag nodes to form fewer, simpler DAG nodes. It can be run
11// both before and after the DAG is legalized.
12//
13// This pass is not a substitute for the LLVM IR instcombine pass. This pass is
14// primarily intended to handle simplification opportunities that are implicit
15// in the LLVM IR and exposed by the various codegen lowering phases.
16//
17//===----------------------------------------------------------------------===//
18
19#include "llvm/ADT/APFloat.h"
20#include "llvm/ADT/APInt.h"
21#include "llvm/ADT/ArrayRef.h"
22#include "llvm/ADT/DenseMap.h"
23#include "llvm/ADT/None.h"
24#include "llvm/ADT/Optional.h"
25#include "llvm/ADT/STLExtras.h"
26#include "llvm/ADT/SetVector.h"
27#include "llvm/ADT/SmallBitVector.h"
28#include "llvm/ADT/SmallPtrSet.h"
29#include "llvm/ADT/SmallSet.h"
30#include "llvm/ADT/SmallVector.h"
31#include "llvm/ADT/Statistic.h"
32#include "llvm/Analysis/AliasAnalysis.h"
33#include "llvm/Analysis/MemoryLocation.h"
34#include "llvm/CodeGen/DAGCombine.h"
35#include "llvm/CodeGen/ISDOpcodes.h"
36#include "llvm/CodeGen/MachineFrameInfo.h"
37#include "llvm/CodeGen/MachineFunction.h"
38#include "llvm/CodeGen/MachineMemOperand.h"
39#include "llvm/CodeGen/RuntimeLibcalls.h"
40#include "llvm/CodeGen/SelectionDAG.h"
41#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
42#include "llvm/CodeGen/SelectionDAGNodes.h"
43#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
44#include "llvm/CodeGen/TargetLowering.h"
45#include "llvm/CodeGen/TargetRegisterInfo.h"
46#include "llvm/CodeGen/TargetSubtargetInfo.h"
47#include "llvm/CodeGen/ValueTypes.h"
48#include "llvm/IR/Attributes.h"
49#include "llvm/IR/Constant.h"
50#include "llvm/IR/DataLayout.h"
51#include "llvm/IR/DerivedTypes.h"
52#include "llvm/IR/Function.h"
53#include "llvm/IR/LLVMContext.h"
54#include "llvm/IR/Metadata.h"
55#include "llvm/Support/Casting.h"
56#include "llvm/Support/CodeGen.h"
57#include "llvm/Support/CommandLine.h"
58#include "llvm/Support/Compiler.h"
59#include "llvm/Support/Debug.h"
60#include "llvm/Support/ErrorHandling.h"
61#include "llvm/Support/KnownBits.h"
62#include "llvm/Support/MachineValueType.h"
63#include "llvm/Support/MathExtras.h"
64#include "llvm/Support/raw_ostream.h"
65#include "llvm/Target/TargetMachine.h"
66#include "llvm/Target/TargetOptions.h"
67#include <algorithm>
68#include <cassert>
69#include <cstdint>
70#include <functional>
71#include <iterator>
72#include <string>
73#include <tuple>
74#include <utility>
75#include <vector>
76
77using namespace llvm;
78
79#define DEBUG_TYPE "dagcombine"
80
81STATISTIC(NodesCombined   , "Number of dag nodes combined");
82STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
83STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
84STATISTIC(OpsNarrowed     , "Number of load/op/store narrowed");
85STATISTIC(LdStFP2Int      , "Number of fp load/store pairs transformed to int");
86STATISTIC(SlicedLoads, "Number of load sliced");
87
88static cl::opt<bool>
89CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
90 cl::desc("Enable DAG combiner's use of IR alias analysis"));
91
92static cl::opt<bool>
93UseTBAA("combiner-use-tbaa", cl::Hidden, cl::init(true),
94 cl::desc("Enable DAG combiner's use of TBAA"));
95
96#ifndef NDEBUG
97static cl::opt<std::string>
98CombinerAAOnlyFunc("combiner-aa-only-func", cl::Hidden,
99 cl::desc("Only use DAG-combiner alias analysis in this"
100 " function"));
101#endif
102
103/// Hidden option to stress test load slicing, i.e., when this option
104/// is enabled, load slicing bypasses most of its profitability guards.
105static cl::opt<bool>
106StressLoadSlicing("combiner-stress-load-slicing", cl::Hidden,
107 cl::desc("Bypass the profitability model of load slicing"),
108 cl::init(false));
109
110static cl::opt<bool>
111 MaySplitLoadIndex("combiner-split-load-index", cl::Hidden, cl::init(true),
112 cl::desc("DAG combiner may split indexing from loads"));
113
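// Each cl::opt above registers a command-line switch of the same name; for
// example (assumed typical usage), passing -combiner-stress-load-slicing to
// llc bypasses the load-slicing profitability model during this pass.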
114namespace {
115
116 class DAGCombiner {
117 SelectionDAG &DAG;
118 const TargetLowering &TLI;
119 CombineLevel Level;
120 CodeGenOpt::Level OptLevel;
121 bool LegalOperations = false;
122 bool LegalTypes = false;
123 bool ForCodeSize;
124
125 /// Worklist of all of the nodes that need to be simplified.
126 ///
127 /// This must behave as a stack -- new nodes to process are pushed onto the
128 /// back and when processing we pop off of the back.
129 ///
130 /// The worklist will not contain duplicates but may contain null entries
131 /// due to nodes being deleted from the underlying DAG.
132 SmallVector<SDNode *, 64> Worklist;
133
134 /// Mapping from an SDNode to its position on the worklist.
135 ///
136 /// This is used to find and remove nodes from the worklist (by nulling
137 /// them) when they are deleted from the underlying DAG. It relies on
138 /// stable indices of nodes within the worklist.
139 DenseMap<SDNode *, unsigned> WorklistMap;
140
141 /// Set of nodes which have been combined (at least once).
142 ///
143 /// This is used to allow us to reliably add any operands of a DAG node
144 /// which have not yet been combined to the worklist.
145 SmallPtrSet<SDNode *, 32> CombinedNodes;
146
147 // AA - Used for DAG load/store alias analysis.
148 AliasAnalysis *AA;
149
150 /// When an instruction is simplified, add all users of the instruction to
151 /// the work lists because they might get more simplified now.
152 void AddUsersToWorklist(SDNode *N) {
153 for (SDNode *Node : N->uses())
154 AddToWorklist(Node);
155 }
156
157 /// Call the node-specific routine that folds each particular type of node.
158 SDValue visit(SDNode *N);
159
160 public:
161 DAGCombiner(SelectionDAG &D, AliasAnalysis *AA, CodeGenOpt::Level OL)
162 : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
163 OptLevel(OL), AA(AA) {
164 ForCodeSize = DAG.getMachineFunction().getFunction().optForSize();
165
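      // Compute the widest value type the target can store legally; this
      // bound is consulted later when merging consecutive stores.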
166 MaximumLegalStoreInBits = 0;
167 for (MVT VT : MVT::all_valuetypes())
168 if (EVT(VT).isSimple() && VT != MVT::Other &&
169 TLI.isTypeLegal(EVT(VT)) &&
170 VT.getSizeInBits() >= MaximumLegalStoreInBits)
171 MaximumLegalStoreInBits = VT.getSizeInBits();
172 }
173
174 /// Add to the worklist making sure its instance is at the back (next to be
175 /// processed.)
176 void AddToWorklist(SDNode *N) {
177      assert(N->getOpcode() != ISD::DELETED_NODE &&
178             "Deleted Node added to Worklist");
179
180 // Skip handle nodes as they can't usefully be combined and confuse the
181 // zero-use deletion strategy.
182 if (N->getOpcode() == ISD::HANDLENODE)
183 return;
184
185 if (WorklistMap.insert(std::make_pair(N, Worklist.size())).second)
186 Worklist.push_back(N);
187 }
188
189 /// Remove all instances of N from the worklist.
190 void removeFromWorklist(SDNode *N) {
191 CombinedNodes.erase(N);
192
193 auto It = WorklistMap.find(N);
194 if (It == WorklistMap.end())
195 return; // Not in the worklist.
196
197 // Null out the entry rather than erasing it to avoid a linear operation.
198 Worklist[It->second] = nullptr;
199 WorklistMap.erase(It);
200 }
201
202 void deleteAndRecombine(SDNode *N);
203 bool recursivelyDeleteUnusedNodes(SDNode *N);
204
205 /// Replaces all uses of the results of one DAG node with new values.
206 SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
207 bool AddTo = true);
208
209 /// Replaces all uses of the results of one DAG node with new values.
210 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) {
211 return CombineTo(N, &Res, 1, AddTo);
212 }
213
214 /// Replaces all uses of the results of one DAG node with new values.
215 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
216 bool AddTo = true) {
217 SDValue To[] = { Res0, Res1 };
218 return CombineTo(N, To, 2, AddTo);
219 }
220
221 void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO);
222
223 private:
224 unsigned MaximumLegalStoreInBits;
225
226 /// Check the specified integer node value to see if it can be simplified or
227 /// if things it uses can be simplified by bit propagation.
228 /// If so, return true.
229 bool SimplifyDemandedBits(SDValue Op) {
230 unsigned BitWidth = Op.getScalarValueSizeInBits();
231 APInt Demanded = APInt::getAllOnesValue(BitWidth);
232 return SimplifyDemandedBits(Op, Demanded);
233 }
234
235 /// Check the specified vector node value to see if it can be simplified or
236 /// if things it uses can be simplified as it only uses some of the
237 /// elements. If so, return true.
238 bool SimplifyDemandedVectorElts(SDValue Op) {
239 unsigned NumElts = Op.getValueType().getVectorNumElements();
240 APInt Demanded = APInt::getAllOnesValue(NumElts);
241 return SimplifyDemandedVectorElts(Op, Demanded);
242 }
243
244 bool SimplifyDemandedBits(SDValue Op, const APInt &Demanded);
245 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &Demanded,
246 bool AssumeSingleUse = false);
247
248 bool CombineToPreIndexedLoadStore(SDNode *N);
249 bool CombineToPostIndexedLoadStore(SDNode *N);
250 SDValue SplitIndexingFromLoad(LoadSDNode *LD);
251 bool SliceUpLoad(SDNode *N);
252
253 /// Replace an ISD::EXTRACT_VECTOR_ELT of a load with a narrowed
254 /// load.
255 ///
256 /// \param EVE ISD::EXTRACT_VECTOR_ELT to be replaced.
257 /// \param InVecVT type of the input vector to EVE with bitcasts resolved.
258 /// \param EltNo index of the vector element to load.
259 /// \param OriginalLoad load that EVE came from to be replaced.
260 /// \returns EVE on success SDValue() on failure.
261 SDValue ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
262 SDNode *EVE, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad);
263 void ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad);
264 SDValue PromoteOperand(SDValue Op, EVT PVT, bool &Replace);
265 SDValue SExtPromoteOperand(SDValue Op, EVT PVT);
266 SDValue ZExtPromoteOperand(SDValue Op, EVT PVT);
267 SDValue PromoteIntBinOp(SDValue Op);
268 SDValue PromoteIntShiftOp(SDValue Op);
269 SDValue PromoteExtend(SDValue Op);
270 bool PromoteLoad(SDValue Op);
271
272 /// Call the node-specific routine that knows how to fold each
273 /// particular type of node. If that doesn't do anything, try the
274 /// target-specific DAG combines.
275 SDValue combine(SDNode *N);
276
277 // Visitation implementation - Implement dag node combining for different
278 // node types. The semantics are as follows:
279 // Return Value:
280 // SDValue.getNode() == 0 - No change was made
281 // SDValue.getNode() == N - N was replaced, is dead and has been handled.
282 // otherwise - N should be replaced by the returned Operand.
283 //
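    // For instance (an illustrative sketch, not a routine from this file),
    // a typical visit method follows this protocol:
    //   SDValue DAGCombiner::visitFOO(SDNode *N) {
    //     if (N->getOperand(0).isUndef())             // a fold applies
    //       return DAG.getUNDEF(N->getValueType(0));  // replacement operand
    //     return SDValue();                           // no change was made
    //   }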
284 SDValue visitTokenFactor(SDNode *N);
285 SDValue visitMERGE_VALUES(SDNode *N);
286 SDValue visitADD(SDNode *N);
287 SDValue visitADDLike(SDValue N0, SDValue N1, SDNode *LocReference);
288 SDValue visitSUB(SDNode *N);
289 SDValue visitADDC(SDNode *N);
290 SDValue visitUADDO(SDNode *N);
291 SDValue visitUADDOLike(SDValue N0, SDValue N1, SDNode *N);
292 SDValue visitSUBC(SDNode *N);
293 SDValue visitUSUBO(SDNode *N);
294 SDValue visitADDE(SDNode *N);
295 SDValue visitADDCARRY(SDNode *N);
296 SDValue visitADDCARRYLike(SDValue N0, SDValue N1, SDValue CarryIn, SDNode *N);
297 SDValue visitSUBE(SDNode *N);
298 SDValue visitSUBCARRY(SDNode *N);
299 SDValue visitMUL(SDNode *N);
300 SDValue useDivRem(SDNode *N);
301 SDValue visitSDIV(SDNode *N);
302 SDValue visitSDIVLike(SDValue N0, SDValue N1, SDNode *N);
303 SDValue visitUDIV(SDNode *N);
304 SDValue visitUDIVLike(SDValue N0, SDValue N1, SDNode *N);
305 SDValue visitREM(SDNode *N);
306 SDValue visitMULHU(SDNode *N);
307 SDValue visitMULHS(SDNode *N);
308 SDValue visitSMUL_LOHI(SDNode *N);
309 SDValue visitUMUL_LOHI(SDNode *N);
310 SDValue visitSMULO(SDNode *N);
311 SDValue visitUMULO(SDNode *N);
312 SDValue visitIMINMAX(SDNode *N);
313 SDValue visitAND(SDNode *N);
314 SDValue visitANDLike(SDValue N0, SDValue N1, SDNode *N);
315 SDValue visitOR(SDNode *N);
316 SDValue visitORLike(SDValue N0, SDValue N1, SDNode *N);
317 SDValue visitXOR(SDNode *N);
318 SDValue SimplifyVBinOp(SDNode *N);
319 SDValue visitSHL(SDNode *N);
320 SDValue visitSRA(SDNode *N);
321 SDValue visitSRL(SDNode *N);
322 SDValue visitRotate(SDNode *N);
323 SDValue visitABS(SDNode *N);
324 SDValue visitBSWAP(SDNode *N);
325 SDValue visitBITREVERSE(SDNode *N);
326 SDValue visitCTLZ(SDNode *N);
327 SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
328 SDValue visitCTTZ(SDNode *N);
329 SDValue visitCTTZ_ZERO_UNDEF(SDNode *N);
330 SDValue visitCTPOP(SDNode *N);
331 SDValue visitSELECT(SDNode *N);
332 SDValue visitVSELECT(SDNode *N);
333 SDValue visitSELECT_CC(SDNode *N);
334 SDValue visitSETCC(SDNode *N);
335 SDValue visitSETCCCARRY(SDNode *N);
336 SDValue visitSIGN_EXTEND(SDNode *N);
337 SDValue visitZERO_EXTEND(SDNode *N);
338 SDValue visitANY_EXTEND(SDNode *N);
339 SDValue visitAssertExt(SDNode *N);
340 SDValue visitSIGN_EXTEND_INREG(SDNode *N);
341 SDValue visitSIGN_EXTEND_VECTOR_INREG(SDNode *N);
342 SDValue visitZERO_EXTEND_VECTOR_INREG(SDNode *N);
343 SDValue visitTRUNCATE(SDNode *N);
344 SDValue visitBITCAST(SDNode *N);
345 SDValue visitBUILD_PAIR(SDNode *N);
346 SDValue visitFADD(SDNode *N);
347 SDValue visitFSUB(SDNode *N);
348 SDValue visitFMUL(SDNode *N);
349 SDValue visitFMA(SDNode *N);
350 SDValue visitFDIV(SDNode *N);
351 SDValue visitFREM(SDNode *N);
352 SDValue visitFSQRT(SDNode *N);
353 SDValue visitFCOPYSIGN(SDNode *N);
354 SDValue visitSINT_TO_FP(SDNode *N);
355 SDValue visitUINT_TO_FP(SDNode *N);
356 SDValue visitFP_TO_SINT(SDNode *N);
357 SDValue visitFP_TO_UINT(SDNode *N);
358 SDValue visitFP_ROUND(SDNode *N);
359 SDValue visitFP_ROUND_INREG(SDNode *N);
360 SDValue visitFP_EXTEND(SDNode *N);
361 SDValue visitFNEG(SDNode *N);
362 SDValue visitFABS(SDNode *N);
363 SDValue visitFCEIL(SDNode *N);
364 SDValue visitFTRUNC(SDNode *N);
365 SDValue visitFFLOOR(SDNode *N);
366 SDValue visitFMINNUM(SDNode *N);
367 SDValue visitFMAXNUM(SDNode *N);
368 SDValue visitBRCOND(SDNode *N);
369 SDValue visitBR_CC(SDNode *N);
370 SDValue visitLOAD(SDNode *N);
371
372 SDValue replaceStoreChain(StoreSDNode *ST, SDValue BetterChain);
373 SDValue replaceStoreOfFPConstant(StoreSDNode *ST);
374
375 SDValue visitSTORE(SDNode *N);
376 SDValue visitINSERT_VECTOR_ELT(SDNode *N);
377 SDValue visitEXTRACT_VECTOR_ELT(SDNode *N);
378 SDValue visitBUILD_VECTOR(SDNode *N);
379 SDValue visitCONCAT_VECTORS(SDNode *N);
380 SDValue visitEXTRACT_SUBVECTOR(SDNode *N);
381 SDValue visitVECTOR_SHUFFLE(SDNode *N);
382 SDValue visitSCALAR_TO_VECTOR(SDNode *N);
383 SDValue visitINSERT_SUBVECTOR(SDNode *N);
384 SDValue visitMLOAD(SDNode *N);
385 SDValue visitMSTORE(SDNode *N);
386 SDValue visitMGATHER(SDNode *N);
387 SDValue visitMSCATTER(SDNode *N);
388 SDValue visitFP_TO_FP16(SDNode *N);
389 SDValue visitFP16_TO_FP(SDNode *N);
390
391 SDValue visitFADDForFMACombine(SDNode *N);
392 SDValue visitFSUBForFMACombine(SDNode *N);
393 SDValue visitFMULForFMADistributiveCombine(SDNode *N);
394
395 SDValue XformToShuffleWithZero(SDNode *N);
396 SDValue ReassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0,
397 SDValue N1);
398
399 SDValue visitShiftByConstant(SDNode *N, ConstantSDNode *Amt);
400
401 SDValue foldSelectOfConstants(SDNode *N);
402 SDValue foldVSelectOfConstants(SDNode *N);
403 SDValue foldBinOpIntoSelect(SDNode *BO);
404 bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
405 SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N);
406 SDValue SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2);
407 SDValue SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
408 SDValue N2, SDValue N3, ISD::CondCode CC,
409 bool NotExtCompare = false);
410 SDValue foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0, SDValue N1,
411 SDValue N2, SDValue N3, ISD::CondCode CC);
412 SDValue foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1,
413 const SDLoc &DL);
414 SDValue unfoldMaskedMerge(SDNode *N);
415 SDValue unfoldExtremeBitClearingToShifts(SDNode *N);
416 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
417 const SDLoc &DL, bool foldBooleans);
418 SDValue rebuildSetCC(SDValue N);
419
420 bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
421 SDValue &CC) const;
422 bool isOneUseSetCC(SDValue N) const;
423
424 SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
425 unsigned HiOp);
426 SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
427 SDValue CombineExtLoad(SDNode *N);
428 SDValue CombineZExtLogicopShiftLoad(SDNode *N);
429 SDValue combineRepeatedFPDivisors(SDNode *N);
430 SDValue combineInsertEltToShuffle(SDNode *N, unsigned InsIndex);
431 SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
432 SDValue BuildSDIV(SDNode *N);
433 SDValue BuildSDIVPow2(SDNode *N);
434 SDValue BuildUDIV(SDNode *N);
435 SDValue BuildLogBase2(SDValue V, const SDLoc &DL);
436 SDValue BuildReciprocalEstimate(SDValue Op, SDNodeFlags Flags);
437 SDValue buildRsqrtEstimate(SDValue Op, SDNodeFlags Flags);
438 SDValue buildSqrtEstimate(SDValue Op, SDNodeFlags Flags);
439 SDValue buildSqrtEstimateImpl(SDValue Op, SDNodeFlags Flags, bool Recip);
440 SDValue buildSqrtNROneConst(SDValue Arg, SDValue Est, unsigned Iterations,
441 SDNodeFlags Flags, bool Reciprocal);
442 SDValue buildSqrtNRTwoConst(SDValue Arg, SDValue Est, unsigned Iterations,
443 SDNodeFlags Flags, bool Reciprocal);
444 SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
445 bool DemandHighBits = true);
446 SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1);
447 SDNode *MatchRotatePosNeg(SDValue Shifted, SDValue Pos, SDValue Neg,
448 SDValue InnerPos, SDValue InnerNeg,
449 unsigned PosOpcode, unsigned NegOpcode,
450 const SDLoc &DL);
451 SDNode *MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL);
452 SDValue MatchLoadCombine(SDNode *N);
453 SDValue ReduceLoadWidth(SDNode *N);
454 SDValue ReduceLoadOpStoreWidth(SDNode *N);
455 SDValue splitMergedValStore(StoreSDNode *ST);
456 SDValue TransformFPLoadStorePair(SDNode *N);
457 SDValue convertBuildVecZextToZext(SDNode *N);
458 SDValue reduceBuildVecExtToExtBuildVec(SDNode *N);
459 SDValue reduceBuildVecConvertToConvertBuildVec(SDNode *N);
460 SDValue reduceBuildVecToShuffle(SDNode *N);
461 SDValue createBuildVecShuffle(const SDLoc &DL, SDNode *N,
462 ArrayRef<int> VectorMask, SDValue VecIn1,
463 SDValue VecIn2, unsigned LeftIdx);
464 SDValue matchVSelectOpSizesWithSetCC(SDNode *Cast);
465
466 /// Walk up chain skipping non-aliasing memory nodes,
467 /// looking for aliasing nodes and adding them to the Aliases vector.
468 void GatherAllAliases(SDNode *N, SDValue OriginalChain,
469 SmallVectorImpl<SDValue> &Aliases);
470
471 /// Return true if there is any possibility that the two addresses overlap.
472 bool isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const;
473
474 /// Walk up chain skipping non-aliasing memory nodes, looking for a better
475 /// chain (aliasing node.)
476 SDValue FindBetterChain(SDNode *N, SDValue Chain);
477
478 /// Try to replace a store and any possibly adjacent stores on
479 /// consecutive chains with better chains. Return true only if St is
480 /// replaced.
481 ///
482 /// Notice that other chains may still be replaced even if the function
483 /// returns false.
484 bool findBetterNeighborChains(StoreSDNode *St);
485
486 /// Match "(X shl/srl V1) & V2" where V2 may not be present.
487 bool MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask);
488
489 /// Holds a pointer to an LSBaseSDNode as well as information on where it
490 /// is located in a sequence of memory operations connected by a chain.
491 struct MemOpLink {
492 // Ptr to the mem node.
493 LSBaseSDNode *MemNode;
494
495 // Offset from the base ptr.
496 int64_t OffsetFromBase;
497
498 MemOpLink(LSBaseSDNode *N, int64_t Offset)
499 : MemNode(N), OffsetFromBase(Offset) {}
500 };
501
502 /// This is a helper function for visitMUL to check the profitability
503 /// of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
504 /// MulNode is the original multiply, AddNode is (add x, c1),
505 /// and ConstNode is c2.
506 bool isMulAddWithConstProfitable(SDNode *MulNode,
507 SDValue &AddNode,
508 SDValue &ConstNode);
509
510 /// This is a helper function for visitAND and visitZERO_EXTEND. Returns
511 /// true if the (and (load x) c) pattern matches an extload. ExtVT returns
512 /// the type of the loaded value to be extended.
513 bool isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN,
514 EVT LoadResultTy, EVT &ExtVT);
515
516 /// Helper function to calculate whether the given Load/Store can have its
517 /// width reduced to ExtVT.
518 bool isLegalNarrowLdSt(LSBaseSDNode *LDSTN, ISD::LoadExtType ExtType,
519 EVT &MemVT, unsigned ShAmt = 0);
520
521 /// Used by BackwardsPropagateMask to find suitable loads.
522 bool SearchForAndLoads(SDNode *N, SmallPtrSetImpl<LoadSDNode*> &Loads,
523 SmallPtrSetImpl<SDNode*> &NodesWithConsts,
524 ConstantSDNode *Mask, SDNode *&NodeToMask);
525 /// Attempt to propagate a given AND node back to load leaves so that they
526 /// can be combined into narrow loads.
527 bool BackwardsPropagateMask(SDNode *N, SelectionDAG &DAG);
528
529 /// Helper function for MergeConsecutiveStores which merges the
530 /// component store chains.
531 SDValue getMergeStoreChains(SmallVectorImpl<MemOpLink> &StoreNodes,
532 unsigned NumStores);
533
534 /// This is a helper function for MergeConsecutiveStores. When the
535 /// source elements of the consecutive stores are all constants or
536 /// all extracted vector elements, try to merge them into one
537 /// larger store introducing bitcasts if necessary. \return True
538 /// if a merged store was created.
539 bool MergeStoresOfConstantsOrVecElts(SmallVectorImpl<MemOpLink> &StoreNodes,
540 EVT MemVT, unsigned NumStores,
541 bool IsConstantSrc, bool UseVector,
542 bool UseTrunc);
543
544 /// This is a helper function for MergeConsecutiveStores. Stores
545 /// that potentially may be merged with St are placed in
546 /// StoreNodes. RootNode is a chain predecessor to all store
547 /// candidates.
548 void getStoreMergeCandidates(StoreSDNode *St,
549 SmallVectorImpl<MemOpLink> &StoreNodes,
550 SDNode *&Root);
551
552 /// Helper function for MergeConsecutiveStores. Checks if
553 /// candidate stores have indirect dependency through their
554 /// operands. RootNode is the predecessor to all stores calculated
555 /// by getStoreMergeCandidates and is used to prune the dependency check.
556 /// \return True if safe to merge.
557 bool checkMergeStoreCandidatesForDependencies(
558 SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumStores,
559 SDNode *RootNode);
560
561 /// Merge consecutive store operations into a wide store.
562 /// This optimization uses wide integers or vectors when possible.
563 /// \return number of stores that were merged into a merged store (the
564 /// affected nodes are stored as a prefix in \p StoreNodes).
565 bool MergeConsecutiveStores(StoreSDNode *St);
566
567 /// Try to transform a truncation where C is a constant:
568 /// (trunc (and X, C)) -> (and (trunc X), (trunc C))
569 ///
570 /// \p N needs to be a truncation and its first operand an AND. Other
571 /// requirements are checked by the function (e.g. that trunc is
572 /// single-use) and if missed an empty SDValue is returned.
573 SDValue distributeTruncateThroughAnd(SDNode *N);
574
575 /// Helper function to determine whether the target supports operation
576 /// given by \p Opcode for type \p VT, that is, whether the operation
577 /// is legal or custom before legalizing operations, and whether is
578 /// legal (but not custom) after legalization.
579 bool hasOperation(unsigned Opcode, EVT VT) {
580 if (LegalOperations)
581 return TLI.isOperationLegal(Opcode, VT);
582 return TLI.isOperationLegalOrCustom(Opcode, VT);
583 }
584
585 public:
586 /// Runs the dag combiner on all nodes in the work list
587 void Run(CombineLevel AtLevel);
588
589 SelectionDAG &getDAG() const { return DAG; }
590
591 /// Returns a type large enough to hold any valid shift amount - before type
592 /// legalization these can be huge.
593 EVT getShiftAmountTy(EVT LHSTy) {
594      assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
595 return TLI.getShiftAmountTy(LHSTy, DAG.getDataLayout(), LegalTypes);
596 }
597
598 /// This method returns true if we are running before type legalization or
599 /// if the specified VT is legal.
600 bool isTypeLegal(const EVT &VT) {
601 if (!LegalTypes) return true;
602 return TLI.isTypeLegal(VT);
603 }
604
605 /// Convenience wrapper around TargetLowering::getSetCCResultType
606 EVT getSetCCResultType(EVT VT) const {
607 return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
608 }
609
610 void ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
611 SDValue OrigLoad, SDValue ExtLoad,
612 ISD::NodeType ExtType);
613 };
614
615/// This class is a DAGUpdateListener that removes any deleted
616/// nodes from the worklist.
617class WorklistRemover : public SelectionDAG::DAGUpdateListener {
618 DAGCombiner &DC;
619
620public:
621 explicit WorklistRemover(DAGCombiner &dc)
622 : SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {}
623
624 void NodeDeleted(SDNode *N, SDNode *E) override {
625 DC.removeFromWorklist(N);
626 }
627};
628
629} // end anonymous namespace
630
631//===----------------------------------------------------------------------===//
632// TargetLowering::DAGCombinerInfo implementation
633//===----------------------------------------------------------------------===//
634
635void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {
636 ((DAGCombiner*)DC)->AddToWorklist(N);
637}
638
639SDValue TargetLowering::DAGCombinerInfo::
640CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo) {
641 return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size(), AddTo);
642}
643
644SDValue TargetLowering::DAGCombinerInfo::
645CombineTo(SDNode *N, SDValue Res, bool AddTo) {
646 return ((DAGCombiner*)DC)->CombineTo(N, Res, AddTo);
647}
648
649SDValue TargetLowering::DAGCombinerInfo::
650CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) {
651 return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo);
652}
653
654void TargetLowering::DAGCombinerInfo::
655CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
656 return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO);
657}
658
659//===----------------------------------------------------------------------===//
660// Helper Functions
661//===----------------------------------------------------------------------===//
662
663void DAGCombiner::deleteAndRecombine(SDNode *N) {
664 removeFromWorklist(N);
665
666 // If the operands of this node are only used by the node, they will now be
667 // dead. Make sure to re-visit them and recursively delete dead nodes.
668 for (const SDValue &Op : N->ops())
669 // For an operand generating multiple values, one of the values may
670 // become dead allowing further simplification (e.g. split index
671 // arithmetic from an indexed load).
672 if (Op->hasOneUse() || Op->getNumValues() > 1)
673 AddToWorklist(Op.getNode());
674
675 DAG.DeleteNode(N);
676}
677
678/// Return 1 if we can compute the negated form of the specified expression for
679/// the same cost as the expression itself, or 2 if we can compute the negated
680/// form more cheaply than the expression itself.
681static char isNegatibleForFree(SDValue Op, bool LegalOperations,
682 const TargetLowering &TLI,
683 const TargetOptions *Options,
684 unsigned Depth = 0) {
685 // fneg is removable even if it has multiple uses.
686 if (Op.getOpcode() == ISD::FNEG) return 2;
687
688 // Don't allow anything with multiple uses unless we know it is free.
689 EVT VT = Op.getValueType();
690 const SDNodeFlags Flags = Op->getFlags();
691 if (!Op.hasOneUse())
692 if (!(Op.getOpcode() == ISD::FP_EXTEND &&
693 TLI.isFPExtFree(VT, Op.getOperand(0).getValueType())))
694 return 0;
695
696 // Don't recurse exponentially.
697 if (Depth > 6) return 0;
698
699 switch (Op.getOpcode()) {
700 default: return false;
701 case ISD::ConstantFP: {
702 if (!LegalOperations)
703 return 1;
704
705 // Don't invert constant FP values after legalization unless the target says
706 // the negated constant is legal.
707 return TLI.isOperationLegal(ISD::ConstantFP, VT) ||
708 TLI.isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT);
709 }
710 case ISD::FADD:
711 if (!Options->UnsafeFPMath && !Flags.hasNoSignedZeros())
712 return 0;
713
714 // After operation legalization, it might not be legal to create new FSUBs.
715 if (LegalOperations && !TLI.isOperationLegalOrCustom(ISD::FSUB, VT))
716 return 0;
717
718 // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
719 if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
720 Options, Depth + 1))
721 return V;
722 // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
723 return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
724 Depth + 1);
725 case ISD::FSUB:
726 // We can't turn -(A-B) into B-A when we honor signed zeros.
727 if (!Options->NoSignedZerosFPMath &&
728 !Flags.hasNoSignedZeros())
729 return 0;
730
731 // fold (fneg (fsub A, B)) -> (fsub B, A)
732 return 1;
733
734 case ISD::FMUL:
735 case ISD::FDIV:
736 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y))
737 if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
738 Options, Depth + 1))
739 return V;
740
741 return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
742 Depth + 1);
743
744 case ISD::FP_EXTEND:
745 case ISD::FP_ROUND:
746 case ISD::FSIN:
747 return isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, Options,
748 Depth + 1);
749 }
750}
751
752/// If isNegatibleForFree returns true, return the newly negated expression.
753static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
754 bool LegalOperations, unsigned Depth = 0) {
755 const TargetOptions &Options = DAG.getTarget().Options;
756 // fneg is removable even if it has multiple uses.
757 if (Op.getOpcode() == ISD::FNEG) return Op.getOperand(0);
758
759  assert(Depth <= 6 && "GetNegatedExpression doesn't match isNegatibleForFree");
760
761 const SDNodeFlags Flags = Op.getNode()->getFlags();
762
763 switch (Op.getOpcode()) {
764  default: llvm_unreachable("Unknown code");
765 case ISD::ConstantFP: {
766 APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
767 V.changeSign();
768 return DAG.getConstantFP(V, SDLoc(Op), Op.getValueType());
769 }
770 case ISD::FADD:
771    assert(Options.UnsafeFPMath || Flags.hasNoSignedZeros());
772
773 // fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
774 if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
775 DAG.getTargetLoweringInfo(), &Options, Depth+1))
776 return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
777 GetNegatedExpression(Op.getOperand(0), DAG,
778 LegalOperations, Depth+1),
779 Op.getOperand(1), Flags);
780 // fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
781 return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
782 GetNegatedExpression(Op.getOperand(1), DAG,
783 LegalOperations, Depth+1),
784 Op.getOperand(0), Flags);
785 case ISD::FSUB:
786 // fold (fneg (fsub 0, B)) -> B
787 if (ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0)))
788 if (N0CFP->isZero())
789 return Op.getOperand(1);
790
791 // fold (fneg (fsub A, B)) -> (fsub B, A)
792 return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(),
793 Op.getOperand(1), Op.getOperand(0), Flags);
794
795 case ISD::FMUL:
796 case ISD::FDIV:
797 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
798 if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
799 DAG.getTargetLoweringInfo(), &Options, Depth+1))
800 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
801 GetNegatedExpression(Op.getOperand(0), DAG,
802 LegalOperations, Depth+1),
803 Op.getOperand(1), Flags);
804
805 // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
806 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
807 Op.getOperand(0),
808 GetNegatedExpression(Op.getOperand(1), DAG,
809 LegalOperations, Depth+1), Flags);
810
811 case ISD::FP_EXTEND:
812 case ISD::FSIN:
813 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
814 GetNegatedExpression(Op.getOperand(0), DAG,
815 LegalOperations, Depth+1));
816 case ISD::FP_ROUND:
817 return DAG.getNode(ISD::FP_ROUND, SDLoc(Op), Op.getValueType(),
818 GetNegatedExpression(Op.getOperand(0), DAG,
819 LegalOperations, Depth+1),
820 Op.getOperand(1));
821 }
822}
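
// A minimal usage sketch of how these two helpers pair up (an assumed
// caller, not code from this listing; N0/N1/DL/VT as in the visit routines):
//
//   // fold (fsub A, B) -> (fadd A, (fneg B)) when the fneg is free:
//   if (isNegatibleForFree(N1, LegalOperations, TLI, &Options))
//     return DAG.getNode(ISD::FADD, DL, VT, N0,
//                        GetNegatedExpression(N1, DAG, LegalOperations));
//
// GetNegatedExpression asserts on opcodes it cannot negate, so callers must
// query isNegatibleForFree first.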
823
824// APInts must be the same size for most operations, this helper
825// function zero extends the shorter of the pair so that they match.
826// We provide an Offset so that we can create bitwidths that won't overflow.
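// For example, an 8-bit LHS and a 16-bit RHS with Offset = 1 both become
// 17-bit values: max(8, 16) + 1.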
827static void zeroExtendToMatch(APInt &LHS, APInt &RHS, unsigned Offset = 0) {
828 unsigned Bits = Offset + std::max(LHS.getBitWidth(), RHS.getBitWidth());
829 LHS = LHS.zextOrSelf(Bits);
830 RHS = RHS.zextOrSelf(Bits);
831}
832
833// Return true if this node is a setcc, or is a select_cc
834// that selects between the target values used for true and false, making it
835// equivalent to a setcc. Also, set the incoming LHS, RHS, and CC references to
836// the appropriate nodes based on the type of node we are checking. This
837// simplifies life a bit for the callers.
838bool DAGCombiner::isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
839 SDValue &CC) const {
840 if (N.getOpcode() == ISD::SETCC) {
841 LHS = N.getOperand(0);
842 RHS = N.getOperand(1);
843 CC = N.getOperand(2);
844 return true;
845 }
846
847 if (N.getOpcode() != ISD::SELECT_CC ||
848 !TLI.isConstTrueVal(N.getOperand(2).getNode()) ||
849 !TLI.isConstFalseVal(N.getOperand(3).getNode()))
850 return false;
851
852 if (TLI.getBooleanContents(N.getValueType()) ==
853 TargetLowering::UndefinedBooleanContent)
854 return false;
855
856 LHS = N.getOperand(0);
857 RHS = N.getOperand(1);
858 CC = N.getOperand(4);
859 return true;
860}
861
862/// Return true if this is a SetCC-equivalent operation with only one use.
863/// If this is true, it allows the users to invert the operation for free when
864/// it is profitable to do so.
865bool DAGCombiner::isOneUseSetCC(SDValue N) const {
866 SDValue N0, N1, N2;
867 if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse())
868 return true;
869 return false;
870}
871
872static SDValue peekThroughBitcast(SDValue V) {
873 while (V.getOpcode() == ISD::BITCAST)
874 V = V.getOperand(0);
875 return V;
876}
877
878// Returns the SDNode if it is a constant float BuildVector
879// or constant float.
880static SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) {
881 if (isa<ConstantFPSDNode>(N))
882 return N.getNode();
883 if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
884 return N.getNode();
885 return nullptr;
886}
887
888// Determines if it is a constant integer or a build vector of constant
889// integers (and undefs).
890// Do not permit build vector implicit truncation.
891static bool isConstantOrConstantVector(SDValue N, bool NoOpaques = false) {
892 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N))
893 return !(Const->isOpaque() && NoOpaques);
894 if (N.getOpcode() != ISD::BUILD_VECTOR)
895 return false;
896 unsigned BitWidth = N.getScalarValueSizeInBits();
897 for (const SDValue &Op : N->op_values()) {
898 if (Op.isUndef())
899 continue;
900 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Op);
901 if (!Const || Const->getAPIntValue().getBitWidth() != BitWidth ||
902 (Const->isOpaque() && NoOpaques))
903 return false;
904 }
905 return true;
906}
907
908// Determines if it is a constant null integer or a splatted vector of a
909// constant null integer (with no undefs).
910// Build vector implicit truncation is not an issue for null values.
911static bool isNullConstantOrNullSplatConstant(SDValue N) {
912 // TODO: may want to use peekThroughBitcast() here.
913 if (ConstantSDNode *Splat = isConstOrConstSplat(N))
914 return Splat->isNullValue();
915 return false;
916}
917
918// Determines if it is a constant integer of one or a splatted vector of a
919// constant integer of one (with no undefs).
920// Do not permit build vector implicit truncation.
921static bool isOneConstantOrOneSplatConstant(SDValue N) {
922 // TODO: may want to use peekThroughBitcast() here.
923 unsigned BitWidth = N.getScalarValueSizeInBits();
924 if (ConstantSDNode *Splat = isConstOrConstSplat(N))
925 return Splat->isOne() && Splat->getAPIntValue().getBitWidth() == BitWidth;
926 return false;
927}
928
929// Determines if it is a constant integer of all ones or a splatted vector of a
930// constant integer of all ones (with no undefs).
931// Do not permit build vector implicit truncation.
932static bool isAllOnesConstantOrAllOnesSplatConstant(SDValue N) {
933 N = peekThroughBitcast(N);
934 unsigned BitWidth = N.getScalarValueSizeInBits();
935 if (ConstantSDNode *Splat = isConstOrConstSplat(N))
936 return Splat->isAllOnesValue() &&
937 Splat->getAPIntValue().getBitWidth() == BitWidth;
938 return false;
939}
940
941// Determines if a BUILD_VECTOR is composed of all-constants possibly mixed with
942// undef's.
943static bool isAnyConstantBuildVector(const SDNode *N) {
944 return ISD::isBuildVectorOfConstantSDNodes(N) ||
945 ISD::isBuildVectorOfConstantFPSDNodes(N);
946}
947
948SDValue DAGCombiner::ReassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0,
949 SDValue N1) {
950 EVT VT = N0.getValueType();
951 if (N0.getOpcode() == Opc) {
952 if (SDNode *L = DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1))) {
953 if (SDNode *R = DAG.isConstantIntBuildVectorOrConstantInt(N1)) {
954 // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
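          // e.g. (add (add x, 4), 6) -> (add x, 10)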
955 if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, DL, VT, L, R))
956 return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
957 return SDValue();
958 }
959 if (N0.hasOneUse()) {
960 // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one
961 // use
962 SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0.getOperand(0), N1);
963 if (!OpNode.getNode())
964 return SDValue();
965 AddToWorklist(OpNode.getNode());
966 return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1));
967 }
968 }
969 }
970
971 if (N1.getOpcode() == Opc) {
972 if (SDNode *R = DAG.isConstantIntBuildVectorOrConstantInt(N1.getOperand(1))) {
973 if (SDNode *L = DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
974 // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2))
975 if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, DL, VT, R, L))
976 return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode);
977 return SDValue();
978 }
979 if (N1.hasOneUse()) {
980 // reassoc. (op x, (op y, c1)) -> (op (op x, y), c1) iff x+c1 has one
981 // use
982 SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0, N1.getOperand(0));
983 if (!OpNode.getNode())
984 return SDValue();
985 AddToWorklist(OpNode.getNode());
986 return DAG.getNode(Opc, DL, VT, OpNode, N1.getOperand(1));
987 }
988 }
989 }
990
991 return SDValue();
992}
993
994SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
995 bool AddTo) {
996  assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
997 ++NodesCombined;
998  LLVM_DEBUG(dbgs() << "\nReplacing.1 "; N->dump(&DAG); dbgs() << "\nWith: ";
999             To[0].getNode()->dump(&DAG);
1000            dbgs() << " and " << NumTo - 1 << " other values\n");
1001 for (unsigned i = 0, e = NumTo; i != e; ++i)
1002    assert((!To[i].getNode() ||
1003            N->getValueType(i) == To[i].getValueType()) &&
1004           "Cannot combine value to value of different type!");
1005
1006 WorklistRemover DeadNodes(*this);
1007 DAG.ReplaceAllUsesWith(N, To);
1008 if (AddTo) {
1009 // Push the new nodes and any users onto the worklist
1010 for (unsigned i = 0, e = NumTo; i != e; ++i) {
1011 if (To[i].getNode()) {
1012 AddToWorklist(To[i].getNode());
1013 AddUsersToWorklist(To[i].getNode());
1014 }
1015 }
1016 }
1017
1018 // Finally, if the node is now dead, remove it from the graph. The node
1019 // may not be dead if the replacement process recursively simplified to
1020 // something else needing this node.
1021 if (N->use_empty())
1022 deleteAndRecombine(N);
1023 return SDValue(N, 0);
1024}
1025
1026void DAGCombiner::
1027CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
1028 // Replace all uses. If any nodes become isomorphic to other nodes and
1029 // are deleted, make sure to remove them from our worklist.
1030 WorklistRemover DeadNodes(*this);
1031 DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New);
1032
1033 // Push the new node and any (possibly new) users onto the worklist.
1034 AddToWorklist(TLO.New.getNode());
1035 AddUsersToWorklist(TLO.New.getNode());
1036
1037 // Finally, if the node is now dead, remove it from the graph. The node
1038 // may not be dead if the replacement process recursively simplified to
1039 // something else needing this node.
1040 if (TLO.Old.getNode()->use_empty())
1041 deleteAndRecombine(TLO.Old.getNode());
1042}
1043
1044/// Check the specified integer node value to see if it can be simplified or if
1045/// things it uses can be simplified by bit propagation. If so, return true.
1046bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
1047 TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
1048 KnownBits Known;
1049 if (!TLI.SimplifyDemandedBits(Op, Demanded, Known, TLO))
1050 return false;
1051
1052 // Revisit the node.
1053 AddToWorklist(Op.getNode());
1054
1055 // Replace the old value with the new one.
1056 ++NodesCombined;
1057  LLVM_DEBUG(dbgs() << "\nReplacing.2 "; TLO.Old.getNode()->dump(&DAG);
1058             dbgs() << "\nWith: "; TLO.New.getNode()->dump(&DAG);
1059             dbgs() << '\n');
1060
1061 CommitTargetLoweringOpt(TLO);
1062 return true;
1063}
1064
1065/// Check the specified vector node value to see if it can be simplified or
1066/// if things it uses can be simplified as it only uses some of the elements.
1067/// If so, return true.
1068bool DAGCombiner::SimplifyDemandedVectorElts(SDValue Op, const APInt &Demanded,
1069 bool AssumeSingleUse) {
1070 TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
1071 APInt KnownUndef, KnownZero;
1072 if (!TLI.SimplifyDemandedVectorElts(Op, Demanded, KnownUndef, KnownZero, TLO,
1073 0, AssumeSingleUse))
1074 return false;
1075
1076 // Revisit the node.
1077 AddToWorklist(Op.getNode());
1078
1079 // Replace the old value with the new one.
1080 ++NodesCombined;
1081  LLVM_DEBUG(dbgs() << "\nReplacing.2 "; TLO.Old.getNode()->dump(&DAG);
1082             dbgs() << "\nWith: "; TLO.New.getNode()->dump(&DAG);
1083             dbgs() << '\n');
1084
1085 CommitTargetLoweringOpt(TLO);
1086 return true;
1087}
1088
1089void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {
1090 SDLoc DL(Load);
1091 EVT VT = Load->getValueType(0);
1092 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, SDValue(ExtLoad, 0));
1093
1094  LLVM_DEBUG(dbgs() << "\nReplacing.9 "; Load->dump(&DAG); dbgs() << "\nWith: ";
1095             Trunc.getNode()->dump(&DAG); dbgs() << '\n');
1096 WorklistRemover DeadNodes(*this);
1097 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc);
1098 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1));
1099 deleteAndRecombine(Load);
1100 AddToWorklist(Trunc.getNode());
1101}
1102
1103SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
1104 Replace = false;
1105 SDLoc DL(Op);
1106 if (ISD::isUNINDEXEDLoad(Op.getNode())) {
1107 LoadSDNode *LD = cast<LoadSDNode>(Op);
1108 EVT MemVT = LD->getMemoryVT();
1109 ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD) ? ISD::EXTLOAD
1110 : LD->getExtensionType();
1111 Replace = true;
1112 return DAG.getExtLoad(ExtType, DL, PVT,
1113 LD->getChain(), LD->getBasePtr(),
1114 MemVT, LD->getMemOperand());
1115 }
1116
1117 unsigned Opc = Op.getOpcode();
1118 switch (Opc) {
1119 default: break;
1120 case ISD::AssertSext:
1121 if (SDValue Op0 = SExtPromoteOperand(Op.getOperand(0), PVT))
1122 return DAG.getNode(ISD::AssertSext, DL, PVT, Op0, Op.getOperand(1));
1123 break;
1124 case ISD::AssertZext:
1125 if (SDValue Op0 = ZExtPromoteOperand(Op.getOperand(0), PVT))
1126 return DAG.getNode(ISD::AssertZext, DL, PVT, Op0, Op.getOperand(1));
1127 break;
1128 case ISD::Constant: {
1129 unsigned ExtOpc =
1130 Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
1131 return DAG.getNode(ExtOpc, DL, PVT, Op);
1132 }
1133 }
1134
1135 if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT))
1136 return SDValue();
1137 return DAG.getNode(ISD::ANY_EXTEND, DL, PVT, Op);
1138}
1139
1140SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) {
1141 if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, PVT))
1142 return SDValue();
1143 EVT OldVT = Op.getValueType();
1144 SDLoc DL(Op);
1145 bool Replace = false;
1146 SDValue NewOp = PromoteOperand(Op, PVT, Replace);
1147 if (!NewOp.getNode())
1148 return SDValue();
1149 AddToWorklist(NewOp.getNode());
1150
1151 if (Replace)
1152 ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
1153 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, NewOp.getValueType(), NewOp,
1154 DAG.getValueType(OldVT));
1155}
1156
1157SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) {
1158 EVT OldVT = Op.getValueType();
1159 SDLoc DL(Op);
1160 bool Replace = false;
1161 SDValue NewOp = PromoteOperand(Op, PVT, Replace);
1162 if (!NewOp.getNode())
1163 return SDValue();
1164 AddToWorklist(NewOp.getNode());
1165
1166 if (Replace)
1167 ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
1168 return DAG.getZeroExtendInReg(NewOp, DL, OldVT);
1169}
1170
1171/// Promote the specified integer binary operation if the target indicates it is
1172/// beneficial. e.g. On x86, it's usually better to promote i16 operations to
1173/// i32 since i16 instructions are longer.
1174SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {
1175 if (!LegalOperations)
1176 return SDValue();
1177
1178 EVT VT = Op.getValueType();
1179 if (VT.isVector() || !VT.isInteger())
1180 return SDValue();
1181
1182 // If operation type is 'undesirable', e.g. i16 on x86, consider
1183 // promoting it.
1184 unsigned Opc = Op.getOpcode();
1185 if (TLI.isTypeDesirableForOp(Opc, VT))
1186 return SDValue();
1187
1188 EVT PVT = VT;
1189 // Consult target whether it is a good idea to promote this operation and
1190 // what's the right type to promote it to.
1191 if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
1192    assert(PVT != VT && "Don't know what type to promote to!");
1193
1194    LLVM_DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));
1195
1196 bool Replace0 = false;
1197 SDValue N0 = Op.getOperand(0);
1198 SDValue NN0 = PromoteOperand(N0, PVT, Replace0);
1199
1200 bool Replace1 = false;
1201 SDValue N1 = Op.getOperand(1);
1202 SDValue NN1 = PromoteOperand(N1, PVT, Replace1);
1203 SDLoc DL(Op);
1204
1205 SDValue RV =
1206 DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getNode(Opc, DL, PVT, NN0, NN1));
1207
1208 // We are always replacing N0/N1's use in N and only need
1209 // additional replacements if there are additional uses.
1210 Replace0 &= !N0->hasOneUse();
1211 Replace1 &= (N0 != N1) && !N1->hasOneUse();
1212
1213 // Combine Op here so it is preserved past replacements.
1214 CombineTo(Op.getNode(), RV);
1215
1216    // If the operands have a use ordering, make sure we deal with the
1217    // predecessor first.
1218 if (Replace0 && Replace1 && N0.getNode()->isPredecessorOf(N1.getNode())) {
1219 std::swap(N0, N1);
1220 std::swap(NN0, NN1);
1221 }
1222
1223 if (Replace0) {
1224 AddToWorklist(NN0.getNode());
1225 ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode());
1226 }
1227 if (Replace1) {
1228 AddToWorklist(NN1.getNode());
1229 ReplaceLoadWithPromotedLoad(N1.getNode(), NN1.getNode());
1230 }
1231 return Op;
1232 }
1233 return SDValue();
1234}
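
The promotion above relies on a simple scalar identity: for add/sub/mul and
the bitwise ops, computing in a wider type and truncating back gives the same
low bits as the narrow operation. A minimal standalone sketch of that identity
in plain C++ (illustrative only; not LLVM API):

  #include <cassert>
  #include <cstdint>

  // trunc(op(ext(a), ext(b))) == op(a, b): the low 16 bits of these ops do
  // not depend on the high bits of the inputs, so any extension works.
  uint16_t addPromoted(uint16_t A, uint16_t B) {
    uint32_t Wide = static_cast<uint32_t>(A) + static_cast<uint32_t>(B);
    return static_cast<uint16_t>(Wide); // the ISD::TRUNCATE step
  }

  int main() {
    assert(addPromoted(0xFFFF, 0x0001) == 0); // wraps exactly like i16
    return 0;
  }
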
1235
1236/// Promote the specified integer shift operation if the target indicates it is
1237/// beneficial, e.g. on x86 it's usually better to promote i16 operations to
1238/// i32 since i16 instructions are longer.
1239SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) {
1240 if (!LegalOperations)
1241 return SDValue();
1242
1243 EVT VT = Op.getValueType();
1244 if (VT.isVector() || !VT.isInteger())
1245 return SDValue();
1246
1247  // If the operation type is 'undesirable', e.g. i16 on x86, consider
1248  // promoting it.
1249 unsigned Opc = Op.getOpcode();
1250 if (TLI.isTypeDesirableForOp(Opc, VT))
1251 return SDValue();
1252
1253 EVT PVT = VT;
1254  // Consult the target on whether it is a good idea to promote this operation
1255  // and on the right type to promote it to.
1256 if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
1257    assert(PVT != VT && "Don't know what type to promote to!");
1258
1259    LLVM_DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));
1260
1261 bool Replace = false;
1262 SDValue N0 = Op.getOperand(0);
1263 SDValue N1 = Op.getOperand(1);
1264 if (Opc == ISD::SRA)
1265 N0 = SExtPromoteOperand(N0, PVT);
1266 else if (Opc == ISD::SRL)
1267 N0 = ZExtPromoteOperand(N0, PVT);
1268 else
1269 N0 = PromoteOperand(N0, PVT, Replace);
1270
1271 if (!N0.getNode())
1272 return SDValue();
1273
1274 SDLoc DL(Op);
1275 SDValue RV =
1276 DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getNode(Opc, DL, PVT, N0, N1));
1277
1278 AddToWorklist(N0.getNode());
1279 if (Replace)
1280 ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode());
1281
1282 // Deal with Op being deleted.
1283 if (Op && Op.getOpcode() != ISD::DELETED_NODE)
1284 return RV;
1285 }
1286 return SDValue();
1287}
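
Right shifts are the one case above where the choice of extension matters: the
shifted-in high bits become part of the result, which is why SRA goes through
SExtPromoteOperand and SRL through ZExtPromoteOperand. A hedged plain-C++
sketch of the distinction (illustrative only; assumes the usual arithmetic
behavior of signed >> on two's complement targets):

  #include <cstdint>

  // SRL: zero-extend, or garbage high bits would be shifted into the result.
  uint16_t srlPromoted(uint16_t V, unsigned Amt) {
    return static_cast<uint16_t>(static_cast<uint32_t>(V) >> Amt);
  }

  // SRA: sign-extend, so the copies of the sign bit shifted in are correct.
  int16_t sraPromoted(int16_t V, unsigned Amt) {
    return static_cast<int16_t>(static_cast<int32_t>(V) >> Amt);
  }
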
1288
1289SDValue DAGCombiner::PromoteExtend(SDValue Op) {
1290 if (!LegalOperations)
1291 return SDValue();
1292
1293 EVT VT = Op.getValueType();
1294 if (VT.isVector() || !VT.isInteger())
1295 return SDValue();
1296
1297  // If the operation type is 'undesirable', e.g. i16 on x86, consider
1298  // promoting it.
1299 unsigned Opc = Op.getOpcode();
1300 if (TLI.isTypeDesirableForOp(Opc, VT))
1301 return SDValue();
1302
1303 EVT PVT = VT;
1304  // Consult the target on whether it is a good idea to promote this operation
1305  // and on the right type to promote it to.
1306 if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
1307    assert(PVT != VT && "Don't know what type to promote to!");
1308 // fold (aext (aext x)) -> (aext x)
1309 // fold (aext (zext x)) -> (zext x)
1310 // fold (aext (sext x)) -> (sext x)
1311    LLVM_DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));
1312 return DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, Op.getOperand(0));
1313 }
1314 return SDValue();
1315}
1316
1317bool DAGCombiner::PromoteLoad(SDValue Op) {
1318 if (!LegalOperations)
1319 return false;
1320
1321 if (!ISD::isUNINDEXEDLoad(Op.getNode()))
1322 return false;
1323
1324 EVT VT = Op.getValueType();
1325 if (VT.isVector() || !VT.isInteger())
1326 return false;
1327
1328  // If the operation type is 'undesirable', e.g. i16 on x86, consider
1329  // promoting it.
1330 unsigned Opc = Op.getOpcode();
1331 if (TLI.isTypeDesirableForOp(Opc, VT))
1332 return false;
1333
1334 EVT PVT = VT;
1335  // Consult the target on whether it is a good idea to promote this operation
1336  // and on the right type to promote it to.
1337 if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
1338    assert(PVT != VT && "Don't know what type to promote to!");
1339
1340 SDLoc DL(Op);
1341 SDNode *N = Op.getNode();
1342 LoadSDNode *LD = cast<LoadSDNode>(N);
1343 EVT MemVT = LD->getMemoryVT();
1344 ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD) ? ISD::EXTLOAD
1345 : LD->getExtensionType();
1346 SDValue NewLD = DAG.getExtLoad(ExtType, DL, PVT,
1347 LD->getChain(), LD->getBasePtr(),
1348 MemVT, LD->getMemOperand());
1349 SDValue Result = DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD);
1350
1351    LLVM_DEBUG(dbgs() << "\nPromoting "; N->dump(&DAG); dbgs() << "\nTo: ";
1352               Result.getNode()->dump(&DAG); dbgs() << '\n');
1353 WorklistRemover DeadNodes(*this);
1354 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
1355 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1));
1356 deleteAndRecombine(N);
1357 AddToWorklist(Result.getNode());
1358 return true;
1359 }
1360 return false;
1361}
1362
1363/// Recursively delete a node which has no uses and any operands for
1364/// which it is the only use.
1365///
1366/// Note that this both deletes the nodes and removes them from the worklist.
1367/// It also adds any nodes that have had a user deleted to the worklist, as
1368/// they may now have only one use and be subject to other combines.
1369bool DAGCombiner::recursivelyDeleteUnusedNodes(SDNode *N) {
1370 if (!N->use_empty())
1371 return false;
1372
1373 SmallSetVector<SDNode *, 16> Nodes;
1374 Nodes.insert(N);
1375 do {
1376 N = Nodes.pop_back_val();
1377 if (!N)
1378 continue;
1379
1380 if (N->use_empty()) {
1381 for (const SDValue &ChildN : N->op_values())
1382 Nodes.insert(ChildN.getNode());
1383
1384 removeFromWorklist(N);
1385 DAG.DeleteNode(N);
1386 } else {
1387 AddToWorklist(N);
1388 }
1389 } while (!Nodes.empty());
1390 return true;
1391}
1392
1393//===----------------------------------------------------------------------===//
1394// Main DAG Combiner implementation
1395//===----------------------------------------------------------------------===//
1396
1397void DAGCombiner::Run(CombineLevel AtLevel) {
1398  // Set the instance variables, so that the various visit routines may use them.
1399 Level = AtLevel;
1400 LegalOperations = Level >= AfterLegalizeVectorOps;
1401 LegalTypes = Level >= AfterLegalizeTypes;
1402
1403 // Add all the dag nodes to the worklist.
1404 for (SDNode &Node : DAG.allnodes())
1405 AddToWorklist(&Node);
1406
1407  // Create a dummy node (which is not added to allnodes) that adds a reference
1408 // to the root node, preventing it from being deleted, and tracking any
1409 // changes of the root.
1410 HandleSDNode Dummy(DAG.getRoot());
1411
1412 // While the worklist isn't empty, find a node and try to combine it.
1413 while (!WorklistMap.empty()) {
1414 SDNode *N;
1415 // The Worklist holds the SDNodes in order, but it may contain null entries.
1416 do {
1417 N = Worklist.pop_back_val();
1418 } while (!N);
1419
1420 bool GoodWorklistEntry = WorklistMap.erase(N);
1421 (void)GoodWorklistEntry;
1422    assert(GoodWorklistEntry &&
1423           "Found a worklist entry without a corresponding map entry!");
1424
1425 // If N has no uses, it is dead. Make sure to revisit all N's operands once
1426 // N is deleted from the DAG, since they too may now be dead or may have a
1427 // reduced number of uses, allowing other xforms.
1428 if (recursivelyDeleteUnusedNodes(N))
1429 continue;
1430
1431 WorklistRemover DeadNodes(*this);
1432
1433 // If this combine is running after legalizing the DAG, re-legalize any
1434 // nodes pulled off the worklist.
1435 if (Level == AfterLegalizeDAG) {
1436 SmallSetVector<SDNode *, 16> UpdatedNodes;
1437 bool NIsValid = DAG.LegalizeOp(N, UpdatedNodes);
1438
1439 for (SDNode *LN : UpdatedNodes) {
1440 AddToWorklist(LN);
1441 AddUsersToWorklist(LN);
1442 }
1443 if (!NIsValid)
1444 continue;
1445 }
1446
1447    LLVM_DEBUG(dbgs() << "\nCombining: "; N->dump(&DAG));
1448
1449 // Add any operands of the new node which have not yet been combined to the
1450 // worklist as well. Because the worklist uniques things already, this
1451 // won't repeatedly process the same operand.
1452 CombinedNodes.insert(N);
1453 for (const SDValue &ChildN : N->op_values())
1454 if (!CombinedNodes.count(ChildN.getNode()))
1455 AddToWorklist(ChildN.getNode());
1456
1457 SDValue RV = combine(N);
1458
1459 if (!RV.getNode())
1460 continue;
1461
1462 ++NodesCombined;
1463
1464 // If we get back the same node we passed in, rather than a new node or
1465 // zero, we know that the node must have defined multiple values and
1466 // CombineTo was used. Since CombineTo takes care of the worklist
1467 // mechanics for us, we have no work to do in this case.
1468 if (RV.getNode() == N)
1469 continue;
1470
1471    assert(N->getOpcode() != ISD::DELETED_NODE &&
1472           RV.getOpcode() != ISD::DELETED_NODE &&
1473           "Node was deleted but visit returned new node!");
1474
1475    LLVM_DEBUG(dbgs() << " ... into: "; RV.getNode()->dump(&DAG));
1476
1477 if (N->getNumValues() == RV.getNode()->getNumValues())
1478 DAG.ReplaceAllUsesWith(N, RV.getNode());
1479 else {
1480      assert(N->getValueType(0) == RV.getValueType() &&
1481             N->getNumValues() == 1 && "Type mismatch");
1482 DAG.ReplaceAllUsesWith(N, &RV);
1483 }
1484
1485 // Push the new node and any users onto the worklist
1486 AddToWorklist(RV.getNode());
1487 AddUsersToWorklist(RV.getNode());
1488
1489 // Finally, if the node is now dead, remove it from the graph. The node
1490 // may not be dead if the replacement process recursively simplified to
1491 // something else needing this node. This will also take care of adding any
1492 // operands which have lost a user to the worklist.
1493 recursivelyDeleteUnusedNodes(N);
1494 }
1495
1496  // If the root changed (e.g. it was a dead load), update the root.
1497 DAG.setRoot(Dummy.getValue());
1498 DAG.RemoveDeadNodes();
1499}
1500
1501SDValue DAGCombiner::visit(SDNode *N) {
1502 switch (N->getOpcode()) {
1503 default: break;
1504 case ISD::TokenFactor: return visitTokenFactor(N);
1505 case ISD::MERGE_VALUES: return visitMERGE_VALUES(N);
1506 case ISD::ADD: return visitADD(N);
1507 case ISD::SUB: return visitSUB(N);
1508 case ISD::ADDC: return visitADDC(N);
1509 case ISD::UADDO: return visitUADDO(N);
1510 case ISD::SUBC: return visitSUBC(N);
1511 case ISD::USUBO: return visitUSUBO(N);
1512 case ISD::ADDE: return visitADDE(N);
1513 case ISD::ADDCARRY: return visitADDCARRY(N);
1514 case ISD::SUBE: return visitSUBE(N);
1515 case ISD::SUBCARRY: return visitSUBCARRY(N);
1516 case ISD::MUL: return visitMUL(N);
1517 case ISD::SDIV: return visitSDIV(N);
1518 case ISD::UDIV: return visitUDIV(N);
1519 case ISD::SREM:
1520 case ISD::UREM: return visitREM(N);
1521 case ISD::MULHU: return visitMULHU(N);
1522 case ISD::MULHS: return visitMULHS(N);
1523 case ISD::SMUL_LOHI: return visitSMUL_LOHI(N);
1524 case ISD::UMUL_LOHI: return visitUMUL_LOHI(N);
1525 case ISD::SMULO: return visitSMULO(N);
1526 case ISD::UMULO: return visitUMULO(N);
1527 case ISD::SMIN:
1528 case ISD::SMAX:
1529 case ISD::UMIN:
1530 case ISD::UMAX: return visitIMINMAX(N);
1531 case ISD::AND: return visitAND(N);
1532 case ISD::OR: return visitOR(N);
1533 case ISD::XOR: return visitXOR(N);
1534 case ISD::SHL: return visitSHL(N);
1535 case ISD::SRA: return visitSRA(N);
1536 case ISD::SRL: return visitSRL(N);
1537 case ISD::ROTR:
1538 case ISD::ROTL: return visitRotate(N);
1539 case ISD::ABS: return visitABS(N);
1540 case ISD::BSWAP: return visitBSWAP(N);
1541 case ISD::BITREVERSE: return visitBITREVERSE(N);
1542 case ISD::CTLZ: return visitCTLZ(N);
1543 case ISD::CTLZ_ZERO_UNDEF: return visitCTLZ_ZERO_UNDEF(N);
1544 case ISD::CTTZ: return visitCTTZ(N);
1545 case ISD::CTTZ_ZERO_UNDEF: return visitCTTZ_ZERO_UNDEF(N);
1546 case ISD::CTPOP: return visitCTPOP(N);
1547 case ISD::SELECT: return visitSELECT(N);
1548 case ISD::VSELECT: return visitVSELECT(N);
1549 case ISD::SELECT_CC: return visitSELECT_CC(N);
1550 case ISD::SETCC: return visitSETCC(N);
1551 case ISD::SETCCCARRY: return visitSETCCCARRY(N);
1552 case ISD::SIGN_EXTEND: return visitSIGN_EXTEND(N);
1553 case ISD::ZERO_EXTEND: return visitZERO_EXTEND(N);
1554 case ISD::ANY_EXTEND: return visitANY_EXTEND(N);
1555 case ISD::AssertSext:
1556 case ISD::AssertZext: return visitAssertExt(N);
1557 case ISD::SIGN_EXTEND_INREG: return visitSIGN_EXTEND_INREG(N);
1558 case ISD::SIGN_EXTEND_VECTOR_INREG: return visitSIGN_EXTEND_VECTOR_INREG(N);
1559 case ISD::ZERO_EXTEND_VECTOR_INREG: return visitZERO_EXTEND_VECTOR_INREG(N);
1560 case ISD::TRUNCATE: return visitTRUNCATE(N);
1561 case ISD::BITCAST: return visitBITCAST(N);
1562 case ISD::BUILD_PAIR: return visitBUILD_PAIR(N);
1563 case ISD::FADD: return visitFADD(N);
1564 case ISD::FSUB: return visitFSUB(N);
1565 case ISD::FMUL: return visitFMUL(N);
1566 case ISD::FMA: return visitFMA(N);
1567 case ISD::FDIV: return visitFDIV(N);
1568 case ISD::FREM: return visitFREM(N);
1569 case ISD::FSQRT: return visitFSQRT(N);
1570 case ISD::FCOPYSIGN: return visitFCOPYSIGN(N);
1571 case ISD::SINT_TO_FP: return visitSINT_TO_FP(N);
1572 case ISD::UINT_TO_FP: return visitUINT_TO_FP(N);
1573 case ISD::FP_TO_SINT: return visitFP_TO_SINT(N);
1574 case ISD::FP_TO_UINT: return visitFP_TO_UINT(N);
1575 case ISD::FP_ROUND: return visitFP_ROUND(N);
1576 case ISD::FP_ROUND_INREG: return visitFP_ROUND_INREG(N);
1577 case ISD::FP_EXTEND: return visitFP_EXTEND(N);
1578 case ISD::FNEG: return visitFNEG(N);
1579 case ISD::FABS: return visitFABS(N);
1580 case ISD::FFLOOR: return visitFFLOOR(N);
1581 case ISD::FMINNUM: return visitFMINNUM(N);
1582 case ISD::FMAXNUM: return visitFMAXNUM(N);
1583 case ISD::FCEIL: return visitFCEIL(N);
1584 case ISD::FTRUNC: return visitFTRUNC(N);
1585 case ISD::BRCOND: return visitBRCOND(N);
1586 case ISD::BR_CC: return visitBR_CC(N);
1587 case ISD::LOAD: return visitLOAD(N);
1588 case ISD::STORE: return visitSTORE(N);
1589 case ISD::INSERT_VECTOR_ELT: return visitINSERT_VECTOR_ELT(N);
1590 case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N);
1591 case ISD::BUILD_VECTOR: return visitBUILD_VECTOR(N);
1592 case ISD::CONCAT_VECTORS: return visitCONCAT_VECTORS(N);
1593 case ISD::EXTRACT_SUBVECTOR: return visitEXTRACT_SUBVECTOR(N);
1594 case ISD::VECTOR_SHUFFLE: return visitVECTOR_SHUFFLE(N);
1595 case ISD::SCALAR_TO_VECTOR: return visitSCALAR_TO_VECTOR(N);
1596 case ISD::INSERT_SUBVECTOR: return visitINSERT_SUBVECTOR(N);
1597 case ISD::MGATHER: return visitMGATHER(N);
1598 case ISD::MLOAD: return visitMLOAD(N);
1599 case ISD::MSCATTER: return visitMSCATTER(N);
1600 case ISD::MSTORE: return visitMSTORE(N);
1601 case ISD::FP_TO_FP16: return visitFP_TO_FP16(N);
1602 case ISD::FP16_TO_FP: return visitFP16_TO_FP(N);
1603 }
1604 return SDValue();
1605}
1606
1607SDValue DAGCombiner::combine(SDNode *N) {
1608 SDValue RV = visit(N);
1609
1610 // If nothing happened, try a target-specific DAG combine.
1611 if (!RV.getNode()) {
1612    assert(N->getOpcode() != ISD::DELETED_NODE &&
1613           "Node was deleted but visit returned NULL!");
1614
1615 if (N->getOpcode() >= ISD::BUILTIN_OP_END ||
1616 TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) {
1617
1618 // Expose the DAG combiner to the target combiner impls.
1619 TargetLowering::DAGCombinerInfo
1620 DagCombineInfo(DAG, Level, false, this);
1621
1622 RV = TLI.PerformDAGCombine(N, DagCombineInfo);
1623 }
1624 }
1625
1626 // If nothing happened still, try promoting the operation.
1627 if (!RV.getNode()) {
1628 switch (N->getOpcode()) {
1629 default: break;
1630 case ISD::ADD:
1631 case ISD::SUB:
1632 case ISD::MUL:
1633 case ISD::AND:
1634 case ISD::OR:
1635 case ISD::XOR:
1636 RV = PromoteIntBinOp(SDValue(N, 0));
1637 break;
1638 case ISD::SHL:
1639 case ISD::SRA:
1640 case ISD::SRL:
1641 RV = PromoteIntShiftOp(SDValue(N, 0));
1642 break;
1643 case ISD::SIGN_EXTEND:
1644 case ISD::ZERO_EXTEND:
1645 case ISD::ANY_EXTEND:
1646 RV = PromoteExtend(SDValue(N, 0));
1647 break;
1648 case ISD::LOAD:
1649 if (PromoteLoad(SDValue(N, 0)))
1650 RV = SDValue(N, 0);
1651 break;
1652 }
1653 }
1654
1655  // If N is a commutative binary node, try to eliminate it if the commuted
1656  // version is already present in the DAG.
1657 if (!RV.getNode() && TLI.isCommutativeBinOp(N->getOpcode()) &&
1658 N->getNumValues() == 1) {
1659 SDValue N0 = N->getOperand(0);
1660 SDValue N1 = N->getOperand(1);
1661
1662 // Constant operands are canonicalized to RHS.
1663 if (N0 != N1 && (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1))) {
1664 SDValue Ops[] = {N1, N0};
1665 SDNode *CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops,
1666 N->getFlags());
1667 if (CSENode)
1668 return SDValue(CSENode, 0);
1669 }
1670 }
1671
1672 return RV;
1673}
1674
1675/// Given a node, return its input chain if it has one, otherwise return a null
1676/// SDValue.
1677static SDValue getInputChainForNode(SDNode *N) {
1678 if (unsigned NumOps = N->getNumOperands()) {
1679 if (N->getOperand(0).getValueType() == MVT::Other)
1680 return N->getOperand(0);
1681 if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
1682 return N->getOperand(NumOps-1);
1683 for (unsigned i = 1; i < NumOps-1; ++i)
1684 if (N->getOperand(i).getValueType() == MVT::Other)
1685 return N->getOperand(i);
1686 }
1687 return SDValue();
1688}
1689
1690SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
1691 // If N has two operands, where one has an input chain equal to the other,
1692 // the 'other' chain is redundant.
1693 if (N->getNumOperands() == 2) {
1694 if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1))
1695 return N->getOperand(0);
1696 if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0))
1697 return N->getOperand(1);
1698 }
1699
1700 // Don't simplify token factors if optnone.
1701 if (OptLevel == CodeGenOpt::None)
1702 return SDValue();
1703
1704 SmallVector<SDNode *, 8> TFs; // List of token factors to visit.
1705 SmallVector<SDValue, 8> Ops; // Ops for replacing token factor.
1706 SmallPtrSet<SDNode*, 16> SeenOps;
1707 bool Changed = false; // If we should replace this token factor.
1708
1709 // Start out with this token factor.
1710 TFs.push_back(N);
1711
1712  // Iterate through the token factors. The TFs list grows as new token
1713  // factors are encountered.
1714 for (unsigned i = 0; i < TFs.size(); ++i) {
1715 SDNode *TF = TFs[i];
1716
1717 // Check each of the operands.
1718 for (const SDValue &Op : TF->op_values()) {
1719 switch (Op.getOpcode()) {
1720 case ISD::EntryToken:
1721 // Entry tokens don't need to be added to the list. They are
1722 // redundant.
1723 Changed = true;
1724 break;
1725
1726 case ISD::TokenFactor:
1727 if (Op.hasOneUse() && !is_contained(TFs, Op.getNode())) {
1728 // Queue up for processing.
1729 TFs.push_back(Op.getNode());
1730 // Clean up in case the token factor is removed.
1731 AddToWorklist(Op.getNode());
1732 Changed = true;
1733 break;
1734 }
1735      LLVM_FALLTHROUGH;
1736
1737 default:
1738 // Only add if it isn't already in the list.
1739 if (SeenOps.insert(Op.getNode()).second)
1740 Ops.push_back(Op);
1741 else
1742 Changed = true;
1743 break;
1744 }
1745 }
1746 }
1747
1748  // Remove nodes that are chained to another node in the list. Do so by
1749  // walking up chains breadth-first, stopping when we've seen
1750  // another operand. In general we must climb to the EntryNode, but we can exit
1751  // early if we find all remaining work is associated with just one operand, as
1752  // no further pruning is possible.
1753
1754 // List of nodes to search through and original Ops from which they originate.
1755 SmallVector<std::pair<SDNode *, unsigned>, 8> Worklist;
1756 SmallVector<unsigned, 8> OpWorkCount; // Count of work for each Op.
1757 SmallPtrSet<SDNode *, 16> SeenChains;
1758 bool DidPruneOps = false;
1759
1760 unsigned NumLeftToConsider = 0;
1761 for (const SDValue &Op : Ops) {
1762 Worklist.push_back(std::make_pair(Op.getNode(), NumLeftToConsider++));
1763 OpWorkCount.push_back(1);
1764 }
1765
1766 auto AddToWorklist = [&](unsigned CurIdx, SDNode *Op, unsigned OpNumber) {
1767    // If this is an Op, we can remove the op from the list. Re-mark any
1768    // search associated with it as coming from the current OpNumber.
1769 if (SeenOps.count(Op) != 0) {
1770 Changed = true;
1771 DidPruneOps = true;
1772 unsigned OrigOpNumber = 0;
1773 while (OrigOpNumber < Ops.size() && Ops[OrigOpNumber].getNode() != Op)
1774 OrigOpNumber++;
1775      assert((OrigOpNumber != Ops.size()) &&
1776             "expected to find TokenFactor Operand");
1777 // Re-mark worklist from OrigOpNumber to OpNumber
1778 for (unsigned i = CurIdx + 1; i < Worklist.size(); ++i) {
1779 if (Worklist[i].second == OrigOpNumber) {
1780 Worklist[i].second = OpNumber;
1781 }
1782 }
1783 OpWorkCount[OpNumber] += OpWorkCount[OrigOpNumber];
1784 OpWorkCount[OrigOpNumber] = 0;
1785 NumLeftToConsider--;
1786 }
1787 // Add if it's a new chain
1788 if (SeenChains.insert(Op).second) {
1789 OpWorkCount[OpNumber]++;
1790 Worklist.push_back(std::make_pair(Op, OpNumber));
1791 }
1792 };
1793
1794 for (unsigned i = 0; i < Worklist.size() && i < 1024; ++i) {
1795    // We need to consider at least 2 Ops to prune.
1796 if (NumLeftToConsider <= 1)
1797 break;
1798 auto CurNode = Worklist[i].first;
1799 auto CurOpNumber = Worklist[i].second;
1800    assert((OpWorkCount[CurOpNumber] > 0) &&
1801           "Node should not appear in worklist");
1802 switch (CurNode->getOpcode()) {
1803 case ISD::EntryToken:
1804      // Hitting EntryToken is the only way for the search to terminate
1805      // without hitting another operand's search.
1806      // Prevent us from marking this operand
1807      // considered.
1808 NumLeftToConsider++;
1809 break;
1810 case ISD::TokenFactor:
1811 for (const SDValue &Op : CurNode->op_values())
1812 AddToWorklist(i, Op.getNode(), CurOpNumber);
1813 break;
1814 case ISD::CopyFromReg:
1815 case ISD::CopyToReg:
1816 AddToWorklist(i, CurNode->getOperand(0).getNode(), CurOpNumber);
1817 break;
1818 default:
1819 if (auto *MemNode = dyn_cast<MemSDNode>(CurNode))
1820 AddToWorklist(i, MemNode->getChain().getNode(), CurOpNumber);
1821 break;
1822 }
1823 OpWorkCount[CurOpNumber]--;
1824 if (OpWorkCount[CurOpNumber] == 0)
1825 NumLeftToConsider--;
1826 }
1827
1828  // If we've changed things around then replace the token factor.
1829 if (Changed) {
1830 SDValue Result;
1831 if (Ops.empty()) {
1832 // The entry token is the only possible outcome.
1833 Result = DAG.getEntryNode();
1834 } else {
1835 if (DidPruneOps) {
1836 SmallVector<SDValue, 8> PrunedOps;
1837        // Keep only the ops that were not reached from another op's chain.
1838 for (const SDValue &Op : Ops) {
1839 if (SeenChains.count(Op.getNode()) == 0)
1840 PrunedOps.push_back(Op);
1841 }
1842 Result = DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, PrunedOps);
1843 } else {
1844 Result = DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Ops);
1845 }
1846 }
1847 return Result;
1848 }
1849 return SDValue();
1850}
1851
1852/// MERGE_VALUES can always be eliminated.
1853SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
1854 WorklistRemover DeadNodes(*this);
1855 // Replacing results may cause a different MERGE_VALUES to suddenly
1856 // be CSE'd with N, and carry its uses with it. Iterate until no
1857 // uses remain, to ensure that the node can be safely deleted.
1858 // First add the users of this node to the work list so that they
1859 // can be tried again once they have new operands.
1860 AddUsersToWorklist(N);
1861 do {
1862 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
1863 DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i));
1864 } while (!N->use_empty());
1865 deleteAndRecombine(N);
1866 return SDValue(N, 0); // Return N so it doesn't get rechecked!
1867}
1868
1869/// If \p N is a ConstantSDNode with isOpaque() == false return it casted to a
1870/// ConstantSDNode pointer else nullptr.
1871static ConstantSDNode *getAsNonOpaqueConstant(SDValue N) {
1872 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N);
1873 return Const != nullptr && !Const->isOpaque() ? Const : nullptr;
1874}
1875
1876SDValue DAGCombiner::foldBinOpIntoSelect(SDNode *BO) {
1877 auto BinOpcode = BO->getOpcode();
1878    assert((BinOpcode == ISD::ADD || BinOpcode == ISD::SUB ||
1879            BinOpcode == ISD::MUL || BinOpcode == ISD::SDIV ||
1880            BinOpcode == ISD::UDIV || BinOpcode == ISD::SREM ||
1881            BinOpcode == ISD::UREM || BinOpcode == ISD::AND ||
1882            BinOpcode == ISD::OR || BinOpcode == ISD::XOR ||
1883            BinOpcode == ISD::SHL || BinOpcode == ISD::SRL ||
1884            BinOpcode == ISD::SRA || BinOpcode == ISD::FADD ||
1885            BinOpcode == ISD::FSUB || BinOpcode == ISD::FMUL ||
1886            BinOpcode == ISD::FDIV || BinOpcode == ISD::FREM) &&
1887           "Unexpected binary operator");
1888
1889 // Don't do this unless the old select is going away. We want to eliminate the
1890 // binary operator, not replace a binop with a select.
1891 // TODO: Handle ISD::SELECT_CC.
1892 unsigned SelOpNo = 0;
1893 SDValue Sel = BO->getOperand(0);
1894 if (Sel.getOpcode() != ISD::SELECT || !Sel.hasOneUse()) {
1895 SelOpNo = 1;
1896 Sel = BO->getOperand(1);
1897 }
1898
1899 if (Sel.getOpcode() != ISD::SELECT || !Sel.hasOneUse())
1900 return SDValue();
1901
1902 SDValue CT = Sel.getOperand(1);
1903 if (!isConstantOrConstantVector(CT, true) &&
1904 !isConstantFPBuildVectorOrConstantFP(CT))
1905 return SDValue();
1906
1907 SDValue CF = Sel.getOperand(2);
1908 if (!isConstantOrConstantVector(CF, true) &&
1909 !isConstantFPBuildVectorOrConstantFP(CF))
1910 return SDValue();
1911
1912 // Bail out if any constants are opaque because we can't constant fold those.
1913  // The exception is "and" and "or" with either 0 or -1, in which case we can
1914  // propagate non-constant operands into the select. I.e.:
1915 // and (select Cond, 0, -1), X --> select Cond, 0, X
1916 // or X, (select Cond, -1, 0) --> select Cond, -1, X
1917 bool CanFoldNonConst = (BinOpcode == ISD::AND || BinOpcode == ISD::OR) &&
1918 (isNullConstantOrNullSplatConstant(CT) ||
1919 isAllOnesConstantOrAllOnesSplatConstant(CT)) &&
1920 (isNullConstantOrNullSplatConstant(CF) ||
1921 isAllOnesConstantOrAllOnesSplatConstant(CF));
1922
1923 SDValue CBO = BO->getOperand(SelOpNo ^ 1);
1924 if (!CanFoldNonConst &&
1925 !isConstantOrConstantVector(CBO, true) &&
1926 !isConstantFPBuildVectorOrConstantFP(CBO))
1927 return SDValue();
1928
1929 EVT VT = Sel.getValueType();
1930
1931  // In the case of a shift, the value and the shift amount may have different
1932  // VTs. For instance, on x86 the shift amount is i8 regardless of the LHS
1933  // type. Bail out if we have swapped operands and the value types do not
1934  // match. NB: x86 is fine when the operands are not swapped and the shift
1935  // amount VT is no bigger than that of the shifted value. TODO: it is possible
1936  // to check for a shift operation, correct the VTs and still optimize on x86.
1937 if (SelOpNo && VT != CBO.getValueType())
1938 return SDValue();
1939
1940 // We have a select-of-constants followed by a binary operator with a
1941 // constant. Eliminate the binop by pulling the constant math into the select.
1942 // Example: add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO
1943 SDLoc DL(Sel);
1944 SDValue NewCT = SelOpNo ? DAG.getNode(BinOpcode, DL, VT, CBO, CT)
1945 : DAG.getNode(BinOpcode, DL, VT, CT, CBO);
1946 if (!CanFoldNonConst && !NewCT.isUndef() &&
1947 !isConstantOrConstantVector(NewCT, true) &&
1948 !isConstantFPBuildVectorOrConstantFP(NewCT))
1949 return SDValue();
1950
1951 SDValue NewCF = SelOpNo ? DAG.getNode(BinOpcode, DL, VT, CBO, CF)
1952 : DAG.getNode(BinOpcode, DL, VT, CF, CBO);
1953 if (!CanFoldNonConst && !NewCF.isUndef() &&
1954 !isConstantOrConstantVector(NewCF, true) &&
1955 !isConstantFPBuildVectorOrConstantFP(NewCF))
1956 return SDValue();
1957
1958 return DAG.getSelect(DL, VT, Sel.getOperand(0), NewCT, NewCF);
1959}
1960
1961static SDValue foldAddSubBoolOfMaskedVal(SDNode *N, SelectionDAG &DAG) {
1962    assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
1963           "Expecting add or sub");
1964
1965 // Match a constant operand and a zext operand for the math instruction:
1966 // add Z, C
1967 // sub C, Z
1968 bool IsAdd = N->getOpcode() == ISD::ADD;
1969 SDValue C = IsAdd ? N->getOperand(1) : N->getOperand(0);
1970 SDValue Z = IsAdd ? N->getOperand(0) : N->getOperand(1);
1971 auto *CN = dyn_cast<ConstantSDNode>(C);
1972 if (!CN || Z.getOpcode() != ISD::ZERO_EXTEND)
1973 return SDValue();
1974
1975 // Match the zext operand as a setcc of a boolean.
1976 if (Z.getOperand(0).getOpcode() != ISD::SETCC ||
1977 Z.getOperand(0).getValueType() != MVT::i1)
1978 return SDValue();
1979
1980 // Match the compare as: setcc (X & 1), 0, eq.
1981 SDValue SetCC = Z.getOperand(0);
1982 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
1983 if (CC != ISD::SETEQ || !isNullConstant(SetCC.getOperand(1)) ||
1984 SetCC.getOperand(0).getOpcode() != ISD::AND ||
1985 !isOneConstant(SetCC.getOperand(0).getOperand(1)))
1986 return SDValue();
1987
1988 // We are adding/subtracting a constant and an inverted low bit. Turn that
1989  // into a subtract/add of the low bit with an incremented/decremented constant:
1990 // add (zext i1 (seteq (X & 1), 0)), C --> sub C+1, (zext (X & 1))
1991 // sub C, (zext i1 (seteq (X & 1), 0)) --> add C-1, (zext (X & 1))
1992 EVT VT = C.getValueType();
1993 SDLoc DL(N);
1994 SDValue LowBit = DAG.getZExtOrTrunc(SetCC.getOperand(0), DL, VT);
1995 SDValue C1 = IsAdd ? DAG.getConstant(CN->getAPIntValue() + 1, DL, VT) :
1996 DAG.getConstant(CN->getAPIntValue() - 1, DL, VT);
1997 return DAG.getNode(IsAdd ? ISD::SUB : ISD::ADD, DL, VT, C1, LowBit);
1998}
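
A quick scalar check of this rewrite in plain C++ (illustrative only; the
helper names are made up). Both sides agree for either parity of X, including
when C + 1 wraps:

  #include <cassert>
  #include <cstdint>

  // add (zext i1 (seteq (X & 1), 0)), C  -->  sub C+1, (zext (X & 1))
  uint32_t beforeFold(uint32_t X, uint32_t C) {
    return static_cast<uint32_t>((X & 1) == 0) + C;
  }
  uint32_t afterFold(uint32_t X, uint32_t C) {
    return (C + 1) - (X & 1);
  }

  int main() {
    for (uint32_t X = 0; X < 4; ++X)
      assert(beforeFold(X, 42) == afterFold(X, 42));
    return 0;
  }
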
1999
2000/// Try to fold a 'not' shifted sign-bit with an add/sub that has a constant
2001/// operand into a shift and add with a different constant.
2002static SDValue foldAddSubOfSignBit(SDNode *N, SelectionDAG &DAG) {
2003    assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
2004           "Expecting add or sub");
2005
2006 // We need a constant operand for the add/sub, and the other operand is a
2007 // logical shift right: add (srl), C or sub C, (srl).
2008 bool IsAdd = N->getOpcode() == ISD::ADD;
2009 SDValue ConstantOp = IsAdd ? N->getOperand(1) : N->getOperand(0);
2010 SDValue ShiftOp = IsAdd ? N->getOperand(0) : N->getOperand(1);
2011 ConstantSDNode *C = isConstOrConstSplat(ConstantOp);
2012 if (!C || ShiftOp.getOpcode() != ISD::SRL)
2013 return SDValue();
2014
2015 // The shift must be of a 'not' value.
2016 // TODO: Use isBitwiseNot() if it works with vectors.
2017 SDValue Not = ShiftOp.getOperand(0);
2018 if (!Not.hasOneUse() || Not.getOpcode() != ISD::XOR ||
2019 !isAllOnesConstantOrAllOnesSplatConstant(Not.getOperand(1)))
2020 return SDValue();
2021
2022 // The shift must be moving the sign bit to the least-significant-bit.
2023 EVT VT = ShiftOp.getValueType();
2024 SDValue ShAmt = ShiftOp.getOperand(1);
2025 ConstantSDNode *ShAmtC = isConstOrConstSplat(ShAmt);
2026 if (!ShAmtC || ShAmtC->getZExtValue() != VT.getScalarSizeInBits() - 1)
2027 return SDValue();
2028
2029 // Eliminate the 'not' by adjusting the shift and add/sub constant:
2030 // add (srl (not X), 31), C --> add (sra X, 31), (C + 1)
2031 // sub C, (srl (not X), 31) --> add (srl X, 31), (C - 1)
2032 SDLoc DL(N);
2033 auto ShOpcode = IsAdd ? ISD::SRA : ISD::SRL;
2034 SDValue NewShift = DAG.getNode(ShOpcode, DL, VT, Not.getOperand(0), ShAmt);
2035 APInt NewC = IsAdd ? C->getAPIntValue() + 1 : C->getAPIntValue() - 1;
2036 return DAG.getNode(ISD::ADD, DL, VT, NewShift, DAG.getConstant(NewC, DL, VT));
2037}
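
The same style of scalar check works for the sign-bit fold above (plain C++,
illustrative only; assumes the usual modular uint/int conversions and the
arithmetic signed >> of mainstream two's complement targets):

  #include <cassert>
  #include <cstdint>
  #include <initializer_list>

  // add (srl (not X), 31), C  -->  add (sra X, 31), (C + 1)
  uint32_t beforeFold(uint32_t X, uint32_t C) {
    return (~X >> 31) + C; // logical shift of the 'not'
  }
  uint32_t afterFold(uint32_t X, uint32_t C) {
    return static_cast<uint32_t>(static_cast<int32_t>(X) >> 31) + (C + 1);
  }

  int main() {
    for (uint32_t X : {0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu})
      assert(beforeFold(X, 7) == afterFold(X, 7));
    return 0;
  }
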
2038
2039SDValue DAGCombiner::visitADD(SDNode *N) {
2040 SDValue N0 = N->getOperand(0);
2041 SDValue N1 = N->getOperand(1);
2042 EVT VT = N0.getValueType();
2043 SDLoc DL(N);
2044
2045 // fold vector ops
2046 if (VT.isVector()) {
2047 if (SDValue FoldedVOp = SimplifyVBinOp(N))
2048 return FoldedVOp;
2049
2050 // fold (add x, 0) -> x, vector edition
2051 if (ISD::isBuildVectorAllZeros(N1.getNode()))
2052 return N0;
2053 if (ISD::isBuildVectorAllZeros(N0.getNode()))
2054 return N1;
2055 }
2056
2057 // fold (add x, undef) -> undef
2058 if (N0.isUndef())
2059 return N0;
2060
2061 if (N1.isUndef())
2062 return N1;
2063
2064 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
2065 // canonicalize constant to RHS
2066 if (!DAG.isConstantIntBuildVectorOrConstantInt(N1))
2067 return DAG.getNode(ISD::ADD, DL, VT, N1, N0);
2068 // fold (add c1, c2) -> c1+c2
2069 return DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, N0.getNode(),
2070 N1.getNode());
2071 }
2072
2073 // fold (add x, 0) -> x
2074 if (isNullConstant(N1))
2075 return N0;
2076
2077 if (isConstantOrConstantVector(N1, /* NoOpaque */ true)) {
2078 // fold ((c1-A)+c2) -> (c1+c2)-A
2079 if (N0.getOpcode() == ISD::SUB &&
2080 isConstantOrConstantVector(N0.getOperand(0), /* NoOpaque */ true)) {
2081 // FIXME: Adding 2 constants should be handled by FoldConstantArithmetic.
2082 return DAG.getNode(ISD::SUB, DL, VT,
2083 DAG.getNode(ISD::ADD, DL, VT, N1, N0.getOperand(0)),
2084 N0.getOperand(1));
2085 }
2086
2087 // add (sext i1 X), 1 -> zext (not i1 X)
2088 // We don't transform this pattern:
2089 // add (zext i1 X), -1 -> sext (not i1 X)
2090 // because most (?) targets generate better code for the zext form.
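    // ((sext i1 X) is 0 or -1, so adding 1 yields 1 or 0, i.e. zext (not X).)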
2091 if (N0.getOpcode() == ISD::SIGN_EXTEND && N0.hasOneUse() &&
2092 isOneConstantOrOneSplatConstant(N1)) {
2093 SDValue X = N0.getOperand(0);
2094 if ((!LegalOperations ||
2095 (TLI.isOperationLegal(ISD::XOR, X.getValueType()) &&
2096 TLI.isOperationLegal(ISD::ZERO_EXTEND, VT))) &&
2097 X.getScalarValueSizeInBits() == 1) {
2098 SDValue Not = DAG.getNOT(DL, X, X.getValueType());
2099 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Not);
2100 }
2101 }
2102
2103 // Undo the add -> or combine to merge constant offsets from a frame index.
2104 if (N0.getOpcode() == ISD::OR &&
2105 isa<FrameIndexSDNode>(N0.getOperand(0)) &&
2106 isa<ConstantSDNode>(N0.getOperand(1)) &&
2107 DAG.haveNoCommonBitsSet(N0.getOperand(0), N0.getOperand(1))) {
2108 SDValue Add0 = DAG.getNode(ISD::ADD, DL, VT, N1, N0.getOperand(1));
2109 return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), Add0);
2110 }
2111 }
2112
2113 if (SDValue NewSel = foldBinOpIntoSelect(N))
2114 return NewSel;
2115
2116 // reassociate add
2117 if (SDValue RADD = ReassociateOps(ISD::ADD, DL, N0, N1))
2118 return RADD;
2119
2120 // fold ((0-A) + B) -> B-A
2121 if (N0.getOpcode() == ISD::SUB &&
2122 isNullConstantOrNullSplatConstant(N0.getOperand(0)))
2123 return DAG.getNode(ISD::SUB, DL, VT, N1, N0.getOperand(1));
2124
2125 // fold (A + (0-B)) -> A-B
2126 if (N1.getOpcode() == ISD::SUB &&
2127 isNullConstantOrNullSplatConstant(N1.getOperand(0)))
2128 return DAG.getNode(ISD::SUB, DL, VT, N0, N1.getOperand(1));
2129
2130 // fold (A+(B-A)) -> B
2131 if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
2132 return N1.getOperand(0);
2133
2134 // fold ((B-A)+A) -> B
2135 if (N0.getOpcode() == ISD::SUB && N1 == N0.getOperand(1))
2136 return N0.getOperand(0);
2137
2138 // fold (A+(B-(A+C))) to (B-C)
2139 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
2140 N0 == N1.getOperand(1).getOperand(0))
2141 return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0),
2142 N1.getOperand(1).getOperand(1));
2143
2144 // fold (A+(B-(C+A))) to (B-C)
2145 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
2146 N0 == N1.getOperand(1).getOperand(1))
2147 return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0),
2148 N1.getOperand(1).getOperand(0));
2149
2150 // fold (A+((B-A)+or-C)) to (B+or-C)
2151 if ((N1.getOpcode() == ISD::SUB || N1.getOpcode() == ISD::ADD) &&
2152 N1.getOperand(0).getOpcode() == ISD::SUB &&
2153 N0 == N1.getOperand(0).getOperand(1))
2154 return DAG.getNode(N1.getOpcode(), DL, VT, N1.getOperand(0).getOperand(0),
2155 N1.getOperand(1));
2156
2157 // fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant
2158 if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB) {
2159 SDValue N00 = N0.getOperand(0);
2160 SDValue N01 = N0.getOperand(1);
2161 SDValue N10 = N1.getOperand(0);
2162 SDValue N11 = N1.getOperand(1);
2163
2164 if (isConstantOrConstantVector(N00) || isConstantOrConstantVector(N10))
2165 return DAG.getNode(ISD::SUB, DL, VT,
2166 DAG.getNode(ISD::ADD, SDLoc(N0), VT, N00, N10),
2167 DAG.getNode(ISD::ADD, SDLoc(N1), VT, N01, N11));
2168 }
2169
2170 if (SDValue V = foldAddSubBoolOfMaskedVal(N, DAG))
2171 return V;
2172
2173 if (SDValue V = foldAddSubOfSignBit(N, DAG))
2174 return V;
2175
2176 if (SimplifyDemandedBits(SDValue(N, 0)))
2177 return SDValue(N, 0);
2178
2179 // fold (a+b) -> (a|b) iff a and b share no bits.
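  // (If a & b == 0, no bit position can generate a carry, so the sum and the
  // bitwise OR compute identical results.)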
2180 if ((!LegalOperations || TLI.isOperationLegal(ISD::OR, VT)) &&
2181 DAG.haveNoCommonBitsSet(N0, N1))
2182 return DAG.getNode(ISD::OR, DL, VT, N0, N1);
2183
2184 // fold (add (xor a, -1), 1) -> (sub 0, a)
2185 if (isBitwiseNot(N0) && isOneConstantOrOneSplatConstant(N1))
2186 return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
2187 N0.getOperand(0));
2188
2189 if (SDValue Combined = visitADDLike(N0, N1, N))
2190 return Combined;
2191
2192 if (SDValue Combined = visitADDLike(N1, N0, N))
2193 return Combined;
2194
2195 return SDValue();
2196}
2197
2198static SDValue getAsCarry(const TargetLowering &TLI, SDValue V) {
2199 bool Masked = false;
2200
2201 // First, peel away TRUNCATE/ZERO_EXTEND/AND nodes due to legalization.
2202 while (true) {
2203 if (V.getOpcode() == ISD::TRUNCATE || V.getOpcode() == ISD::ZERO_EXTEND) {
2204 V = V.getOperand(0);
2205 continue;
2206 }
2207
2208 if (V.getOpcode() == ISD::AND && isOneConstant(V.getOperand(1))) {
2209 Masked = true;
2210 V = V.getOperand(0);
2211 continue;
2212 }
2213
2214 break;
2215 }
2216
2217 // If this is not a carry, return.
2218 if (V.getResNo() != 1)
2219 return SDValue();
2220
2221 if (V.getOpcode() != ISD::ADDCARRY && V.getOpcode() != ISD::SUBCARRY &&
2222 V.getOpcode() != ISD::UADDO && V.getOpcode() != ISD::USUBO)
2223 return SDValue();
2224
2225  // If the result is masked, then no matter what kind of bool it is we can
2226  // return it. If it isn't, then we need to make sure the boolean value is
2227  // either 0 or 1 and not some other value.
2228 if (Masked ||
2229 TLI.getBooleanContents(V.getValueType()) ==
2230 TargetLoweringBase::ZeroOrOneBooleanContent)
2231 return V;
2232
2233 return SDValue();
2234}
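// For example, a carry that legalization has wrapped as
//   (and (zext (uaddo X, Y):1), 1)
// peels down to the second result of the UADDO (with Masked set) and is
// returned as a usable carry, while the sum result (uaddo X, Y):0 is
// rejected by the ResNo check above.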
2235
2236SDValue DAGCombiner::visitADDLike(SDValue N0, SDValue N1, SDNode *LocReference) {
2237 EVT VT = N0.getValueType();
2238 SDLoc DL(LocReference);
2239
2240 // fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n))
2241 if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::SUB &&
2242 isNullConstantOrNullSplatConstant(N1.getOperand(0).getOperand(0)))
2243 return DAG.getNode(ISD::SUB, DL, VT, N0,
2244 DAG.getNode(ISD::SHL, DL, VT,
2245 N1.getOperand(0).getOperand(1),
2246 N1.getOperand(1)));
2247
2248 if (N1.getOpcode() == ISD::AND) {
2249 SDValue AndOp0 = N1.getOperand(0);
2250 unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0);
2251 unsigned DestBits = VT.getScalarSizeInBits();
2252
2253 // (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x))
2254 // and similar xforms where the inner op is either ~0 or 0.
2255 if (NumSignBits == DestBits &&
2256 isOneConstantOrOneSplatConstant(N1->getOperand(1)))
2257 return DAG.getNode(ISD::SUB, DL, VT, N0, AndOp0);
2258 }
2259
2260 // add (sext i1), X -> sub X, (zext i1)
2261 if (N0.getOpcode() == ISD::SIGN_EXTEND &&
2262 N0.getOperand(0).getValueType() == MVT::i1 &&
2263 !TLI.isOperationLegal(ISD::SIGN_EXTEND, MVT::i1)) {
2264 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
2265 return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt);
2266 }
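  // For example, if the i1 value is 1, (sext i1 1) == -1, so the add computes
  // X - 1 == X - (zext i1 1); if it is 0, both forms yield X. This trades a
  // sign extension of an i1 for a cheaper zero extension.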
2267
2268 // add X, (sextinreg Y i1) -> sub X, (and Y 1)
2269 if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) {
2270 VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1));
2271 if (TN->getVT() == MVT::i1) {
2272 SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0),
2273 DAG.getConstant(1, DL, VT));
2274 return DAG.getNode(ISD::SUB, DL, VT, N0, ZExt);
2275 }
2276 }
2277
2278 // (add X, (addcarry Y, 0, Carry)) -> (addcarry X, Y, Carry)
2279 if (N1.getOpcode() == ISD::ADDCARRY && isNullConstant(N1.getOperand(1)) &&
2280 N1.getResNo() == 0)
2281 return DAG.getNode(ISD::ADDCARRY, DL, N1->getVTList(),
2282 N0, N1.getOperand(0), N1.getOperand(2));
2283
2284 // (add X, Carry) -> (addcarry X, 0, Carry)
2285 if (TLI.isOperationLegalOrCustom(ISD::ADDCARRY, VT))
2286 if (SDValue Carry = getAsCarry(TLI, N1))
2287 return DAG.getNode(ISD::ADDCARRY, DL,
2288 DAG.getVTList(VT, Carry.getValueType()), N0,
2289 DAG.getConstant(0, DL, VT), Carry);
2290
2291 return SDValue();
2292}
2293
2294SDValue DAGCombiner::visitADDC(SDNode *N) {
2295 SDValue N0 = N->getOperand(0);
2296 SDValue N1 = N->getOperand(1);
2297 EVT VT = N0.getValueType();
2298 SDLoc DL(N);
2299
2300 // If the flag result is dead, turn this into an ADD.
2301 if (!N->hasAnyUseOfValue(1))
2302 return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
2303 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
2304
2305 // canonicalize constant to RHS.
2306 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2307 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2308 if (N0C && !N1C)
2309 return DAG.getNode(ISD::ADDC, DL, N->getVTList(), N1, N0);
2310
2311 // fold (addc x, 0) -> x + no carry out
2312 if (isNullConstant(N1))
2313 return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE,
2314 DL, MVT::Glue));
2315
2316 // If it cannot overflow, transform into an add.
2317 if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
2318 return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
2319 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
2320
2321 return SDValue();
2322}
2323
2324static SDValue flipBoolean(SDValue V, const SDLoc &DL, EVT VT,
2325 SelectionDAG &DAG, const TargetLowering &TLI) {
2326 SDValue Cst;
2327 switch (TLI.getBooleanContents(VT)) {
2328 case TargetLowering::ZeroOrOneBooleanContent:
2329 case TargetLowering::UndefinedBooleanContent:
2330 Cst = DAG.getConstant(1, DL, VT);
2331 break;
2332 case TargetLowering::ZeroOrNegativeOneBooleanContent:
2333 Cst = DAG.getConstant(-1, DL, VT);
2334 break;
2335 }
2336
2337 return DAG.getNode(ISD::XOR, DL, VT, V, Cst);
2338}
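// flipBoolean therefore maps 0 <-> 1 via (xor V, 1) for ZeroOrOne (and
// Undefined) booleans, and 0 <-> -1 via (xor V, -1) for ZeroOrNegativeOne
// booleans.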
2339
2340static bool isBooleanFlip(SDValue V, EVT VT, const TargetLowering &TLI) {
2341 if (V.getOpcode() != ISD::XOR) return false;
2342 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V.getOperand(1));
2343 if (!Const) return false;
2344
2345 switch(TLI.getBooleanContents(VT)) {
2346 case TargetLowering::ZeroOrOneBooleanContent:
2347 return Const->isOne();
2348 case TargetLowering::ZeroOrNegativeOneBooleanContent:
2349 return Const->isAllOnesValue();
2350 case TargetLowering::UndefinedBooleanContent:
2351 return (Const->getAPIntValue() & 0x01) == 1;
2352 }
2353  llvm_unreachable("Unsupported boolean content");
2354}
2355
2356SDValue DAGCombiner::visitUADDO(SDNode *N) {
2357 SDValue N0 = N->getOperand(0);
2358 SDValue N1 = N->getOperand(1);
2359 EVT VT = N0.getValueType();
2360 if (VT.isVector())
2361 return SDValue();
2362
2363 EVT CarryVT = N->getValueType(1);
2364 SDLoc DL(N);
2365
2366 // If the flag result is dead, turn this into an ADD.
2367 if (!N->hasAnyUseOfValue(1))
2368 return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
2369 DAG.getUNDEF(CarryVT));
2370
2371 // canonicalize constant to RHS.
2372 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2373 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2374 if (N0C && !N1C)
2375 return DAG.getNode(ISD::UADDO, DL, N->getVTList(), N1, N0);
2376
2377 // fold (uaddo x, 0) -> x + no carry out
2378 if (isNullConstant(N1))
2379 return CombineTo(N, N0, DAG.getConstant(0, DL, CarryVT));
2380
2381 // If it cannot overflow, transform into an add.
2382 if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
2383 return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
2384 DAG.getConstant(0, DL, CarryVT));
2385
2386  // fold (uaddo (xor a, -1), 1) -> (usubo 0, a) and flip carry.
2387 if (isBitwiseNot(N0) && isOneConstantOrOneSplatConstant(N1)) {
2388 SDValue Sub = DAG.getNode(ISD::USUBO, DL, N->getVTList(),
2389 DAG.getConstant(0, DL, VT),
2390 N0.getOperand(0));
2391 return CombineTo(N, Sub,
2392 flipBoolean(Sub.getValue(1), DL, CarryVT, DAG, TLI));
2393 }
2394
2395 if (SDValue Combined = visitUADDOLike(N0, N1, N))
2396 return Combined;
2397
2398 if (SDValue Combined = visitUADDOLike(N1, N0, N))
2399 return Combined;
2400
2401 return SDValue();
2402}
2403
2404SDValue DAGCombiner::visitUADDOLike(SDValue N0, SDValue N1, SDNode *N) {
2405 auto VT = N0.getValueType();
2406
2407 // (uaddo X, (addcarry Y, 0, Carry)) -> (addcarry X, Y, Carry)
2408 // If Y + 1 cannot overflow.
2409 if (N1.getOpcode() == ISD::ADDCARRY && isNullConstant(N1.getOperand(1))) {
2410 SDValue Y = N1.getOperand(0);
2411 SDValue One = DAG.getConstant(1, SDLoc(N), Y.getValueType());
2412 if (DAG.computeOverflowKind(Y, One) == SelectionDAG::OFK_Never)
2413 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(), N0, Y,
2414 N1.getOperand(2));
2415 }
2416
2417 // (uaddo X, Carry) -> (addcarry X, 0, Carry)
2418 if (TLI.isOperationLegalOrCustom(ISD::ADDCARRY, VT))
2419 if (SDValue Carry = getAsCarry(TLI, N1))
2420 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(), N0,
2421 DAG.getConstant(0, SDLoc(N), VT), Carry);
2422
2423 return SDValue();
2424}
2425
2426SDValue DAGCombiner::visitADDE(SDNode *N) {
2427 SDValue N0 = N->getOperand(0);
2428 SDValue N1 = N->getOperand(1);
2429 SDValue CarryIn = N->getOperand(2);
2430
2431 // canonicalize constant to RHS
2432 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2433 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2434 if (N0C && !N1C)
2435 return DAG.getNode(ISD::ADDE, SDLoc(N), N->getVTList(),
2436 N1, N0, CarryIn);
2437
2438 // fold (adde x, y, false) -> (addc x, y)
2439 if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
2440 return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N0, N1);
2441
2442 return SDValue();
2443}
2444
2445SDValue DAGCombiner::visitADDCARRY(SDNode *N) {
2446 SDValue N0 = N->getOperand(0);
2447 SDValue N1 = N->getOperand(1);
2448 SDValue CarryIn = N->getOperand(2);
2449 SDLoc DL(N);
2450
2451 // canonicalize constant to RHS
2452 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2453 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2454 if (N0C && !N1C)
2455 return DAG.getNode(ISD::ADDCARRY, DL, N->getVTList(), N1, N0, CarryIn);
2456
2457 // fold (addcarry x, y, false) -> (uaddo x, y)
2458 if (isNullConstant(CarryIn)) {
2459 if (!LegalOperations ||
2460 TLI.isOperationLegalOrCustom(ISD::UADDO, N->getValueType(0)))
2461 return DAG.getNode(ISD::UADDO, DL, N->getVTList(), N0, N1);
2462 }
2463
2464 EVT CarryVT = CarryIn.getValueType();
2465
2466 // fold (addcarry 0, 0, X) -> (and (ext/trunc X), 1) and no carry.
2467 if (isNullConstant(N0) && isNullConstant(N1)) {
2468 EVT VT = N0.getValueType();
2469 SDValue CarryExt = DAG.getBoolExtOrTrunc(CarryIn, DL, VT, CarryVT);
2470 AddToWorklist(CarryExt.getNode());
2471 return CombineTo(N, DAG.getNode(ISD::AND, DL, VT, CarryExt,
2472 DAG.getConstant(1, DL, VT)),
2473 DAG.getConstant(0, DL, CarryVT));
2474 }
2475
2476 // fold (addcarry (xor a, -1), 0, !b) -> (subcarry 0, a, b) and flip carry.
2477 if (isBitwiseNot(N0) && isNullConstant(N1) &&
2478 isBooleanFlip(CarryIn, CarryVT, TLI)) {
2479 SDValue Sub = DAG.getNode(ISD::SUBCARRY, DL, N->getVTList(),
2480 DAG.getConstant(0, DL, N0.getValueType()),
2481 N0.getOperand(0), CarryIn.getOperand(0));
2482 return CombineTo(N, Sub,
2483 flipBoolean(Sub.getValue(1), DL, CarryVT, DAG, TLI));
2484 }
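  // Checking the identity modulo 2^n: (~a) + 0 + !b == (2^n - 1 - a) + (1 - b)
  // == 2^n - a - b, which is exactly (subcarry 0, a, b); the original carry
  // result is then recovered by flipping the subtraction's borrow.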
2485
2486 if (SDValue Combined = visitADDCARRYLike(N0, N1, CarryIn, N))
2487 return Combined;
2488
2489 if (SDValue Combined = visitADDCARRYLike(N1, N0, CarryIn, N))
2490 return Combined;
2491
2492 return SDValue();
2493}
2494
2495SDValue DAGCombiner::visitADDCARRYLike(SDValue N0, SDValue N1, SDValue CarryIn,
2496 SDNode *N) {
2497 // Iff the flag result is dead:
2498 // (addcarry (add|uaddo X, Y), 0, Carry) -> (addcarry X, Y, Carry)
2499 if ((N0.getOpcode() == ISD::ADD ||
2500 (N0.getOpcode() == ISD::UADDO && N0.getResNo() == 0)) &&
2501 isNullConstant(N1) && !N->hasAnyUseOfValue(1))
2502 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(),
2503 N0.getOperand(0), N0.getOperand(1), CarryIn);
2504
2505 /**
2506   * When one of the addcarry arguments is itself a carry, we may be facing
2507   * a diamond carry propagation, in which case we try to transform the DAG
2508   * to ensure linear carry propagation if that is possible.
2509 *
2510 * We are trying to get:
2511 * (addcarry X, 0, (addcarry A, B, Z):Carry)
2512 */
2513 if (auto Y = getAsCarry(TLI, N1)) {
2514 /**
2515 * (uaddo A, B)
2516 * / \
2517 * Carry Sum
2518 * | \
2519 * | (addcarry *, 0, Z)
2520 * | /
2521 * \ Carry
2522 * | /
2523 * (addcarry X, *, *)
2524 */
2525 if (Y.getOpcode() == ISD::UADDO &&
2526 CarryIn.getResNo() == 1 &&
2527 CarryIn.getOpcode() == ISD::ADDCARRY &&
2528 isNullConstant(CarryIn.getOperand(1)) &&
2529 CarryIn.getOperand(0) == Y.getValue(0)) {
2530 auto NewY = DAG.getNode(ISD::ADDCARRY, SDLoc(N), Y->getVTList(),
2531 Y.getOperand(0), Y.getOperand(1),
2532 CarryIn.getOperand(2));
2533 AddToWorklist(NewY.getNode());
2534 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(), N0,
2535 DAG.getConstant(0, SDLoc(N), N0.getValueType()),
2536 NewY.getValue(1));
2537 }
2538 }
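  // Rewriting the diamond this way leaves a straight chain of ADDCARRY nodes,
  // a shape that targets can typically lower to back-to-back add-with-carry
  // instructions for multi-word arithmetic.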
2539
2540 return SDValue();
2541}
2542
2543// Since it may not be valid to emit a fold to zero for vector initializers,
2544// check if we can before folding.
2545static SDValue tryFoldToZero(const SDLoc &DL, const TargetLowering &TLI, EVT VT,
2546 SelectionDAG &DAG, bool LegalOperations,
2547 bool LegalTypes) {
2548 if (!VT.isVector())
2549 return DAG.getConstant(0, DL, VT);
2550 if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
2551 return DAG.getConstant(0, DL, VT);
2552 return SDValue();
2553}
2554
2555SDValue DAGCombiner::visitSUB(SDNode *N) {
2556 SDValue N0 = N->getOperand(0);
2557 SDValue N1 = N->getOperand(1);
2558 EVT VT = N0.getValueType();
2559 SDLoc DL(N);
2560
2561 // fold vector ops
2562 if (VT.isVector()) {
2563 if (SDValue FoldedVOp = SimplifyVBinOp(N))
2564 return FoldedVOp;
2565
2566 // fold (sub x, 0) -> x, vector edition
2567 if (ISD::isBuildVectorAllZeros(N1.getNode()))
2568 return N0;
2569 }
2570
2571 // fold (sub x, x) -> 0
2572 // FIXME: Refactor this and xor and other similar operations together.
2573 if (N0 == N1)
2574 return tryFoldToZero(DL, TLI, VT, DAG, LegalOperations, LegalTypes);
2575 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
2576 DAG.isConstantIntBuildVectorOrConstantInt(N1)) {
2577 // fold (sub c1, c2) -> c1-c2
2578 return DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, N0.getNode(),
2579 N1.getNode());
2580 }
2581
2582 if (SDValue NewSel = foldBinOpIntoSelect(N))
2583 return NewSel;
2584
2585 ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
2586
2587 // fold (sub x, c) -> (add x, -c)
2588 if (N1C) {
2589 return DAG.getNode(ISD::ADD, DL, VT, N0,
2590 DAG.getConstant(-N1C->getAPIntValue(), DL, VT));
2591 }
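  // Negating the constant is always safe: arithmetic here is modulo 2^n, so
  // (x - c) == (x + (2^n - c)) for every c, including the minimum signed
  // value (whose negation wraps back to itself).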
2592
2593 if (isNullConstantOrNullSplatConstant(N0)) {
2594 unsigned BitWidth = VT.getScalarSizeInBits();
2595 // Right-shifting everything out but the sign bit followed by negation is
2596 // the same as flipping arithmetic/logical shift type without the negation:
2597 // -(X >>u 31) -> (X >>s 31)
2598 // -(X >>s 31) -> (X >>u 31)
2599 if (N1->getOpcode() == ISD::SRA || N1->getOpcode() == ISD::SRL) {
2600 ConstantSDNode *ShiftAmt = isConstOrConstSplat(N1.getOperand(1));
2601 if (ShiftAmt && ShiftAmt->getZExtValue() == BitWidth - 1) {
2602 auto NewSh = N1->getOpcode() == ISD::SRA ? ISD::SRL : ISD::SRA;
2603 if (!LegalOperations || TLI.isOperationLegal(NewSh, VT))
2604 return DAG.getNode(NewSh, DL, VT, N1.getOperand(0), N1.getOperand(1));
2605 }
2606 }
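    // For example, on i32: if X < 0 then (X >>u 31) == 1 and its negation is
    // -1 == (X >>s 31); if X >= 0 both sides are 0. The same argument covers
    // the opposite direction, -(X >>s 31) == (X >>u 31).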
2607
2608 // 0 - X --> 0 if the sub is NUW.
2609 if (N->getFlags().hasNoUnsignedWrap())
2610 return N0;
2611
2612 if (DAG.MaskedValueIsZero(N1, ~APInt::getSignMask(BitWidth))) {
2613 // N1 is either 0 or the minimum signed value. If the sub is NSW, then
2614 // N1 must be 0 because negating the minimum signed value is undefined.
2615 if (N->getFlags().hasNoSignedWrap())
2616 return N0;
2617
2618 // 0 - X --> X if X is 0 or the minimum signed value.
2619 return N1;
2620 }
2621 }
2622
2623 // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
2624 if (isAllOnesConstantOrAllOnesSplatConstant(N0))
2625 return DAG.getNode(ISD::XOR, DL, VT, N1, N0);
2626
2627 // fold (A - (0-B)) -> A+B
2628 if (N1.getOpcode() == ISD::SUB &&
2629 isNullConstantOrNullSplatConstant(N1.getOperand(0)))
2630 return DAG.getNode(ISD::ADD, DL, VT, N0, N1.getOperand(1));
2631
2632 // fold A-(A-B) -> B
2633 if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(0))
2634 return N1.getOperand(1);
2635
2636 // fold (A+B)-A -> B
2637 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1)
2638 return N0.getOperand(1);
2639
2640 // fold (A+B)-B -> A
2641 if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1)
2642 return N0.getOperand(0);
2643
2644 // fold C2-(A+C1) -> (C2-C1)-A
2645 if (N1.getOpcode() == ISD::ADD) {
2646 SDValue N11 = N1.getOperand(1);
2647 if (isConstantOrConstantVector(N0, /* NoOpaques */ true) &&
2648 isConstantOrConstantVector(N11, /* NoOpaques */ true)) {
2649 SDValue NewC = DAG.getNode(ISD::SUB, DL, VT, N0, N11);
2650 return DAG.getNode(ISD::SUB, DL, VT, NewC, N1.getOperand(0));
2651 }
2652 }
2653
2654 // fold ((A+(B+or-C))-B) -> A+or-C
2655 if (N0.getOpcode() == ISD::ADD &&
2656 (N0.getOperand(1).getOpcode() == ISD::SUB ||
2657 N0.getOperand(1).getOpcode() == ISD::ADD) &&
2658 N0.getOperand(1).getOperand(0) == N1)
2659 return DAG.getNode(N0.getOperand(1).getOpcode(), DL, VT, N0.getOperand(0),
2660 N0.getOperand(1).getOperand(1));
2661
2662 // fold ((A+(C+B))-B) -> A+C
2663 if (N0.getOpcode() == ISD::ADD && N0.getOperand(1).getOpcode() == ISD::ADD &&
2664 N0.getOperand(1).getOperand(1) == N1)
2665 return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0),
2666 N0.getOperand(1).getOperand(0));
2667
2668 // fold ((A-(B-C))-C) -> A-B
2669 if (N0.getOpcode() == ISD::SUB && N0.getOperand(1).getOpcode() == ISD::SUB &&
2670 N0.getOperand(1).getOperand(1) == N1)
2671 return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0),
2672 N0.getOperand(1).getOperand(0));
2673
2674 // fold (A-(B-C)) -> A+(C-B)
2675 if (N1.getOpcode() == ISD::SUB && N1.hasOneUse())
2676 return DAG.getNode(ISD::ADD, DL, VT, N0,
2677 DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(1),
2678 N1.getOperand(0)));
2679
2680 // fold (X - (-Y * Z)) -> (X + (Y * Z))
2681 if (N1.getOpcode() == ISD::MUL && N1.hasOneUse()) {
2682 if (N1.getOperand(0).getOpcode() == ISD::SUB &&
2683 isNullConstantOrNullSplatConstant(N1.getOperand(0).getOperand(0))) {
2684 SDValue Mul = DAG.getNode(ISD::MUL, DL, VT,
2685 N1.getOperand(0).getOperand(1),
2686 N1.getOperand(1));
2687 return DAG.getNode(ISD::ADD, DL, VT, N0, Mul);
2688 }
2689 if (N1.getOperand(1).getOpcode() == ISD::SUB &&
2690 isNullConstantOrNullSplatConstant(N1.getOperand(1).getOperand(0))) {
2691 SDValue Mul = DAG.getNode(ISD::MUL, DL, VT,
2692 N1.getOperand(0),
2693 N1.getOperand(1).getOperand(1));
2694 return DAG.getNode(ISD::ADD, DL, VT, N0, Mul);
2695 }
2696 }
2697
2698 // If either operand of a sub is undef, the result is undef
2699 if (N0.isUndef())
2700 return N0;
2701 if (N1.isUndef())
2702 return N1;
2703
2704 if (SDValue V = foldAddSubBoolOfMaskedVal(N, DAG))
2705 return V;
2706
2707 if (SDValue V = foldAddSubOfSignBit(N, DAG))
2708 return V;
2709
2710 // fold Y = sra (X, size(X)-1); sub (xor (X, Y), Y) -> (abs X)
2711 if (TLI.isOperationLegalOrCustom(ISD::ABS, VT)) {
2712 if (N0.getOpcode() == ISD::XOR && N1.getOpcode() == ISD::SRA) {
2713 SDValue X0 = N0.getOperand(0), X1 = N0.getOperand(1);
2714 SDValue S0 = N1.getOperand(0);
2715 if ((X0 == S0 && X1 == N1) || (X0 == N1 && X1 == S0)) {
2716 unsigned OpSizeInBits = VT.getScalarSizeInBits();
2717 if (ConstantSDNode *C = isConstOrConstSplat(N1.getOperand(1)))
2718 if (C->getAPIntValue() == (OpSizeInBits - 1))
2719 return DAG.getNode(ISD::ABS, SDLoc(N), VT, S0);
2720 }
2721 }
2722 }
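  // This is the classic branchless abs: with Y == (X >>s 31) on i32, Y is 0
  // for non-negative X and -1 for negative X, so (X ^ Y) - Y yields X in the
  // first case and (~X) + 1 == -X in the second.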
2723
2724 // If the relocation model supports it, consider symbol offsets.
2725 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
2726 if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) {
2727 // fold (sub Sym, c) -> Sym-c
2728 if (N1C && GA->getOpcode() == ISD::GlobalAddress)
2729 return DAG.getGlobalAddress(GA->getGlobal(), SDLoc(N1C), VT,
2730 GA->getOffset() -
2731 (uint64_t)N1C->getSExtValue());
2732 // fold (sub Sym+c1, Sym+c2) -> c1-c2
2733 if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1))
2734 if (GA->getGlobal() == GB->getGlobal())
2735 return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(),
2736 DL, VT);
2737 }
2738
2739 // sub X, (sextinreg Y i1) -> add X, (and Y 1)
2740 if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) {
2741 VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1));
2742 if (TN->getVT() == MVT::i1) {
2743 SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0),
2744 DAG.getConstant(1, DL, VT));
2745 return DAG.getNode(ISD::ADD, DL, VT, N0, ZExt);
2746 }
2747 }
2748
2749 return SDValue();
2750}
2751
2752SDValue DAGCombiner::visitSUBC(SDNode *N) {
2753 SDValue N0 = N->getOperand(0);
2754 SDValue N1 = N->getOperand(1);
2755 EVT VT = N0.getValueType();
2756 SDLoc DL(N);
2757
2758  // If the flag result is dead, turn this into a SUB.
2759 if (!N->hasAnyUseOfValue(1))
2760 return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1),
2761 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
2762
2763 // fold (subc x, x) -> 0 + no borrow
2764 if (N0 == N1)
2765 return CombineTo(N, DAG.getConstant(0, DL, VT),
2766 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
2767
2768 // fold (subc x, 0) -> x + no borrow
2769 if (isNullConstant(N1))
2770 return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
2771
2772 // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) + no borrow
2773 if (isAllOnesConstant(N0))
2774 return CombineTo(N, DAG.getNode(ISD::XOR, DL, VT, N1, N0),
2775 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
2776
2777 return SDValue();
2778}
2779
2780SDValue DAGCombiner::visitUSUBO(SDNode *N) {
2781 SDValue N0 = N->getOperand(0);
2782 SDValue N1 = N->getOperand(1);
2783 EVT VT = N0.getValueType();
2784 if (VT.isVector())
2785 return SDValue();
2786
2787 EVT CarryVT = N->getValueType(1);
2788 SDLoc DL(N);
2789
2790  // If the flag result is dead, turn this into a SUB.
2791 if (!N->hasAnyUseOfValue(1))
2792 return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1),
2793 DAG.getUNDEF(CarryVT));
2794
2795 // fold (usubo x, x) -> 0 + no borrow
2796 if (N0 == N1)
2797 return CombineTo(N, DAG.getConstant(0, DL, VT),
2798 DAG.getConstant(0, DL, CarryVT));
2799
2800 // fold (usubo x, 0) -> x + no borrow
2801 if (isNullConstant(N1))
2802 return CombineTo(N, N0, DAG.getConstant(0, DL, CarryVT));
2803
2804 // Canonicalize (usubo -1, x) -> ~x, i.e. (xor x, -1) + no borrow
2805 if (isAllOnesConstant(N0))
2806 return CombineTo(N, DAG.getNode(ISD::XOR, DL, VT, N1, N0),
2807 DAG.getConstant(0, DL, CarryVT));
2808
2809 return SDValue();
2810}
2811
2812SDValue DAGCombiner::visitSUBE(SDNode *N) {
2813 SDValue N0 = N->getOperand(0);
2814 SDValue N1 = N->getOperand(1);
2815 SDValue CarryIn = N->getOperand(2);
2816
2817 // fold (sube x, y, false) -> (subc x, y)
2818 if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
2819 return DAG.getNode(ISD::SUBC, SDLoc(N), N->getVTList(), N0, N1);
2820
2821 return SDValue();
2822}
2823
2824SDValue DAGCombiner::visitSUBCARRY(SDNode *N) {
2825 SDValue N0 = N->getOperand(0);
2826 SDValue N1 = N->getOperand(1);
2827 SDValue CarryIn = N->getOperand(2);
2828
2829 // fold (subcarry x, y, false) -> (usubo x, y)
2830 if (isNullConstant(CarryIn)) {
2831 if (!LegalOperations ||
2832 TLI.isOperationLegalOrCustom(ISD::USUBO, N->getValueType(0)))
2833 return DAG.getNode(ISD::USUBO, SDLoc(N), N->getVTList(), N0, N1);
2834 }
2835
2836 return SDValue();
2837}
2838
2839SDValue DAGCombiner::visitMUL(SDNode *N) {
2840 SDValue N0 = N->getOperand(0);
2841 SDValue N1 = N->getOperand(1);
2842 EVT VT = N0.getValueType();
2843
2844 // fold (mul x, undef) -> 0
2845 if (N0.isUndef() || N1.isUndef())
2846 return DAG.getConstant(0, SDLoc(N), VT);
2847
2848 bool N0IsConst = false;
2849 bool N1IsConst = false;
2850 bool N1IsOpaqueConst = false;
2851 bool N0IsOpaqueConst = false;
2852 APInt ConstValue0, ConstValue1;
2853 // fold vector ops
2854 if (VT.isVector()) {
2855 if (SDValue FoldedVOp = SimplifyVBinOp(N))
2856 return FoldedVOp;
2857
2858 N0IsConst = ISD::isConstantSplatVector(N0.getNode(), ConstValue0);
2859 N1IsConst = ISD::isConstantSplatVector(N1.getNode(), ConstValue1);
2860    assert((!N0IsConst ||
2861            ConstValue0.getBitWidth() == VT.getScalarSizeInBits()) &&
2862           "Splat APInt should be element width");
2863    assert((!N1IsConst ||
2864            ConstValue1.getBitWidth() == VT.getScalarSizeInBits()) &&
2865           "Splat APInt should be element width");
2866 } else {
2867 N0IsConst = isa<ConstantSDNode>(N0);
2868 if (N0IsConst) {
2869 ConstValue0 = cast<ConstantSDNode>(N0)->getAPIntValue();
2870 N0IsOpaqueConst = cast<ConstantSDNode>(N0)->isOpaque();
2871 }
2872 N1IsConst = isa<ConstantSDNode>(N1);
2873 if (N1IsConst) {
2874 ConstValue1 = cast<ConstantSDNode>(N1)->getAPIntValue();
2875 N1IsOpaqueConst = cast<ConstantSDNode>(N1)->isOpaque();
2876 }
2877 }
2878
2879 // fold (mul c1, c2) -> c1*c2
2880 if (N0IsConst && N1IsConst && !N0IsOpaqueConst && !N1IsOpaqueConst)
2881 return DAG.FoldConstantArithmetic(ISD::MUL, SDLoc(N), VT,
2882 N0.getNode(), N1.getNode());
2883
2884 // canonicalize constant to RHS (vector doesn't have to splat)
2885 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
2886 !DAG.isConstantIntBuildVectorOrConstantInt(N1))
2887 return DAG.getNode(ISD::MUL, SDLoc(N), VT, N1, N0);
2888 // fold (mul x, 0) -> 0
2889 if (N1IsConst && ConstValue1.isNullValue())
2890 return N1;
2891 // fold (mul x, 1) -> x
2892 if (N1IsConst && ConstValue1.isOneValue())
2893 return N0;
2894
2895 if (SDValue NewSel = foldBinOpIntoSelect(N))
2896 return NewSel;
2897
2898 // fold (mul x, -1) -> 0-x
2899 if (N1IsConst && ConstValue1.isAllOnesValue()) {
2900 SDLoc DL(N);
2901 return DAG.getNode(ISD::SUB, DL, VT,
2902 DAG.getConstant(0, DL, VT), N0);
2903 }
2904 // fold (mul x, (1 << c)) -> x << c
2905 if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) &&
2906 DAG.isKnownToBeAPowerOfTwo(N1) &&
2907 (!VT.isVector() || Level <= AfterLegalizeVectorOps)) {
2908 SDLoc DL(N);
2909 SDValue LogBase2 = BuildLogBase2(N1, DL);
2910 EVT ShiftVT = getShiftAmountTy(N0.getValueType());
2911 SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ShiftVT);
2912 return DAG.getNode(ISD::SHL, DL, VT, N0, Trunc);
2913 }
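  // For example, (mul x, 8) becomes (shl x, 3); BuildLogBase2 extracts the
  // shift amount from the constant, including splat-vector operands.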
2914 // fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
2915 if (N1IsConst && !N1IsOpaqueConst && (-ConstValue1).isPowerOf2()) {
2916 unsigned Log2Val = (-ConstValue1).logBase2();
2917 SDLoc DL(N);
2918 // FIXME: If the input is something that is easily negated (e.g. a
2919 // single-use add), we should put the negate there.
2920 return DAG.getNode(ISD::SUB, DL, VT,
2921 DAG.getConstant(0, DL, VT),
2922 DAG.getNode(ISD::SHL, DL, VT, N0,
2923 DAG.getConstant(Log2Val, DL,
2924 getShiftAmountTy(N0.getValueType()))));
2925 }
2926
2927 // (mul (shl X, c1), c2) -> (mul X, c2 << c1)
2928 if (N0.getOpcode() == ISD::SHL &&
2929 isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
2930 isConstantOrConstantVector(N0.getOperand(1), /* NoOpaques */ true)) {
2931 SDValue C3 = DAG.getNode(ISD::SHL, SDLoc(N), VT, N1, N0.getOperand(1));
2932 if (isConstantOrConstantVector(C3))
2933 return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), C3);
2934 }
2935
2936 // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one
2937 // use.
2938 {
2939 SDValue Sh(nullptr, 0), Y(nullptr, 0);
2940
2941 // Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)).
2942 if (N0.getOpcode() == ISD::SHL &&
2943 isConstantOrConstantVector(N0.getOperand(1)) &&
2944 N0.getNode()->hasOneUse()) {
2945 Sh = N0; Y = N1;
2946 } else if (N1.getOpcode() == ISD::SHL &&
2947 isConstantOrConstantVector(N1.getOperand(1)) &&
2948 N1.getNode()->hasOneUse()) {
2949 Sh = N1; Y = N0;
2950 }
2951
2952 if (Sh.getNode()) {
2953 SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT, Sh.getOperand(0), Y);
2954 return DAG.getNode(ISD::SHL, SDLoc(N), VT, Mul, Sh.getOperand(1));
2955 }
2956 }
2957
2958 // fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)
2959 if (DAG.isConstantIntBuildVectorOrConstantInt(N1) &&
2960 N0.getOpcode() == ISD::ADD &&
2961 DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1)) &&
2962 isMulAddWithConstProfitable(N, N0, N1))
2963 return DAG.getNode(ISD::ADD, SDLoc(N), VT,
2964 DAG.getNode(ISD::MUL, SDLoc(N0), VT,
2965 N0.getOperand(0), N1),
2966 DAG.getNode(ISD::MUL, SDLoc(N1), VT,
2967 N0.getOperand(1), N1));
2968
2969 // reassociate mul
2970 if (SDValue RMUL = ReassociateOps(ISD::MUL, SDLoc(N), N0, N1))
2971 return RMUL;
2972
2973 return SDValue();
2974}
2975
2976/// Return true if divmod libcall is available.
2977static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned,
2978 const TargetLowering &TLI) {
2979 RTLIB::Libcall LC;
2980 EVT NodeType = Node->getValueType(0);
2981 if (!NodeType.isSimple())
2982 return false;
2983 switch (NodeType.getSimpleVT().SimpleTy) {
2984 default: return false; // No libcall for vector types.
2985 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
2986 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
2987 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
2988 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
2989 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break;
2990 }
2991
2992 return TLI.getLibcallName(LC) != nullptr;
2993}
2994
2995/// Issue divrem if both quotient and remainder are needed.
2996SDValue DAGCombiner::useDivRem(SDNode *Node) {
2997 if (Node->use_empty())
2998 return SDValue(); // This is a dead node, leave it alone.
2999
3000 unsigned Opcode = Node->getOpcode();
3001 bool isSigned = (Opcode == ISD::SDIV) || (Opcode == ISD::SREM);
3002 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
3003
3004  // DivMod lib calls can still work on non-legal types.
3005 EVT VT = Node->getValueType(0);
3006 if (VT.isVector() || !VT.isInteger())
3007 return SDValue();
3008
3009 if (!TLI.isTypeLegal(VT) && !TLI.isOperationCustom(DivRemOpc, VT))
3010 return SDValue();
3011
3012 // If DIVREM is going to get expanded into a libcall,
3013 // but there is no libcall available, then don't combine.
3014 if (!TLI.isOperationLegalOrCustom(DivRemOpc, VT) &&
3015 !isDivRemLibcallAvailable(Node, isSigned, TLI))
3016 return SDValue();
3017
3018 // If div is legal, it's better to do the normal expansion
3019 unsigned OtherOpcode = 0;
3020 if ((Opcode == ISD::SDIV) || (Opcode == ISD::UDIV)) {
3021 OtherOpcode = isSigned ? ISD::SREM : ISD::UREM;
3022 if (TLI.isOperationLegalOrCustom(Opcode, VT))
3023 return SDValue();
3024 } else {
3025 OtherOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
3026 if (TLI.isOperationLegalOrCustom(OtherOpcode, VT))
3027 return SDValue();
3028 }
3029
3030 SDValue Op0 = Node->getOperand(0);
3031 SDValue Op1 = Node->getOperand(1);
3032 SDValue combined;
3033 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(),
3034 UE = Op0.getNode()->use_end(); UI != UE; ++UI) {
3035 SDNode *User = *UI;
3036 if (User == Node || User->getOpcode() == ISD::DELETED_NODE ||
3037 User->use_empty())
3038 continue;
3039 // Convert the other matching node(s), too;
3040 // otherwise, the DIVREM may get target-legalized into something
3041 // target-specific that we won't be able to recognize.
3042 unsigned UserOpc = User->getOpcode();
3043 if ((UserOpc == Opcode || UserOpc == OtherOpcode || UserOpc == DivRemOpc) &&
3044 User->getOperand(0) == Op0 &&
3045 User->getOperand(1) == Op1) {
3046 if (!combined) {
3047 if (UserOpc == OtherOpcode) {
3048 SDVTList VTs = DAG.getVTList(VT, VT);
3049 combined = DAG.getNode(DivRemOpc, SDLoc(Node), VTs, Op0, Op1);
3050 } else if (UserOpc == DivRemOpc) {
3051 combined = SDValue(User, 0);
3052 } else {
3053          assert(UserOpc == Opcode);
3054 continue;
3055 }
3056 }
3057 if (UserOpc == ISD::SDIV || UserOpc == ISD::UDIV)
3058 CombineTo(User, combined);
3059 else if (UserOpc == ISD::SREM || UserOpc == ISD::UREM)
3060 CombineTo(User, combined.getValue(1));
3061 }
3062 }
3063 return combined;
3064}
3065
3066static SDValue simplifyDivRem(SDNode *N, SelectionDAG &DAG) {
3067 SDValue N0 = N->getOperand(0);
3068 SDValue N1 = N->getOperand(1);
3069 EVT VT = N->getValueType(0);
3070 SDLoc DL(N);
3071
3072 if (DAG.isUndef(N->getOpcode(), {N0, N1}))
3073 return DAG.getUNDEF(VT);
3074
3075 // undef / X -> 0
3076 // undef % X -> 0
3077 if (N0.isUndef())
3078 return DAG.getConstant(0, DL, VT);
3079
3080 return SDValue();
3081}
3082
3083SDValue DAGCombiner::visitSDIV(SDNode *N) {
3084 SDValue N0 = N->getOperand(0);
3085 SDValue N1 = N->getOperand(1);
3086 EVT VT = N->getValueType(0);
3087 EVT CCVT = getSetCCResultType(VT);
3088
3089 // fold vector ops
3090 if (VT.isVector())
3091 if (SDValue FoldedVOp = SimplifyVBinOp(N))
3092 return FoldedVOp;
3093
3094 SDLoc DL(N);
3095
3096 // fold (sdiv c1, c2) -> c1/c2
3097 ConstantSDNode *N0C = isConstOrConstSplat(N0);
3098 ConstantSDNode *N1C = isConstOrConstSplat(N1);
3099 if (N0C && N1C && !N0C->isOpaque() && !N1C->isOpaque())
3100 return DAG.FoldConstantArithmetic(ISD::SDIV, DL, VT, N0C, N1C);
3101 // fold (sdiv X, 1) -> X
3102 if (N1C && N1C->isOne())
3103 return N0;
3104 // fold (sdiv X, -1) -> 0-X
3105 if (N1C && N1C->isAllOnesValue())
3106 return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), N0);
3107 // fold (sdiv X, MIN_SIGNED) -> select(X == MIN_SIGNED, 1, 0)
3108 if (N1C && N1C->getAPIntValue().isMinSignedValue())
3109 return DAG.getSelect(DL, VT, DAG.getSetCC(DL, CCVT, N0, N1, ISD::SETEQ),
3110 DAG.getConstant(1, DL, VT),
3111 DAG.getConstant(0, DL, VT));
3112
3113 if (SDValue V = simplifyDivRem(N, DAG))
3114 return V;
3115
3116 if (SDValue NewSel = foldBinOpIntoSelect(N))
3117 return NewSel;
3118
3119 // If we know the sign bits of both operands are zero, strength reduce to a
3120 // udiv instead. Handles (X&15) /s 4 -> X&15 >> 2
3121 if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
3122 return DAG.getNode(ISD::UDIV, DL, N1.getValueType(), N0, N1);
3123
3124 if (SDValue V = visitSDIVLike(N0, N1, N))
3125 return V;
3126
3127 // sdiv, srem -> sdivrem
3128 // If the divisor is constant, then return DIVREM only if isIntDivCheap() is
3129 // true. Otherwise, we break the simplification logic in visitREM().
3130 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
3131 if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr))
3132 if (SDValue DivRem = useDivRem(N))
3133 return DivRem;
3134
3135 return SDValue();
3136}
3137
3138SDValue DAGCombiner::visitSDIVLike(SDValue N0, SDValue N1, SDNode *N) {
3139 SDLoc DL(N);
3140 EVT VT = N->getValueType(0);
3141 EVT CCVT = getSetCCResultType(VT);
3142 unsigned BitWidth = VT.getScalarSizeInBits();
3143
3144 ConstantSDNode *N1C = isConstOrConstSplat(N1);
3145
3146  // Helper for determining whether a value is a power-of-2 (or negated
3147  // power-of-2) constant scalar, or a vector of such elements.
3148 auto IsPowerOfTwo = [](ConstantSDNode *C) {
3149 if (C->isNullValue() || C->isOpaque())
3150 return false;
3151 if (C->getAPIntValue().isPowerOf2())
3152 return true;
3153 if ((-C->getAPIntValue()).isPowerOf2())
3154 return true;
3155 return false;
3156 };
3157
3158 // fold (sdiv X, pow2) -> simple ops after legalize
3159 // FIXME: We check for the exact bit here because the generic lowering gives
3160 // better results in that case. The target-specific lowering should learn how
3161 // to handle exact sdivs efficiently.
3162 if (!N->getFlags().hasExact() &&
3163 ISD::matchUnaryPredicate(N1C ? SDValue(N1C, 0) : N1, IsPowerOfTwo)) {
3164 // Target-specific implementation of sdiv x, pow2.
3165 if (SDValue Res = BuildSDIVPow2(N))
3166 return Res;
3167
3168 // Create constants that are functions of the shift amount value.
3169 EVT ShiftAmtTy = getShiftAmountTy(N0.getValueType());
3170 SDValue Bits = DAG.getConstant(BitWidth, DL, ShiftAmtTy);
3171 SDValue C1 = DAG.getNode(ISD::CTTZ, DL, VT, N1);
3172 C1 = DAG.getZExtOrTrunc(C1, DL, ShiftAmtTy);
3173 SDValue Inexact = DAG.getNode(ISD::SUB, DL, ShiftAmtTy, Bits, C1);
3174 if (!isConstantOrConstantVector(Inexact))
3175 return SDValue();
3176
3177 // Splat the sign bit into the register
3178 SDValue Sign = DAG.getNode(ISD::SRA, DL, VT, N0,
3179 DAG.getConstant(BitWidth - 1, DL, ShiftAmtTy));
3180 AddToWorklist(Sign.getNode());
3181
3182 // Add (N0 < 0) ? abs2 - 1 : 0;
3183 SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, Sign, Inexact);
3184 AddToWorklist(Srl.getNode());
3185 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Srl);
3186 AddToWorklist(Add.getNode());
3187 SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Add, C1);
3188 AddToWorklist(Sra.getNode());
3189
3190 // Special case: (sdiv X, 1) -> X
3191    // Special case: (sdiv X, -1) -> 0-X
3192 SDValue One = DAG.getConstant(1, DL, VT);
3193 SDValue AllOnes = DAG.getAllOnesConstant(DL, VT);
3194 SDValue IsOne = DAG.getSetCC(DL, CCVT, N1, One, ISD::SETEQ);
3195 SDValue IsAllOnes = DAG.getSetCC(DL, CCVT, N1, AllOnes, ISD::SETEQ);
3196 SDValue IsOneOrAllOnes = DAG.getNode(ISD::OR, DL, CCVT, IsOne, IsAllOnes);
3197 Sra = DAG.getSelect(DL, VT, IsOneOrAllOnes, N0, Sra);
3198
3199 // If dividing by a positive value, we're done. Otherwise, the result must
3200 // be negated.
3201 SDValue Zero = DAG.getConstant(0, DL, VT);
3202 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, Zero, Sra);
3203
3204 // FIXME: Use SELECT_CC once we improve SELECT_CC constant-folding.
3205 SDValue IsNeg = DAG.getSetCC(DL, CCVT, N1, Zero, ISD::SETLT);
3206 SDValue Res = DAG.getSelect(DL, VT, IsNeg, Sub, Sra);
3207 return Res;
3208 }
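  // Worked example for (sdiv X, 4) on i32: C1 == 2 and Inexact == 30, so
  // Sign == (X >>s 31) and Srl == (Sign >>u 30), which is 3 for negative X
  // and 0 otherwise. The sequence therefore computes (X + 3) >>s 2 for
  // negative X and X >>s 2 for non-negative X, rounding toward zero as sdiv
  // requires.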
3209
3210 // If integer divide is expensive and we satisfy the requirements, emit an
3211 // alternate sequence. Targets may check function attributes for size/speed
3212 // trade-offs.
3213 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
3214 if (N1C && !TLI.isIntDivCheap(N->getValueType(0), Attr))
3215 if (SDValue Op = BuildSDIV(N))
3216 return Op;
3217
3218 return SDValue();
3219}
3220
3221SDValue DAGCombiner::visitUDIV(SDNode *N) {
3222 SDValue N0 = N->getOperand(0);
3223 SDValue N1 = N->getOperand(1);
3224 EVT VT = N->getValueType(0);
3225 EVT CCVT = getSetCCResultType(VT);
3226
3227 // fold vector ops
3228 if (VT.isVector())
3229 if (SDValue FoldedVOp = SimplifyVBinOp(N))
3230 return FoldedVOp;
3231
3232 SDLoc DL(N);
3233
3234 // fold (udiv c1, c2) -> c1/c2
3235 ConstantSDNode *N0C = isConstOrConstSplat(N0);
3236 ConstantSDNode *N1C = isConstOrConstSplat(N1);
3237 if (N0C && N1C)
3238 if (SDValue Folded = DAG.FoldConstantArithmetic(ISD::UDIV, DL, VT,
3239 N0C, N1C))
3240 return Folded;
3241 // fold (udiv X, 1) -> X
3242 if (N1C && N1C->isOne())
3243 return N0;
3244 // fold (udiv X, -1) -> select(X == -1, 1, 0)
3245 if (N1C && N1C->getAPIntValue().isAllOnesValue())
3246 return DAG.getSelect(DL, VT, DAG.getSetCC(DL, CCVT, N0, N1, ISD::SETEQ),
3247 DAG.getConstant(1, DL, VT),
3248 DAG.getConstant(0, DL, VT));
3249
3250 if (SDValue V = simplifyDivRem(N, DAG))
3251 return V;
3252
3253 if (SDValue NewSel = foldBinOpIntoSelect(N))
3254 return NewSel;
3255
3256 if (SDValue V = visitUDIVLike(N0, N1, N))
3257 return V;
3258
3259  // udiv, urem -> udivrem
3260 // If the divisor is constant, then return DIVREM only if isIntDivCheap() is
3261 // true. Otherwise, we break the simplification logic in visitREM().
3262 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
3263 if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr))
3264 if (SDValue DivRem = useDivRem(N))
3265 return DivRem;
3266
3267 return SDValue();
3268}
3269
3270SDValue DAGCombiner::visitUDIVLike(SDValue N0, SDValue N1, SDNode *N) {
3271 SDLoc DL(N);
3272 EVT VT = N->getValueType(0);
3273
3274 ConstantSDNode *N1C = isConstOrConstSplat(N1);
3275
3276 // fold (udiv x, (1 << c)) -> x >>u c
3277 if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) &&
3278 DAG.isKnownToBeAPowerOfTwo(N1)) {
3279 SDValue LogBase2 = BuildLogBase2(N1, DL);
3280 AddToWorklist(LogBase2.getNode());
3281
3282 EVT ShiftVT = getShiftAmountTy(N0.getValueType());
3283 SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ShiftVT);
3284 AddToWorklist(Trunc.getNode());
3285 return DAG.getNode(ISD::SRL, DL, VT, N0, Trunc);
3286 }
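  // For example, (udiv x, 8) becomes (srl x, 3), with the log2 value
  // zero-extended or truncated to the target's shift-amount type.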
3287
3288 // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
3289 if (N1.getOpcode() == ISD::SHL) {
3290 SDValue N10 = N1.getOperand(0);
3291 if (isConstantOrConstantVector(N10, /*NoOpaques*/ true) &&
3292 DAG.isKnownToBeAPowerOfTwo(N10)) {
3293 SDValue LogBase2 = BuildLogBase2(N10, DL);
3294 AddToWorklist(LogBase2.getNode());
3295
3296 EVT ADDVT = N1.getOperand(1).getValueType();
3297 SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ADDVT);
3298 AddToWorklist(Trunc.getNode());
3299 SDValue Add = DAG.getNode(ISD::ADD, DL, ADDVT, N1.getOperand(1), Trunc);
3300 AddToWorklist(Add.getNode());
3301 return DAG.getNode(ISD::SRL, DL, VT, N0, Add);
3302 }
3303 }
3304
3305 // fold (udiv x, c) -> alternate
3306 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
3307 if (N1C && !TLI.isIntDivCheap(N->getValueType(0), Attr))
3308 if (SDValue Op = BuildUDIV(N))
3309 return Op;
3310
3311 return SDValue();
3312}
3313
3314// Handles ISD::SREM and ISD::UREM.
3315SDValue DAGCombiner::visitREM(SDNode *N) {
3316 unsigned Opcode = N->getOpcode();
3317 SDValue N0 = N->getOperand(0);
3318 SDValue N1 = N->getOperand(1);
3319 EVT VT = N->getValueType(0);
3320 EVT CCVT = getSetCCResultType(VT);
3321
3322 bool isSigned = (Opcode == ISD::SREM);
3323 SDLoc DL(N);
3324
3325 // fold (rem c1, c2) -> c1%c2
3326 ConstantSDNode *N0C = isConstOrConstSplat(N0);
3327 ConstantSDNode *N1C = isConstOrConstSplat(N1);
3328 if (N0C && N1C)
3329 if (SDValue Folded = DAG.FoldConstantArithmetic(Opcode, DL, VT, N0C, N1C))
3330 return Folded;
3331  // fold (urem X, -1) -> select(X == -1, 0, X)
3332 if (!isSigned && N1C && N1C->getAPIntValue().isAllOnesValue())
3333 return DAG.getSelect(DL, VT, DAG.getSetCC(DL, CCVT, N0, N1, ISD::SETEQ),
3334 DAG.getConstant(0, DL, VT), N0);
3335
3336 if (SDValue V = simplifyDivRem(N, DAG))
3337 return V;
3338
3339 if (SDValue NewSel = foldBinOpIntoSelect(N))
3340 return NewSel;
3341
3342 if (isSigned) {
3343 // If we know the sign bits of both operands are zero, strength reduce to a
3344 // urem instead. Handles (X & 0x0FFFFFFF) %s 16 -> X&15
3345 if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
3346 return DAG.getNode(ISD::UREM, DL, VT, N0, N1);
3347 } else {
3348 SDValue NegOne = DAG.getAllOnesConstant(DL, VT);
3349 if (DAG.isKnownToBeAPowerOfTwo(N1)) {
3350 // fold (urem x, pow2) -> (and x, pow2-1)
3351 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N1, NegOne);
3352 AddToWorklist(Add.getNode());
3353 return DAG.getNode(ISD::AND, DL, VT, N0, Add);
3354 }
3355 if (N1.getOpcode() == ISD::SHL &&
3356 DAG.isKnownToBeAPowerOfTwo(N1.getOperand(0))) {
3357 // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
3358 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N1, NegOne);
3359 AddToWorklist(Add.getNode());
3360 return DAG.getNode(ISD::AND, DL, VT, N0, Add);
3361 }
3362 }
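    // For example, (urem x, 8) becomes (and x, 7): adding -1 to a power of
    // two (or to (shl pow2, y)) produces the matching low-bit mask.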
3363
3364 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
3365
3366 // If X/C can be simplified by the division-by-constant logic, lower
3367 // X%C to the equivalent of X-X/C*C.
3368 // Reuse the SDIVLike/UDIVLike combines - to avoid mangling nodes, the
3369 // speculative DIV must not cause a DIVREM conversion. We guard against this
3370 // by skipping the simplification if isIntDivCheap(). When div is not cheap,
3371 // combine will not return a DIVREM. Regardless, checking cheapness here
3372 // makes sense since the simplification results in fatter code.
3373 if (DAG.isKnownNeverZero(N1) && !TLI.isIntDivCheap(VT, Attr)) {
3374 SDValue OptimizedDiv =
3375 isSigned ? visitSDIVLike(N0, N1, N) : visitUDIVLike(N0, N1, N);
3376 if (OptimizedDiv.getNode() && OptimizedDiv.getOpcode() != ISD::UDIVREM &&
3377 OptimizedDiv.getOpcode() != ISD::SDIVREM) {
3378 SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, OptimizedDiv, N1);
3379 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul);
3380 AddToWorklist(OptimizedDiv.getNode());
3381 AddToWorklist(Mul.getNode());
3382 return Sub;
3383 }
3384 }
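  // This relies on the identity x % c == x - (x / c) * c. A minimal scalar
  // sketch of the unsigned case (illustrative only, not part of this file):
  //   uint32_t urem_via_div(uint32_t x, uint32_t c) {
  //     return x - (x / c) * c;  // valid for any nonzero c
  //   }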
3385
3386  // sdiv, srem -> sdivrem; udiv, urem -> udivrem
3387 if (SDValue DivRem = useDivRem(N))
3388 return DivRem.getValue(1);
3389
3390 return SDValue();
3391}
3392
3393SDValue DAGCombiner::visitMULHS(SDNode *N) {
3394 SDValue N0 = N->getOperand(0);
3395 SDValue N1 = N->getOperand(1);
3396 EVT VT = N->getValueType(0);
3397 SDLoc DL(N);
3398
3399 if (VT.isVector()) {
3400 // fold (mulhs x, 0) -> 0
3401 if (ISD::isBuildVectorAllZeros(N1.getNode()))
3402 return N1;
3403 if (ISD::isBuildVectorAllZeros(N0.getNode()))
3404 return N0;
3405 }
3406
3407 // fold (mulhs x, 0) -> 0
3408 if (isNullConstant(N1))
3409 return N1;
3410 // fold (mulhs x, 1) -> (sra x, size(x)-1)
3411 if (isOneConstant(N1))
3412 return DAG.getNode(ISD::SRA, DL, N0.getValueType(), N0,
3413 DAG.getConstant(N0.getValueSizeInBits() - 1, DL,
3414 getShiftAmountTy(N0.getValueType())));
3415
3416 // fold (mulhs x, undef) -> 0
3417 if (N0.isUndef() || N1.isUndef())
3418 return DAG.getConstant(0, DL, VT);
3419
3420 // If the type twice as wide is legal, transform the mulhs to a wider multiply
3421 // plus a shift.
3422 if (VT.isSimple() && !VT.isVector()) {
3423 MVT Simple = VT.getSimpleVT();
3424 unsigned SimpleSize = Simple.getSizeInBits();
3425 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
3426 if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
3427 N0 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0);
3428 N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1);
3429 N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
3430 N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
3431 DAG.getConstant(SimpleSize, DL,
3432 getShiftAmountTy(N1.getValueType())));
3433 return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
3434 }
3435 }
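  // A minimal scalar sketch of what this lowering computes for i32 when an
  // i64 multiply is legal (the helper name is illustrative only):
  //   int32_t mulhs32(int32_t a, int32_t b) {
  //     return (int32_t)(((int64_t)a * (int64_t)b) >> 32);
  //   }
  // i.e. sign-extend both operands, form the full-width product, and keep
  // the high half.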
3436
3437 return SDValue();
3438}
3439
3440SDValue DAGCombiner::visitMULHU(SDNode *N) {
3441 SDValue N0 = N->getOperand(0);
3442 SDValue N1 = N->getOperand(1);
3443 EVT VT = N->getValueType(0);
3444 SDLoc DL(N);
3445
3446 if (VT.isVector()) {
3447 // fold (mulhu x, 0) -> 0
3448 if (ISD::isBuildVectorAllZeros(N1.getNode()))
3449 return N1;
3450 if (ISD::isBuildVectorAllZeros(N0.getNode()))
3451 return N0;
3452 }
3453
3454 // fold (mulhu x, 0) -> 0
3455 if (isNullConstant(N1))
3456 return N1;
3457 // fold (mulhu x, 1) -> 0
3458 if (isOneConstant(N1))
3459 return DAG.getConstant(0, DL, N0.getValueType());
3460 // fold (mulhu x, undef) -> 0
3461 if (N0.isUndef() || N1.isUndef())
3462 return DAG.getConstant(0, DL, VT);
3463
3464 // If the type twice as wide is legal, transform the mulhu to a wider multiply
3465 // plus a shift.
3466 if (VT.isSimple() && !VT.isVector()) {
3467 MVT Simple = VT.getSimpleVT();
3468 unsigned SimpleSize = Simple.getSizeInBits();
3469 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
3470 if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
3471 N0 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0);
3472 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1);
3473 N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
3474 N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
3475 DAG.getConstant(SimpleSize, DL,
3476 getShiftAmountTy(N1.getValueType())));
3477 return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
3478 }
3479 }
3480
3481 return SDValue();
3482}
3483
3484/// Perform optimizations common to nodes that compute two values. LoOp and HiOp
3485/// give the opcodes for the two computations that are being performed. Return
3486/// the combined result if a simplification was made.
3487SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
3488 unsigned HiOp) {
3489 // If the high half is not needed, just compute the low half.
3490 bool HiExists = N->hasAnyUseOfValue(1);
3491 if (!HiExists &&
3492 (!LegalOperations ||
3493 TLI.isOperationLegalOrCustom(LoOp, N->getValueType(0)))) {
3494 SDValue Res = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
3495 return CombineTo(N, Res, Res);
3496 }
3497
3498 // If the low half is not needed, just compute the high half.
3499 bool LoExists = N->hasAnyUseOfValue(0);
3500 if (!LoExists &&
3501 (!LegalOperations ||
3502 TLI.isOperationLegal(HiOp, N->getValueType(1)))) {
3503 SDValue Res = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
3504 return CombineTo(N, Res, Res);
3505 }
3506
3507  // If both halves are used, return the node as-is.
3508 if (LoExists && HiExists)
3509 return SDValue();
3510
3511 // If the two computed results can be simplified separately, separate them.
3512 if (LoExists) {
3513 SDValue Lo = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
3514 AddToWorklist(Lo.getNode());
3515 SDValue LoOpt = combine(Lo.getNode());
3516 if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() &&
3517 (!LegalOperations ||
3518 TLI.isOperationLegal(LoOpt.getOpcode(), LoOpt.getValueType())))
3519 return CombineTo(N, LoOpt, LoOpt);
3520 }
3521
3522 if (HiExists) {
3523 SDValue Hi = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
3524 AddToWorklist(Hi.getNode());
3525 SDValue HiOpt = combine(Hi.getNode());
3526 if (HiOpt.getNode() && HiOpt != Hi &&
3527 (!LegalOperations ||
3528 TLI.isOperationLegal(HiOpt.getOpcode(), HiOpt.getValueType())))
3529 return CombineTo(N, HiOpt, HiOpt);
3530 }
3531
3532 return SDValue();
3533}
3534
3535SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
3536 if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS))
3537 return Res;
3538
3539 EVT VT = N->getValueType(0);
3540 SDLoc DL(N);
3541
3542  // If the type twice as wide is legal, transform this into a wider
3543  // multiply plus a shift.
3544 if (VT.isSimple() && !VT.isVector()) {
3545 MVT Simple = VT.getSimpleVT();
3546 unsigned SimpleSize = Simple.getSizeInBits();
3547 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
3548 if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
3549 SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(0));
3550 SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(1));
3551 Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
3552      // Compute the high part from the upper half of the wide product.
3553 Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
3554 DAG.getConstant(SimpleSize, DL,
3555 getShiftAmountTy(Lo.getValueType())));
3556 Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
3557      // Compute the low part by truncating the wide product.
3558 Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
3559 return CombineTo(N, Lo, Hi);
3560 }
3561 }
3562
3563 return SDValue();
3564}
3565
3566SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
3567 if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU))
3568 return Res;
3569
3570 EVT VT = N->getValueType(0);
3571 SDLoc DL(N);
3572
3573  // If the type twice as wide is legal, transform this into a wider
3574  // multiply plus a shift.
3575 if (VT.isSimple() && !VT.isVector()) {
3576 MVT Simple = VT.getSimpleVT();
3577 unsigned SimpleSize = Simple.getSizeInBits();
3578 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
3579 if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
3580 SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(0));
3581 SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(1));
3582 Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
3583      // Compute the high part from the upper half of the wide product.
3584 Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
3585 DAG.getConstant(SimpleSize, DL,
3586 getShiftAmountTy(Lo.getValueType())));
3587 Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
3588      // Compute the low part by truncating the wide product.
3589 Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
3590 return CombineTo(N, Lo, Hi);
3591 }
3592 }
3593
3594 return SDValue();
3595}
3596
3597SDValue DAGCombiner::visitSMULO(SDNode *N) {
3598 // (smulo x, 2) -> (saddo x, x)
3599 if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
3600 if (C2->getAPIntValue() == 2)
3601 return DAG.getNode(ISD::SADDO, SDLoc(N), N->getVTList(),
3602 N->getOperand(0), N->getOperand(0));
3603
3604 return SDValue();
3605}
3606
3607SDValue DAGCombiner::visitUMULO(SDNode *N) {
3608 // (umulo x, 2) -> (uaddo x, x)
3609 if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
3610 if (C2->getAPIntValue() == 2)
3611 return DAG.getNode(ISD::UADDO, SDLoc(N), N->getVTList(),
3612 N->getOperand(0), N->getOperand(0));
3613
3614 return SDValue();
3615}
3616
3617SDValue DAGCombiner::visitIMINMAX(SDNode *N) {
3618 SDValue N0 = N->getOperand(0);
3619 SDValue N1 = N->getOperand(1);
3620 EVT VT = N0.getValueType();
3621
3622 // fold vector ops
3623 if (VT.isVector())
3624 if (SDValue FoldedVOp = SimplifyVBinOp(N))
3625 return FoldedVOp;
3626
3627 // fold operation with constant operands.
3628 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
3629 ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
3630 if (N0C && N1C)
3631 return DAG.FoldConstantArithmetic(N->getOpcode(), SDLoc(N), VT, N0C, N1C);
3632
3633 // canonicalize constant to RHS
3634 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
3635 !DAG.isConstantIntBuildVectorOrConstantInt(N1))
3636 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0);
3637
3638  // If the sign bits are zero, flip between UMIN/UMAX and SMIN/SMAX.
3639 // Only do this if the current op isn't legal and the flipped is.
3640 unsigned Opcode = N->getOpcode();
3641 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3642 if (!TLI.isOperationLegal(Opcode, VT) &&
3643 (N0.isUndef() || DAG.SignBitIsZero(N0)) &&
3644 (N1.isUndef() || DAG.SignBitIsZero(N1))) {
3645 unsigned AltOpcode;
3646 switch (Opcode) {
3647 case ISD::SMIN: AltOpcode = ISD::UMIN; break;
3648 case ISD::SMAX: AltOpcode = ISD::UMAX; break;
3649 case ISD::UMIN: AltOpcode = ISD::SMIN; break;
3650 case ISD::UMAX: AltOpcode = ISD::SMAX; break;
3651    default: llvm_unreachable("Unknown MINMAX opcode");
3652 }
3653 if (TLI.isOperationLegal(AltOpcode, VT))
3654 return DAG.getNode(AltOpcode, SDLoc(N), VT, N0, N1);
3655 }
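  // For example, when both operands are known non-negative, signed and
  // unsigned orderings agree, so an illegal (smin x, y) can be rewritten as
  // a legal (umin x, y), and vice versa.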
3656
3657 return SDValue();
3658}
3659
3660/// If this is a binary operator with two operands of the same opcode, try to
3661/// simplify it.
3662SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
3663 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
3664 EVT VT = N0.getValueType();
3665  assert(N0.getOpcode() == N1.getOpcode() && "Bad input!");
3666
3667 // Bail early if none of these transforms apply.
3668 if (N0.getNumOperands() == 0) return SDValue();
3669
3670 // For each of OP in AND/OR/XOR:
3671 // fold (OP (zext x), (zext y)) -> (zext (OP x, y))
3672 // fold (OP (sext x), (sext y)) -> (sext (OP x, y))
3673 // fold (OP (aext x), (aext y)) -> (aext (OP x, y))
3674 // fold (OP (bswap x), (bswap y)) -> (bswap (OP x, y))
3675 // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) (if trunc isn't free)
3676 //
3677 // do not sink logical op inside of a vector extend, since it may combine
3678 // into a vsetcc.
3679 EVT Op0VT = N0.getOperand(0).getValueType();
3680 if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
3681 N0.getOpcode() == ISD::SIGN_EXTEND ||
3682 N0.getOpcode() == ISD::BSWAP ||
3683 // Avoid infinite looping with PromoteIntBinOp.
3684 (N0.getOpcode() == ISD::ANY_EXTEND &&
3685 (!LegalTypes || TLI.isTypeDesirableForOp(N->getOpcode(), Op0VT))) ||
3686 (N0.getOpcode() == ISD::TRUNCATE &&
3687 (!TLI.isZExtFree(VT, Op0VT) ||
3688 !TLI.isTruncateFree(Op0VT, VT)) &&
3689 TLI.isTypeLegal(Op0VT))) &&
3690 !VT.isVector() &&
3691 Op0VT == N1.getOperand(0).getValueType() &&
3692 (!LegalOperations || TLI.isOperationLegal(N->getOpcode(), Op0VT))) {
3693 SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0),
3694 N0.getOperand(0).getValueType(),
3695 N0.getOperand(0), N1.getOperand(0));
3696 AddToWorklist(ORNode.getNode());
3697 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, ORNode);
3698 }
3699
3700 // For each of OP in SHL/SRL/SRA/AND...
3701 // fold (and (OP x, z), (OP y, z)) -> (OP (and x, y), z)
3702 // fold (or (OP x, z), (OP y, z)) -> (OP (or x, y), z)
3703 // fold (xor (OP x, z), (OP y, z)) -> (OP (xor x, y), z)
3704 if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL ||
3705 N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::AND) &&
3706 N0.getOperand(1) == N1.getOperand(1)) {
3707 SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0),
3708 N0.getOperand(0).getValueType(),
3709 N0.getOperand(0), N1.getOperand(0));
3710 AddToWorklist(ORNode.getNode());
3711 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT,
3712 ORNode, N0.getOperand(1));
3713 }
3714
3715 // Simplify xor/and/or (bitcast(A), bitcast(B)) -> bitcast(op (A,B))
3716 // Only perform this optimization up until type legalization, before
3717 // LegalizeVectorOps. LegalizeVectorOps promotes vector operations by
3718 // adding bitcasts. For example (xor v4i32) is promoted to (v2i64), and
3719 // we don't want to undo this promotion.
3720 // We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper
3721 // on scalars.
3722 if ((N0.getOpcode() == ISD::BITCAST ||
3723 N0.getOpcode() == ISD::SCALAR_TO_VECTOR) &&
3724 Level <= AfterLegalizeTypes) {
3725 SDValue In0 = N0.getOperand(0);
3726 SDValue In1 = N1.getOperand(0);
3727 EVT In0Ty = In0.getValueType();
3728 EVT In1Ty = In1.getValueType();
3729 SDLoc DL(N);
3730 // If both incoming values are integers, and the original types are the
3731 // same.
3732 if (In0Ty.isInteger() && In1Ty.isInteger() && In0Ty == In1Ty) {
3733 SDValue Op = DAG.getNode(N->getOpcode(), DL, In0Ty, In0, In1);
3734 SDValue BC = DAG.getNode(N0.getOpcode(), DL, VT, Op);
3735 AddToWorklist(Op.getNode());
3736 return BC;
3737 }
3738 }
3739
3740 // Xor/and/or are indifferent to the swizzle operation (shuffle of one value).
3741 // Simplify xor/and/or (shuff(A), shuff(B)) -> shuff(op (A,B))
3742 // If both shuffles use the same mask, and both shuffle within a single
3743 // vector, then it is worthwhile to move the swizzle after the operation.
3744 // The type-legalizer generates this pattern when loading illegal
3745 // vector types from memory. In many cases this allows additional shuffle
3746 // optimizations.
3747 // There are other cases where moving the shuffle after the xor/and/or
3748 // is profitable even if shuffles don't perform a swizzle.
3749 // If both shuffles use the same mask, and both shuffles have the same first
3750 // or second operand, then it might still be profitable to move the shuffle
3751 // after the xor/and/or operation.
3752 if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG) {
3753 ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(N0);
3754 ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(N1);
3755
3756 assert(N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType() &&
3757 "Inputs to shuffles are not the same type");
3758
3759 // Check that both shuffles use the same mask. The masks are known to be of
3760 // the same length because the result vector type is the same.
3761 // Check also that shuffles have only one use to avoid introducing extra
3762 // instructions.
3763 if (SVN0->hasOneUse() && SVN1->hasOneUse() &&
3764 SVN0->getMask().equals(SVN1->getMask())) {
3765 SDValue ShOp = N0->getOperand(1);
3766
3767 // Don't try to fold this node if it requires introducing a
3768 // build vector of all zeros that might be illegal at this stage.
3769 if (N->getOpcode() == ISD::XOR && !ShOp.isUndef()) {
3770 if (!LegalTypes)
3771 ShOp = DAG.getConstant(0, SDLoc(N), VT);
3772 else
3773 ShOp = SDValue();
3774 }
3775
3776 // (AND (shuf (A, C), shuf (B, C))) -> shuf (AND (A, B), C)
3777 // (OR (shuf (A, C), shuf (B, C))) -> shuf (OR (A, B), C)
3778 // (XOR (shuf (A, C), shuf (B, C))) -> shuf (XOR (A, B), V_0)
3779 if (N0.getOperand(1) == N1.getOperand(1) && ShOp.getNode()) {
3780 SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
3781 N0->getOperand(0), N1->getOperand(0));
3782 AddToWorklist(NewNode.getNode());
3783 return DAG.getVectorShuffle(VT, SDLoc(N), NewNode, ShOp,
3784 SVN0->getMask());
3785 }
3786
3787 // Don't try to fold this node if it requires introducing a
3788 // build vector of all zeros that might be illegal at this stage.
3789 ShOp = N0->getOperand(0);
3790 if (N->getOpcode() == ISD::XOR && !ShOp.isUndef()) {
3791 if (!LegalTypes)
3792 ShOp = DAG.getConstant(0, SDLoc(N), VT);
3793 else
3794 ShOp = SDValue();
3795 }
3796
3797 // (AND (shuf (C, A), shuf (C, B))) -> shuf (C, AND (A, B))
3798 // (OR (shuf (C, A), shuf (C, B))) -> shuf (C, OR (A, B))
3799 // (XOR (shuf (C, A), shuf (C, B))) -> shuf (V_0, XOR (A, B))
3800 if (N0->getOperand(0) == N1->getOperand(0) && ShOp.getNode()) {
3801 SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
3802 N0->getOperand(1), N1->getOperand(1));
3803 AddToWorklist(NewNode.getNode());
3804 return DAG.getVectorShuffle(VT, SDLoc(N), ShOp, NewNode,
3805 SVN0->getMask());
3806 }
3807 }
3808 }
3809
3810 return SDValue();
3811}
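// Illustrative sketch (not part of DAGCombiner.cpp): a hypothetical check of
// the extension fold above -- for AND/OR/XOR, applying the logic op before a
// zero-extend matches applying it after, so the extend can be hoisted out:
static bool logicThroughZextHolds(unsigned char X, unsigned char Y) {
  unsigned ExtThenOp = (unsigned)X & (unsigned)Y;        // and (zext x), (zext y)
  unsigned OpThenExt = (unsigned)(unsigned char)(X & Y); // zext (and x, y)
  return ExtThenOp == OpThenExt;
}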
3812
3813/// Try to make (and/or setcc (LL, LR), setcc (RL, RR)) more efficient.
3814SDValue DAGCombiner::foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1,
3815 const SDLoc &DL) {
3816 SDValue LL, LR, RL, RR, N0CC, N1CC;
3817 if (!isSetCCEquivalent(N0, LL, LR, N0CC) ||
3818 !isSetCCEquivalent(N1, RL, RR, N1CC))
3819 return SDValue();
3820
3821 assert(N0.getValueType() == N1.getValueType() &&
3822 "Unexpected operand types for bitwise logic op");
3823 assert(LL.getValueType() == LR.getValueType() &&
3824 RL.getValueType() == RR.getValueType() &&
3825 "Unexpected operand types for setcc");
3826
3827 // If we're here post-legalization or the logic op type is not i1, the logic
3828 // op type must match a setcc result type. Also, all folds require new
3829 // operations on the left and right operands, so those types must match.
3830 EVT VT = N0.getValueType();
3831 EVT OpVT = LL.getValueType();
3832 if (LegalOperations || VT.getScalarType() != MVT::i1)
3833 if (VT != getSetCCResultType(OpVT))
3834 return SDValue();
3835 if (OpVT != RL.getValueType())
3836 return SDValue();
3837
3838 ISD::CondCode CC0 = cast<CondCodeSDNode>(N0CC)->get();
3839 ISD::CondCode CC1 = cast<CondCodeSDNode>(N1CC)->get();
3840 bool IsInteger = OpVT.isInteger();
3841 if (LR == RR && CC0 == CC1 && IsInteger) {
3842 bool IsZero = isNullConstantOrNullSplatConstant(LR);
3843 bool IsNeg1 = isAllOnesConstantOrAllOnesSplatConstant(LR);
3844
3845 // All bits clear?
3846 bool AndEqZero = IsAnd && CC1 == ISD::SETEQ && IsZero;
3847 // All sign bits clear?
3848 bool AndGtNeg1 = IsAnd && CC1 == ISD::SETGT && IsNeg1;
3849 // Any bits set?
3850 bool OrNeZero = !IsAnd && CC1 == ISD::SETNE && IsZero;
3851 // Any sign bits set?
3852 bool OrLtZero = !IsAnd && CC1 == ISD::SETLT && IsZero;
3853
3854 // (and (seteq X, 0), (seteq Y, 0)) --> (seteq (or X, Y), 0)
3855 // (and (setgt X, -1), (setgt Y, -1)) --> (setgt (or X, Y), -1)
3856 // (or (setne X, 0), (setne Y, 0)) --> (setne (or X, Y), 0)
3857 // (or (setlt X, 0), (setlt Y, 0)) --> (setlt (or X, Y), 0)
3858 if (AndEqZero || AndGtNeg1 || OrNeZero || OrLtZero) {
3859 SDValue Or = DAG.getNode(ISD::OR, SDLoc(N0), OpVT, LL, RL);
3860 AddToWorklist(Or.getNode());
3861 return DAG.getSetCC(DL, VT, Or, LR, CC1);
3862 }
3863
3864 // All bits set?
3865 bool AndEqNeg1 = IsAnd && CC1 == ISD::SETEQ && IsNeg1;
3866 // All sign bits set?
3867 bool AndLtZero = IsAnd && CC1 == ISD::SETLT && IsZero;
3868 // Any bits clear?
3869 bool OrNeNeg1 = !IsAnd && CC1 == ISD::SETNE && IsNeg1;
3870 // Any sign bits clear?
3871 bool OrGtNeg1 = !IsAnd && CC1 == ISD::SETGT && IsNeg1;
3872
3873 // (and (seteq X, -1), (seteq Y, -1)) --> (seteq (and X, Y), -1)
3874 // (and (setlt X, 0), (setlt Y, 0)) --> (setlt (and X, Y), 0)
3875 // (or (setne X, -1), (setne Y, -1)) --> (setne (and X, Y), -1)
3876 // (or (setgt X, -1), (setgt Y -1)) --> (setgt (and X, Y), -1)
3877 if (AndEqNeg1 || AndLtZero || OrNeNeg1 || OrGtNeg1) {
3878 SDValue And = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, LL, RL);
3879 AddToWorklist(And.getNode());
3880 return DAG.getSetCC(DL, VT, And, LR, CC1);
3881 }
3882 }
3883
3884 // TODO: What is the 'or' equivalent of this fold?
3885 // (and (setne X, 0), (setne X, -1)) --> (setuge (add X, 1), 2)
3886 if (IsAnd && LL == RL && CC0 == CC1 && OpVT.getScalarSizeInBits() > 1 &&
3887 IsInteger && CC0 == ISD::SETNE &&
3888 ((isNullConstant(LR) && isAllOnesConstant(RR)) ||
3889 (isAllOnesConstant(LR) && isNullConstant(RR)))) {
3890 SDValue One = DAG.getConstant(1, DL, OpVT);
3891 SDValue Two = DAG.getConstant(2, DL, OpVT);
3892 SDValue Add = DAG.getNode(ISD::ADD, SDLoc(N0), OpVT, LL, One);
3893 AddToWorklist(Add.getNode());
3894 return DAG.getSetCC(DL, VT, Add, Two, ISD::SETUGE);
3895 }
3896
3897 // Try more general transforms if the predicates match and the only user of
3898 // the compares is the 'and' or 'or'.
3899 if (IsInteger && TLI.convertSetCCLogicToBitwiseLogic(OpVT) && CC0 == CC1 &&
3900 N0.hasOneUse() && N1.hasOneUse()) {
3901 // and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
3902 // or (setne A, B), (setne C, D) --> setne (or (xor A, B), (xor C, D)), 0
3903 if ((IsAnd && CC1 == ISD::SETEQ) || (!IsAnd && CC1 == ISD::SETNE)) {
3904 SDValue XorL = DAG.getNode(ISD::XOR, SDLoc(N0), OpVT, LL, LR);
3905 SDValue XorR = DAG.getNode(ISD::XOR, SDLoc(N1), OpVT, RL, RR);
3906 SDValue Or = DAG.getNode(ISD::OR, DL, OpVT, XorL, XorR);
3907 SDValue Zero = DAG.getConstant(0, DL, OpVT);
3908 return DAG.getSetCC(DL, VT, Or, Zero, CC1);
3909 }
3910 }
3911
3912 // Canonicalize equivalent operands to LL == RL.
3913 if (LL == RR && LR == RL) {
3914 CC1 = ISD::getSetCCSwappedOperands(CC1);
3915 std::swap(RL, RR);
3916 }
3917
3918 // (and (setcc X, Y, CC0), (setcc X, Y, CC1)) --> (setcc X, Y, NewCC)
3919 // (or (setcc X, Y, CC0), (setcc X, Y, CC1)) --> (setcc X, Y, NewCC)
3920 if (LL == RL && LR == RR) {
3921 ISD::CondCode NewCC = IsAnd ? ISD::getSetCCAndOperation(CC0, CC1, IsInteger)
3922 : ISD::getSetCCOrOperation(CC0, CC1, IsInteger);
3923 if (NewCC != ISD::SETCC_INVALID &&
3924 (!LegalOperations ||
3925 (TLI.isCondCodeLegal(NewCC, LL.getSimpleValueType()) &&
3926 TLI.isOperationLegal(ISD::SETCC, OpVT))))
3927 return DAG.getSetCC(DL, VT, LL, LR, NewCC);
3928 }
3929
3930 return SDValue();
3931}
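// Illustrative sketch (not part of DAGCombiner.cpp): hypothetical spot-checks
// of two folds above, restated in plain C++ over int:
static bool logicOfSetCCsHolds(int X, int Y) {
  // (and (seteq X, 0), (seteq Y, 0)) --> (seteq (or X, Y), 0)
  bool OrZero = ((X == 0) && (Y == 0)) == ((X | Y) == 0);
  // (and (setne X, 0), (setne X, -1)) --> (setuge (add X, 1), 2)
  bool AddCmp = ((X != 0) && (X != -1)) == ((unsigned)X + 1u >= 2u);
  return OrZero && AddCmp;
}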
3932
3933/// This contains all DAGCombine rules which reduce two values combined by
3934/// an And operation to a single value. This makes them reusable in the context
3935/// of visitSELECT(). Rules involving constants are not included as
3936/// visitSELECT() already handles those cases.
3937SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1, SDNode *N) {
3938 EVT VT = N1.getValueType();
3939 SDLoc DL(N);
3940
3941 // fold (and x, undef) -> 0
3942 if (N0.isUndef() || N1.isUndef())
3943 return DAG.getConstant(0, DL, VT);
3944
3945 if (SDValue V = foldLogicOfSetCCs(true, N0, N1, DL))
3946 return V;
3947
3948 if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL &&
3949 VT.getSizeInBits() <= 64) {
3950 if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
3951 if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) {
3952 // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal
3953 // immediate for an add, but it is legal if its top c2 bits are set,
3954 // transform the ADD so the immediate doesn't need to be materialized
3955 // in a register.
3956 APInt ADDC = ADDI->getAPIntValue();
3957 APInt SRLC = SRLI->getAPIntValue();
3958 if (ADDC.getMinSignedBits() <= 64 &&
3959 SRLC.ult(VT.getSizeInBits()) &&
3960 !TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
3961 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
3962 SRLC.getZExtValue());
3963 if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) {
3964 ADDC |= Mask;
3965 if (TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
3966 SDLoc DL0(N0);
3967 SDValue NewAdd =
3968 DAG.getNode(ISD::ADD, DL0, VT,
3969 N0.getOperand(0), DAG.getConstant(ADDC, DL, VT));
3970 CombineTo(N0.getNode(), NewAdd);
3971 // Return N so it doesn't get rechecked!
3972 return SDValue(N, 0);
3973 }
3974 }
3975 }
3976 }
3977 }
3978 }
3979
3980 // Reduce bit extract of low half of an integer to the narrower type.
3981 // (and (srl i64:x, K), KMask) ->
3982 // (i64 zero_extend (and (srl (i32 (trunc i64:x)), K)), KMask)
3983 if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
3984 if (ConstantSDNode *CAnd = dyn_cast<ConstantSDNode>(N1)) {
3985 if (ConstantSDNode *CShift = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
3986 unsigned Size = VT.getSizeInBits();
3987 const APInt &AndMask = CAnd->getAPIntValue();
3988 unsigned ShiftBits = CShift->getZExtValue();
3989
3990 // Bail out, this node will probably disappear anyway.
3991 if (ShiftBits == 0)
3992 return SDValue();
3993
3994 unsigned MaskBits = AndMask.countTrailingOnes();
3995 EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), Size / 2);
3996
3997 if (AndMask.isMask() &&
3998 // Required bits must not span the two halves of the integer and
3999 // must fit in the half size type.
4000 (ShiftBits + MaskBits <= Size / 2) &&
4001 TLI.isNarrowingProfitable(VT, HalfVT) &&
4002 TLI.isTypeDesirableForOp(ISD::AND, HalfVT) &&
4003 TLI.isTypeDesirableForOp(ISD::SRL, HalfVT) &&
4004 TLI.isTruncateFree(VT, HalfVT) &&
4005 TLI.isZExtFree(HalfVT, VT)) {
4006 // The isNarrowingProfitable is to avoid regressions on PPC and
4007 // AArch64 which match a few 64-bit bit insert / bit extract patterns
4008 // on downstream users of this. Those patterns could probably be
4009 // extended to handle extensions mixed in.
4010
4011 SDValue SL(N0);
4012 assert(MaskBits <= Size);
4013
4014 // Extracting the highest bit of the low half.
4015 EVT ShiftVT = TLI.getShiftAmountTy(HalfVT, DAG.getDataLayout());
4016 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, HalfVT,
4017 N0.getOperand(0));
4018
4019 SDValue NewMask = DAG.getConstant(AndMask.trunc(Size / 2), SL, HalfVT);
4020 SDValue ShiftK = DAG.getConstant(ShiftBits, SL, ShiftVT);
4021 SDValue Shift = DAG.getNode(ISD::SRL, SL, HalfVT, Trunc, ShiftK);
4022 SDValue And = DAG.getNode(ISD::AND, SL, HalfVT, Shift, NewMask);
4023 return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, And);
4024 }
4025 }
4026 }
4027 }
4028
4029 return SDValue();
4030}
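// Illustrative sketch (not part of DAGCombiner.cpp): hypothetical check of
// the narrowing fold above for i64 -> i32. It only holds under the guard the
// code enforces: K < 32, KMask is a low-bit mask, and K plus the mask width
// stays within the low half (ShiftBits + MaskBits <= Size / 2):
static bool narrowBitExtractHolds(unsigned long long X, unsigned K,
                                  unsigned KMask) {
  unsigned long long Wide = (X >> K) & KMask;              // i64 srl + and
  unsigned long long Narrow = ((unsigned)X >> K) & KMask;  // zext(i32 srl + and)
  return Wide == Narrow;
}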
4031
4032bool DAGCombiner::isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN,
4033 EVT LoadResultTy, EVT &ExtVT) {
4034 if (!AndC->getAPIntValue().isMask())
4035 return false;
4036
4037 unsigned ActiveBits = AndC->getAPIntValue().countTrailingOnes();
4038
4039 ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
4040 EVT LoadedVT = LoadN->getMemoryVT();
4041
4042 if (ExtVT == LoadedVT &&
4043 (!LegalOperations ||
4044 TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT))) {
4045 // ZEXTLOAD will match without needing to change the size of the value being
4046 // loaded.
4047 return true;
4048 }
4049
4050 // Do not change the width of a volatile load.
4051 if (LoadN->isVolatile())
4052 return false;
4053
4054 // Do not generate loads of non-round integer types since these can
4055 // be expensive (and would be wrong if the type is not byte sized).
4056 if (!LoadedVT.bitsGT(ExtVT) || !ExtVT.isRound())
4057 return false;
4058
4059 if (LegalOperations &&
4060 !TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT))
4061 return false;
4062
4063 if (!TLI.shouldReduceLoadWidth(LoadN, ISD::ZEXTLOAD, ExtVT))
4064 return false;
4065
4066 return true;
4067}
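// Illustrative sketch (not part of DAGCombiner.cpp): the reason a low-bit
// mask can be absorbed into a ZEXTLOAD -- masking with a mask that exactly
// covers the narrow memory type is the same as zero-extending the narrow
// value. Hypothetical i32/i8 illustration:
static bool maskActsAsZext(unsigned Loaded) {
  return (Loaded & 0xFFu) == (unsigned)(unsigned char)Loaded;
}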
4068
4069bool DAGCombiner::isLegalNarrowLdSt(LSBaseSDNode *LDST,
4070 ISD::LoadExtType ExtType, EVT &MemVT,
4071 unsigned ShAmt) {
4072 if (!LDST)
4073 return false;
4074 // Only allow byte offsets.
4075 if (ShAmt % 8)
4076 return false;
4077
4078 // Do not generate loads of non-round integer types since these can
4079 // be expensive (and would be wrong if the type is not byte sized).
4080 if (!MemVT.isRound())
4081 return false;
4082
4083 // Don't change the width of a volatile load.
4084 if (LDST->isVolatile())
4085 return false;
4086
4087 // Verify that we are actually reducing a load width here.
4088 if (LDST->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits())
4089 return false;
4090
4091 // Ensure that this isn't going to produce an unsupported unaligned access.
4092 if (ShAmt &&
4093 !TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
4094 LDST->getAddressSpace(), ShAmt / 8))
4095 return false;
4096
4097 // It's not possible to generate a constant of extended or untyped type.
4098 EVT PtrType = LDST->getBasePtr().getValueType();
4099 if (PtrType == MVT::Untyped || PtrType.isExtended())
4100 return false;
4101
4102 if (isa<LoadSDNode>(LDST)) {
4103 LoadSDNode *Load = cast<LoadSDNode>(LDST);
4104 // Don't transform one with multiple uses, this would require adding a new
4105 // load.
4106 if (!SDValue(Load, 0).hasOneUse())
4107 return false;
4108
4109 if (LegalOperations &&
4110 !TLI.isLoadExtLegal(ExtType, Load->getValueType(0), MemVT))
4111 return false;
4112
4113 // For the transform to be legal, the load must produce only two values
4114 // (the value loaded and the chain). Don't transform a pre-increment
4115 // load, for example, which produces an extra value. Otherwise the
4116 // transformation is not equivalent, and the downstream logic to replace
4117 // uses gets things wrong.
4118 if (Load->getNumValues() > 2)
4119 return false;
4120
4121 // If the load that we're shrinking is an extload and we're not just
4122 // discarding the extension we can't simply shrink the load. Bail.
4123 // TODO: It would be possible to merge the extensions in some cases.
4124 if (Load->getExtensionType() != ISD::NON_EXTLOAD &&
4125 Load->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits() + ShAmt)
4126 return false;
4127
4128 if (!TLI.shouldReduceLoadWidth(Load, ExtType, MemVT))
4129 return false;
4130 } else {
4131 assert(isa<StoreSDNode>(LDST) && "It is not a Load nor a Store SDNode");
4132 StoreSDNode *Store = cast<StoreSDNode>(LDST);
4133 // Can't write outside the original store
4134 if (Store->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits() + ShAmt)
4135 return false;
4136
4137 if (LegalOperations &&
4138 !TLI.isTruncStoreLegal(Store->getValue().getValueType(), MemVT))
4139 return false;
4140 }
4141 return true;
4142}
4143
4144bool DAGCombiner::SearchForAndLoads(SDNode *N,
4145 SmallPtrSetImpl<LoadSDNode*> &Loads,
4146 SmallPtrSetImpl<SDNode*> &NodesWithConsts,
4147 ConstantSDNode *Mask,
4148 SDNode *&NodeToMask) {
4149 // Recursively search for the operands, looking for loads which can be
4150 // narrowed.
4151 for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) {
4152 SDValue Op = N->getOperand(i);
4153
4154 if (Op.getValueType().isVector())
4155 return false;
4156
4157 // Some constants may need fixing up later if they are too large.
4158 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
4159 if ((N->getOpcode() == ISD::OR || N->getOpcode() == ISD::XOR) &&
4160 (Mask->getAPIntValue() & C->getAPIntValue()) != C->getAPIntValue())
4161 NodesWithConsts.insert(N);
4162 continue;
4163 }
4164
4165 if (!Op.hasOneUse())
4166 return false;
4167
4168 switch(Op.getOpcode()) {
4169 case ISD::LOAD: {
4170 auto *Load = cast<LoadSDNode>(Op);
4171 EVT ExtVT;
4172 if (isAndLoadExtLoad(Mask, Load, Load->getValueType(0), ExtVT) &&
4173 isLegalNarrowLdSt(Load, ISD::ZEXTLOAD, ExtVT)) {
4174
4175 // ZEXTLOAD is already small enough.
4176 if (Load->getExtensionType() == ISD::ZEXTLOAD &&
4177 ExtVT.bitsGE(Load->getMemoryVT()))
4178 continue;
4179
4180 // Use LE to convert equal sized loads to zext.
4181 if (ExtVT.bitsLE(Load->getMemoryVT()))
4182 Loads.insert(Load);
4183
4184 continue;
4185 }
4186 return false;
4187 }
4188 case ISD::ZERO_EXTEND:
4189 case ISD::AssertZext: {
4190 unsigned ActiveBits = Mask->getAPIntValue().countTrailingOnes();
4191 EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
4192 EVT VT = Op.getOpcode() == ISD::AssertZext ?
4193 cast<VTSDNode>(Op.getOperand(1))->getVT() :
4194 Op.getOperand(0).getValueType();
4195
4196 // We can accept extending nodes if the mask is wider or an equal
4197 // width to the original type.
4198 if (ExtVT.bitsGE(VT))
4199 continue;
4200 break;
4201 }
4202 case ISD::OR:
4203 case ISD::XOR:
4204 case ISD::AND:
4205 if (!SearchForAndLoads(Op.getNode(), Loads, NodesWithConsts, Mask,
4206 NodeToMask))
4207 return false;
4208 continue;
4209 }
4210
4211 // Allow one node which will be masked along with any loads found.
4212 if (NodeToMask)
4213 return false;
4214
4215 // Also ensure that the node to be masked only produces one data result.
4216 NodeToMask = Op.getNode();
4217 if (NodeToMask->getNumValues() > 1) {
4218 bool HasValue = false;
4219 for (unsigned i = 0, e = NodeToMask->getNumValues(); i < e; ++i) {
4220 MVT VT = SDValue(NodeToMask, i).getSimpleValueType();
4221 if (VT != MVT::Glue && VT != MVT::Other) {
4222 if (HasValue) {
4223 NodeToMask = nullptr;
4224 return false;
4225 }
4226 HasValue = true;
4227 }
4228 }
4229 assert(HasValue && "Node to be masked has no data result?");
4230 }
4231 }
4232 return true;
4233}
4234
4235bool DAGCombiner::BackwardsPropagateMask(SDNode *N, SelectionDAG &DAG) {
4236 auto *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));
4237 if (!Mask)
4238 return false;
4239
4240 if (!Mask->getAPIntValue().isMask())
4241 return false;
4242
4243 // No need to do anything if the and directly uses a load.
4244 if (isa<LoadSDNode>(N->getOperand(0)))
4245 return false;
4246
4247 SmallPtrSet<LoadSDNode*, 8> Loads;
4248 SmallPtrSet<SDNode*, 2> NodesWithConsts;
4249 SDNode *FixupNode = nullptr;
4250 if (SearchForAndLoads(N, Loads, NodesWithConsts, Mask, FixupNode)) {
4251 if (Loads.size() == 0)
4252 return false;
4253
4254 LLVM_DEBUG(dbgs() << "Backwards propagate AND: "; N->dump());
4255 SDValue MaskOp = N->getOperand(1);
4256
4257 // If it exists, fixup the single node we allow in the tree that needs
4258 // masking.
4259 if (FixupNode) {
4260 LLVM_DEBUG(dbgs() << "First, need to fix up: "; FixupNode->dump());
4261 SDValue And = DAG.getNode(ISD::AND, SDLoc(FixupNode),
4262 FixupNode->getValueType(0),
4263 SDValue(FixupNode, 0), MaskOp);
4264 DAG.ReplaceAllUsesOfValueWith(SDValue(FixupNode, 0), And);
4265 if (And.getOpcode() == ISD::AND)
4266 DAG.UpdateNodeOperands(And.getNode(), SDValue(FixupNode, 0), MaskOp);
4267 }
4268
4269 // Narrow any constants that need it.
4270 for (auto *LogicN : NodesWithConsts) {
4271 SDValue Op0 = LogicN->getOperand(0);
4272 SDValue Op1 = LogicN->getOperand(1);
4273
4274 if (isa<ConstantSDNode>(Op0))
4275 std::swap(Op0, Op1);
4276
4277 SDValue And = DAG.getNode(ISD::AND, SDLoc(Op1), Op1.getValueType(),
4278 Op1, MaskOp);
4279
4280 DAG.UpdateNodeOperands(LogicN, Op0, And);
4281 }
4282
4283 // Create narrow loads.
4284 for (auto *Load : Loads) {
4285 LLVM_DEBUG(dbgs() << "Propagate AND back to: "; Load->dump());
4286 SDValue And = DAG.getNode(ISD::AND, SDLoc(Load), Load->getValueType(0),
4287 SDValue(Load, 0), MaskOp);
4288 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), And);
4289 if (And.getOpcode() == ISD::AND)
4290 And = SDValue(
4291 DAG.UpdateNodeOperands(And.getNode(), SDValue(Load, 0), MaskOp), 0);
4292 SDValue NewLoad = ReduceLoadWidth(And.getNode());
4293 assert(NewLoad &&
4294 "Shouldn't be masking the load if it can't be narrowed");
4295 CombineTo(Load, NewLoad, NewLoad.getValue(1));
4296 }
4297 DAG.ReplaceAllUsesWith(N, N->getOperand(0).getNode());
4298 return true;
4299 }
4300 return false;
4301}
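// Illustrative sketch (not part of DAGCombiner.cpp): the mask propagation
// above relies on AND distributing over the logic ops found in the searched
// tree, so the root mask can migrate back to the leaf loads. Hypothetical
// check, assuming 32-bit unsigned:
static bool maskDistributes(unsigned X, unsigned Y, unsigned M) {
  bool OverOr = (((X | Y) & M) == ((X & M) | (Y & M)));
  bool OverXor = (((X ^ Y) & M) == ((X & M) ^ (Y & M)));
  bool OverAnd = (((X & Y) & M) == ((X & M) & (Y & M)));
  return OverOr && OverXor && OverAnd;
}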
4302
4303// Unfold
4304// x & (-1 'logical shift' y)
4305// To
4306// (x 'opposite logical shift' y) 'logical shift' y
4307// if it is better for performance.
4308SDValue DAGCombiner::unfoldExtremeBitClearingToShifts(SDNode *N) {
4309 assert(N->getOpcode() == ISD::AND);
4310
4311 SDValue N0 = N->getOperand(0);
4312 SDValue N1 = N->getOperand(1);
4313
4314 // Do we actually prefer shifts over mask?
4315 if (!TLI.preferShiftsToClearExtremeBits(N0))
4316 return SDValue();
4317
4318 // Try to match (-1 '[outer] logical shift' y)
4319 unsigned OuterShift;
4320 unsigned InnerShift; // The opposite direction to the OuterShift.
4321 SDValue Y; // Shift amount.
4322 auto matchMask = [&OuterShift, &InnerShift, &Y](SDValue M) -> bool {
4323 if (!M.hasOneUse())
4324 return false;
4325 OuterShift = M->getOpcode();
4326 if (OuterShift == ISD::SHL)
4327 InnerShift = ISD::SRL;
4328 else if (OuterShift == ISD::SRL)
4329 InnerShift = ISD::SHL;
4330 else
4331 return false;
4332 if (!isAllOnesConstant(M->getOperand(0)))
4333 return false;
4334 Y = M->getOperand(1);
4335 return true;
4336 };
4337
4338 SDValue X;
4339 if (matchMask(N1))
4340 X = N0;
4341 else if (matchMask(N0))
4342 X = N1;
4343 else
4344 return SDValue();
4345
4346 SDLoc DL(N);
4347 EVT VT = N->getValueType(0);
4348
4349 // tmp = x 'opposite logical shift' y
4350 SDValue T0 = DAG.getNode(InnerShift, DL, VT, X, Y);
4351 // ret = tmp 'logical shift' y
4352 SDValue T1 = DAG.getNode(OuterShift, DL, VT, T0, Y);
4353
4354 return T1;
4355}
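// Illustrative sketch (not part of DAGCombiner.cpp): hypothetical check of
// the unfold above for the SHL-mask direction (the SRL-mask direction is
// symmetric), assuming 32-bit unsigned and an in-range shift amount:
static bool unfoldMaskToShiftsHolds(unsigned X, unsigned Y) {
  if (Y >= 32)
    return true; // out-of-range shift amounts are not produced here
  unsigned Masked = X & (~0u << Y); // x & (-1 << y): clear the low y bits
  unsigned Shifted = (X >> Y) << Y; // (x 'srl' y) 'shl' y
  return Masked == Shifted;
}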
4356
4357SDValue DAGCombiner::visitAND(SDNode *N) {
4358 SDValue N0 = N->getOperand(0);
4359 SDValue N1 = N->getOperand(1);
4360 EVT VT = N1.getValueType();
4361
4362 // x & x --> x
4363 if (N0 == N1)
4364 return N0;
4365
4366 // fold vector ops
4367 if (VT.isVector()) {
4368 if (SDValue FoldedVOp = SimplifyVBinOp(N))
4369 return FoldedVOp;
4370
4371 // fold (and x, 0) -> 0, vector edition
4372 if (ISD::isBuildVectorAllZeros(N0.getNode()))
4373 // do not return N0, because undef node may exist in N0
4374 return DAG.getConstant(APInt::getNullValue(N0.getScalarValueSizeInBits()),
4375 SDLoc(N), N0.getValueType());
4376 if (ISD::isBuildVectorAllZeros(N1.getNode()))
4377 // do not return N1, because undef node may exist in N1
4378 return DAG.getConstant(APInt::getNullValue(N1.getScalarValueSizeInBits()),
4379 SDLoc(N), N1.getValueType());
4380
4381 // fold (and x, -1) -> x, vector edition
4382 if (ISD::isBuildVectorAllOnes(N0.getNode()))
4383 return N1;
4384 if (ISD::isBuildVectorAllOnes(N1.getNode()))
4385 return N0;
4386 }
4387
4388 // fold (and c1, c2) -> c1&c2
4389 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
4390 ConstantSDNode *N1C = isConstOrConstSplat(N1);
4391 if (N0C && N1C && !N1C->isOpaque())
4392 return DAG.FoldConstantArithmetic(ISD::AND, SDLoc(N), VT, N0C, N1C);
4393 // canonicalize constant to RHS
4394 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
4395 !DAG.isConstantIntBuildVectorOrConstantInt(N1))
4396 return DAG.getNode(ISD::AND, SDLoc(N), VT, N1, N0);
4397 // fold (and x, -1) -> x
4398 if (isAllOnesConstant(N1))
4399 return N0;
4400 // if (and x, c) is known to be zero, return 0
4401 unsigned BitWidth = VT.getScalarSizeInBits();
4402 if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
4403 APInt::getAllOnesValue(BitWidth)))
4404 return DAG.getConstant(0, SDLoc(N), VT);
4405
4406 if (SDValue NewSel = foldBinOpIntoSelect(N))
4407 return NewSel;
4408
4409 // reassociate and
4410 if (SDValue RAND = ReassociateOps(ISD::AND, SDLoc(N), N0, N1))
4411 return RAND;
4412
4413 // Try to convert a constant mask AND into a shuffle clear mask.
4414 if (VT.isVector())
4415 if (SDValue Shuffle = XformToShuffleWithZero(N))
4416 return Shuffle;
4417
4418 // fold (and (or x, C), D) -> D if (C & D) == D
4419 auto MatchSubset = [](ConstantSDNode *LHS, ConstantSDNode *RHS) {
4420 return RHS->getAPIntValue().isSubsetOf(LHS->getAPIntValue());
4421 };
4422 if (N0.getOpcode() == ISD::OR &&
4423 ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchSubset))
4424 return N1;
4425 // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits.
4426 if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
4427 SDValue N0Op0 = N0.getOperand(0);
4428 APInt Mask = ~N1C->getAPIntValue();
4429 Mask = Mask.trunc(N0Op0.getScalarValueSizeInBits());
4430 if (DAG.MaskedValueIsZero(N0Op0, Mask)) {
4431 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
4432 N0.getValueType(), N0Op0);
4433
4434 // Replace uses of the AND with uses of the Zero extend node.
4435 CombineTo(N, Zext);
4436
4437 // We actually want to replace all uses of the any_extend with the
4438 // zero_extend, to avoid duplicating things. This will later cause this
4439 // AND to be folded.
4440 CombineTo(N0.getNode(), Zext);
4441 return SDValue(N, 0); // Return N so it doesn't get rechecked!
4442 }
4443 }
4444 // similarly fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) ->
4445 // (X (load ([non_ext|zero_ext] V))) if 'and' only clears top bits which must
4446 // already be zero by virtue of the width of the base type of the load.
4447 //
4448 // the 'X' node here can either be nothing or an extract_vector_elt to catch
4449 // more cases.
4450 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
4451 N0.getValueSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits() &&
4452 N0.getOperand(0).getOpcode() == ISD::LOAD &&
4453 N0.getOperand(0).getResNo() == 0) ||
4454 (N0.getOpcode() == ISD::LOAD && N0.getResNo() == 0)) {
4455 LoadSDNode *Load = cast<LoadSDNode>( (N0.getOpcode() == ISD::LOAD) ?
4456 N0 : N0.getOperand(0) );
4457
4458 // Get the constant (if applicable) the zero'th operand is being ANDed with.
4459 // This can be a pure constant or a vector splat, in which case we treat the
4460 // vector as a scalar and use the splat value.
4461 APInt Constant = APInt::getNullValue(1);
4462 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
4463 Constant = C->getAPIntValue();
4464 } else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) {
4465 APInt SplatValue, SplatUndef;
4466 unsigned SplatBitSize;
4467 bool HasAnyUndefs;
4468 bool IsSplat = Vector->isConstantSplat(SplatValue, SplatUndef,
4469 SplatBitSize, HasAnyUndefs);
4470 if (IsSplat) {
4471 // Undef bits can contribute to a possible optimisation if set, so
4472 // set them.
4473 SplatValue |= SplatUndef;
4474
4475 // The splat value may be something like "0x00FFFFFF", which means 0 for
4476 // the first vector value and FF for the rest, repeating. We need a mask
4477 // that will apply equally to all members of the vector, so AND all the
4478 // lanes of the constant together.
4479 EVT VT = Vector->getValueType(0);
4480 unsigned BitWidth = VT.getScalarSizeInBits();
4481
4482 // If the splat value has been compressed to a bitlength lower
4483 // than the size of the vector lane, we need to re-expand it to
4484 // the lane size.
4485 if (BitWidth > SplatBitSize)
4486 for (SplatValue = SplatValue.zextOrTrunc(BitWidth);
4487 SplatBitSize < BitWidth;
4488 SplatBitSize = SplatBitSize * 2)
4489 SplatValue |= SplatValue.shl(SplatBitSize);
4490
4491 // Make sure that variable 'Constant' is only set if 'SplatBitSize' is a
4492 // multiple of 'BitWidth'. Otherwise, we could propagate a wrong value.
4493 if (SplatBitSize % BitWidth == 0) {
4494 Constant = APInt::getAllOnesValue(BitWidth);
4495 for (unsigned i = 0, n = SplatBitSize/BitWidth; i < n; ++i)
4496 Constant &= SplatValue.lshr(i*BitWidth).zextOrTrunc(BitWidth);
4497 }
4498 }
4499 }
4500
4501 // If we want to change an EXTLOAD to a ZEXTLOAD, ensure a ZEXTLOAD is
4502 // actually legal and isn't going to get expanded, else this is a false
4503 // optimisation.
4504 bool CanZextLoadProfitably = TLI.isLoadExtLegal(ISD::ZEXTLOAD,
4505 Load->getValueType(0),
4506 Load->getMemoryVT());
4507
4508 // Resize the constant to the same size as the original memory access before
4509 // extension. If it is still the AllOnesValue then this AND is completely
4510 // unneeded.
4511 Constant = Constant.zextOrTrunc(Load->getMemoryVT().getScalarSizeInBits());
4512
4513 bool B;
4514 switch (Load->getExtensionType()) {
4515 default: B = false; break;
4516 case ISD::EXTLOAD: B = CanZextLoadProfitably; break;
4517 case ISD::ZEXTLOAD:
4518 case ISD::NON_EXTLOAD: B = true; break;
4519 }
4520
4521 if (B && Constant.isAllOnesValue()) {
4522 // If the load type was an EXTLOAD, convert to ZEXTLOAD in order to
4523 // preserve semantics once we get rid of the AND.
4524 SDValue NewLoad(Load, 0);
4525
4526 // Fold the AND away. NewLoad may get replaced immediately.
4527 CombineTo(N, (N0.getNode() == Load) ? NewLoad : N0);
4528
4529 if (Load->getExtensionType() == ISD::EXTLOAD) {
4530 NewLoad = DAG.getLoad(Load->getAddressingMode(), ISD::ZEXTLOAD,
4531 Load->getValueType(0), SDLoc(Load),
4532 Load->getChain(), Load->getBasePtr(),
4533 Load->getOffset(), Load->getMemoryVT(),
4534 Load->getMemOperand());
4535 // Replace uses of the EXTLOAD with the new ZEXTLOAD.
4536 if (Load->getNumValues() == 3) {
4537 // PRE/POST_INC loads have 3 values.
4538 SDValue To[] = { NewLoad.getValue(0), NewLoad.getValue(1),
4539 NewLoad.getValue(2) };
4540 CombineTo(Load, To, 3, true);
4541 } else {
4542 CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1));
4543 }
4544 }
4545
4546 return SDValue(N, 0); // Return N so it doesn't get rechecked!
4547 }
4548 }
4549
4550 // fold (and (load x), 255) -> (zextload x, i8)
4551 // fold (and (extload x, i16), 255) -> (zextload x, i8)
4552 // fold (and (any_ext (extload x, i16)), 255) -> (zextload x, i8)
4553 if (!VT.isVector() && N1C && (N0.getOpcode() == ISD::LOAD ||
4554 (N0.getOpcode() == ISD::ANY_EXTEND &&
4555 N0.getOperand(0).getOpcode() == ISD::LOAD))) {
4556 if (SDValue Res = ReduceLoadWidth(N)) {
4557 LoadSDNode *LN0 = N0->getOpcode() == ISD::ANY_EXTEND
4558 ? cast<LoadSDNode>(N0.getOperand(0)) : cast<LoadSDNode>(N0);
4559
4560 AddToWorklist(N);
4561 CombineTo(LN0, Res, Res.getValue(1));
4562 return SDValue(N, 0);
4563 }
4564 }
4565
4566 if (Level >= AfterLegalizeTypes) {
4567 // Attempt to propagate the AND back up to the leaves which, if they're
4568 // loads, can be combined to narrow loads and the AND node can be removed.
4569 // Perform after legalization so that extend nodes will already be
4570 // combined into the loads.
4571 if (BackwardsPropagateMask(N, DAG)) {
4572 return SDValue(N, 0);
4573 }
4574 }
4575
4576 if (SDValue Combined = visitANDLike(N0, N1, N))
4577 return Combined;
4578
4579 // Simplify: (and (op x...), (op y...)) -> (op (and x, y))
4580 if (N0.getOpcode() == N1.getOpcode())
4581 if (SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N))
4582 return Tmp;
4583
4584 // Masking the negated extension of a boolean is just the zero-extended
4585 // boolean:
4586 // and (sub 0, zext(bool X)), 1 --> zext(bool X)
4587 // and (sub 0, sext(bool X)), 1 --> zext(bool X)
4588 //
4589 // Note: the SimplifyDemandedBits fold below can make an information-losing
4590 // transform, and then we have no way to find this better fold.
4591 if (N1C && N1C->isOne() && N0.getOpcode() == ISD::SUB) {
4592 if (isNullConstantOrNullSplatConstant(N0.getOperand(0))) {
4593 SDValue SubRHS = N0.getOperand(1);
4594 if (SubRHS.getOpcode() == ISD::ZERO_EXTEND &&
4595 SubRHS.getOperand(0).getScalarValueSizeInBits() == 1)
4596 return SubRHS;
4597 if (SubRHS.getOpcode() == ISD::SIGN_EXTEND &&
4598 SubRHS.getOperand(0).getScalarValueSizeInBits() == 1)
4599 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, SubRHS.getOperand(0));
4600 }
4601 }
4602
4603 // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1)
4604 // fold (and (sra)) -> (and (srl)) when possible.
4605 if (SimplifyDemandedBits(SDValue(N, 0)))
4606 return SDValue(N, 0);
4607
4608 // fold (zext_inreg (extload x)) -> (zextload x)
4609 if (ISD::isEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode())) {
4610 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
4611 EVT MemVT = LN0->getMemoryVT();
4612 // If we zero all the possible extended bits, then we can turn this into
4613 // a zextload if we are running before legalize or the operation is legal.
4614 unsigned BitWidth = N1.getScalarValueSizeInBits();
4615 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
4616 BitWidth - MemVT.getScalarSizeInBits())) &&
4617 ((!LegalOperations && !LN0->isVolatile()) ||
4618 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) {
4619 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT,
4620 LN0->getChain(), LN0->getBasePtr(),
4621 MemVT, LN0->getMemOperand());
4622 AddToWorklist(N);
4623 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
4624 return SDValue(N, 0); // Return N so it doesn't get rechecked!
4625 }
4626 }
4627 // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use
4628 if (ISD::isSEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
4629 N0.hasOneUse()) {
4630 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
4631 EVT MemVT = LN0->getMemoryVT();
4632 // If we zero all the possible extended bits, then we can turn this into
4633 // a zextload if we are running before legalize or the operation is legal.
4634 unsigned BitWidth = N1.getScalarValueSizeInBits();
4635 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
4636 BitWidth - MemVT.getScalarSizeInBits())) &&
4637 ((!LegalOperations && !LN0->isVolatile()) ||
4638 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) {
4639 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT,
4640 LN0->getChain(), LN0->getBasePtr(),
4641 MemVT, LN0->getMemOperand());
4642 AddToWorklist(N);
4643 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
4644 return SDValue(N, 0); // Return N so it doesn't get rechecked!
4645 }
4646 }
4647 // fold (and (or (srl N, 8), (shl N, 8)), 0xffff) -> (srl (bswap N), const)
4648 if (N1C && N1C->getAPIntValue() == 0xffff && N0.getOpcode() == ISD::OR) {
4649 if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
4650 N0.getOperand(1), false))
4651 return BSwap;
4652 }
4653
4654 if (SDValue Shifts = unfoldExtremeBitClearingToShifts(N))
4655 return Shifts;
4656
4657 return SDValue();
4658}
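// Illustrative sketch (not part of DAGCombiner.cpp): hypothetical check of
// the negated-boolean fold handled above, using i32 arithmetic:
static bool negBoolMaskFoldHolds(bool B) {
  unsigned Zext = B ? 1u : 0u;  // zext i1 -> i32
  unsigned Sext = B ? ~0u : 0u; // sext i1 -> i32
  bool ZextCase = (((0u - Zext) & 1u) == Zext); // and (sub 0, zext b), 1
  bool SextCase = (((0u - Sext) & 1u) == Zext); // and (sub 0, sext b), 1
  return ZextCase && SextCase;
}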
4659
4660/// Match (a >> 8) | (a << 8) as (bswap a) >> 16.
4661SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
4662 bool DemandHighBits) {
4663 if (!LegalOperations)
4664 return SDValue();
4665
4666 EVT VT = N->getValueType(0);
4667 if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16)
4668 return SDValue();
4669 if (!TLI.isOperationLegalOrCustom(ISD::BSWAP, VT))
4670 return SDValue();
4671
4672 // Recognize (and (shl a, 8), 0xff00), (and (srl a, 8), 0xff)
4673 bool LookPassAnd0 = false;
4674 bool LookPassAnd1 = false;
4675 if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::SRL)
4676 std::swap(N0, N1);
4677 if (N1.getOpcode() == ISD::AND && N1.getOperand(0).getOpcode() == ISD::SHL)
4678 std::swap(N0, N1);
4679 if (N0.getOpcode() == ISD::AND) {
4680 if (!N0.getNode()->hasOneUse())
4681 return SDValue();
4682 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
4683 // Also handle 0xffff since the LHS is guaranteed to have zeros there.
4684 // This is needed for X86.
4685 if (!N01C || (N01C->getZExtValue() != 0xFF00 &&
4686 N01C->getZExtValue() != 0xFFFF))
4687 return SDValue();
4688 N0 = N0.getOperand(0);
4689 LookPassAnd0 = true;
4690 }
4691
4692 if (N1.getOpcode() == ISD::AND) {
4693 if (!N1.getNode()->hasOneUse())
4694 return SDValue();
4695 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
4696 if (!N11C || N11C->getZExtValue() != 0xFF)
4697 return SDValue();
4698 N1 = N1.getOperand(0);
4699 LookPassAnd1 = true;
4700 }
4701
4702 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
4703 std::swap(N0, N1);
4704 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
4705 return SDValue();
4706 if (!N0.getNode()->hasOneUse() || !N1.getNode()->hasOneUse())
4707 return SDValue();
4708
4709 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
4710 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
4711 if (!N01C || !N11C)
4712 return SDValue();
4713 if (N01C->getZExtValue() != 8 || N11C->getZExtValue() != 8)
4714 return SDValue();
4715
4716 // Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8)
4717 SDValue N00 = N0->getOperand(0);
4718 if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) {
4719 if (!N00.getNode()->hasOneUse())
4720 return SDValue();
4721 ConstantSDNode *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1));
4722 if (!N001C || N001C->getZExtValue() != 0xFF)
4723 return SDValue();
4724 N00 = N00.getOperand(0);
4725 LookPassAnd0 = true;
4726 }
4727
4728 SDValue N10 = N1->getOperand(0);
4729 if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) {
4730 if (!N10.getNode()->hasOneUse())
4731 return SDValue();
4732 ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1));
4733 // Also allow 0xFFFF since the bits will be shifted out. This is needed
4734 // for X86.
4735 if (!N101C || (N101C->getZExtValue() != 0xFF00 &&
4736 N101C->getZExtValue() != 0xFFFF))
4737 return SDValue();
4738 N10 = N10.getOperand(0);
4739 LookPassAnd1 = true;
4740 }
4741
4742 if (N00 != N10)
4743 return SDValue();
4744
4745 // Make sure everything beyond the low halfword gets set to zero since the SRL
4746 // 16 will clear the top bits.
4747 unsigned OpSizeInBits = VT.getSizeInBits();
4748 if (DemandHighBits && OpSizeInBits > 16) {
4749 // If the left-shift isn't masked out then the only way this is a bswap is
4750 // if all bits beyond the low 8 are 0. In that case the entire pattern
4751 // reduces to a left shift anyway: leave it for other parts of the combiner.
4752 if (!LookPassAnd0)
4753 return SDValue();
4754
4755 // However, if the right shift isn't masked out then it might be because
4756 // it's not needed. See if we can spot that too.
4757 if (!LookPassAnd1 &&
4758 !DAG.MaskedValueIsZero(
4759 N10, APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - 16)))
4760 return SDValue();
4761 }
4762
4763 SDValue Res = DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N00);
4764 if (OpSizeInBits > 16) {
4765 SDLoc DL(N);
4766 Res = DAG.getNode(ISD::SRL, DL, VT, Res,
4767 DAG.getConstant(OpSizeInBits - 16, DL,
4768 getShiftAmountTy(VT)));
4769 }
4770 return Res;
4771}
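// Illustrative sketch (not part of DAGCombiner.cpp): hypothetical i32 check
// of the pattern this matcher recognizes, using the GCC/Clang bswap builtin:
static bool bswapHWordLowHolds(unsigned A) {
  unsigned Pattern = ((A << 8) & 0xFF00u) | ((A >> 8) & 0xFFu);
  return Pattern == (__builtin_bswap32(A) >> 16);
}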
4772
4773/// Return true if the specified node is an element that makes up a 32-bit
4774/// packed halfword byteswap.
4775/// ((x & 0x000000ff) << 8) |
4776/// ((x & 0x0000ff00) >> 8) |
4777/// ((x & 0x00ff0000) << 8) |
4778/// ((x & 0xff000000) >> 8)
4779static bool isBSwapHWordElement(SDValue N, MutableArrayRef<SDNode *> Parts) {
4780 if (!N.getNode()->hasOneUse())
4781 return false;
4782
4783 unsigned Opc = N.getOpcode();
4784 if (Opc != ISD::AND && Opc != ISD::SHL && Opc != ISD::SRL)
4785 return false;
4786
4787 SDValue N0 = N.getOperand(0);
4788 unsigned Opc0 = N0.getOpcode();
4789 if (Opc0 != ISD::AND && Opc0 != ISD::SHL && Opc0 != ISD::SRL)
4790 return false;
4791
4792 ConstantSDNode *N1C = nullptr;
4793 // SHL or SRL: look upstream for AND mask operand
4794 if (Opc == ISD::AND)
4795 N1C = dyn_cast<ConstantSDNode>(N.getOperand(1));
4796 else if (Opc0 == ISD::AND)
4797 N1C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
4798 if (!N1C)
4799 return false;
4800
4801 unsigned MaskByteOffset;
4802 switch (N1C->getZExtValue()) {
4803 default:
4804 return false;
4805 case 0xFF: MaskByteOffset = 0; break;
4806 case 0xFF00: MaskByteOffset = 1; break;
4807 case 0xFFFF:
4808 // In case demanded bits didn't clear the bits that will be shifted out.
4809 // This is needed for X86.
4810 if (Opc == ISD::SRL || (Opc == ISD::AND && Opc0 == ISD::SHL)) {
4811 MaskByteOffset = 1;
4812 break;
4813 }
4814 return false;
4815 case 0xFF0000: MaskByteOffset = 2; break;
4816 case 0xFF000000: MaskByteOffset = 3; break;
4817 }
4818
4819 // Look for (x & 0xff) << 8 as well as ((x << 8) & 0xff00).
4820 if (Opc == ISD::AND) {
4821 if (MaskByteOffset == 0 || MaskByteOffset == 2) {
4822 // (x >> 8) & 0xff
4823 // (x >> 8) & 0xff0000
4824 if (Opc0 != ISD::SRL)
4825 return false;
4826 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
4827 if (!C || C->getZExtValue() != 8)
4828 return false;
4829 } else {
4830 // (x << 8) & 0xff00
4831 // (x << 8) & 0xff000000
4832 if (Opc0 != ISD::SHL)
4833 return false;
4834 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
4835 if (!C || C->getZExtValue() != 8)
4836 return false;
4837 }
4838 } else if (Opc == ISD::SHL) {
4839 // (x & 0xff) << 8
4840 // (x & 0xff0000) << 8
4841 if (MaskByteOffset != 0 && MaskByteOffset != 2)
4842 return false;
4843 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
4844 if (!C || C->getZExtValue() != 8)
4845 return false;
4846 } else { // Opc == ISD::SRL
4847 // (x & 0xff00) >> 8
4848 // (x & 0xff000000) >> 8
4849 if (MaskByteOffset != 1 && MaskByteOffset != 3)
4850 return false;
4851 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
4852 if (!C || C->getZExtValue() != 8)
4853 return false;
4854 }
4855
4856 if (Parts[MaskByteOffset])
4857 return false;
4858
4859 Parts[MaskByteOffset] = N0.getOperand(0).getNode();
4860 return true;
4861}
4862
4863/// Match a 32-bit packed halfword bswap. That is
4864/// ((x & 0x000000ff) << 8) |
4865/// ((x & 0x0000ff00) >> 8) |
4866/// ((x & 0x00ff0000) << 8) |
4867/// ((x & 0xff000000) >> 8)
4868/// => (rotl (bswap x), 16)
4869SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) {
4870 if (!LegalOperations)
4871 return SDValue();
4872
4873 EVT VT = N->getValueType(0);
4874 if (VT != MVT::i32)
4875 return SDValue();
4876 if (!TLI.isOperationLegalOrCustom(ISD::BSWAP, VT))
4877 return SDValue();
4878
4879 // Look for either
4880 // (or (or (and), (and)), (or (and), (and)))
4881 // (or (or (or (and), (and)), (and)), (and))
4882 if (N0.getOpcode() != ISD::OR)
4883 return SDValue();
4884 SDValue N00 = N0.getOperand(0);
4885 SDValue N01 = N0.getOperand(1);
4886 SDNode *Parts[4] = {};
4887
4888 if (N1.getOpcode() == ISD::OR &&
4889 N00.getNumOperands() == 2 && N01.getNumOperands() == 2) {
4890 // (or (or (and), (and)), (or (and), (and)))
4891 if (!isBSwapHWordElement(N00, Parts))
4892 return SDValue();
4893
4894 if (!isBSwapHWordElement(N01, Parts))
4895 return SDValue();
4896 SDValue N10 = N1.getOperand(0);
4897 if (!isBSwapHWordElement(N10, Parts))
4898 return SDValue();
4899 SDValue N11 = N1.getOperand(1);
4900 if (!isBSwapHWordElement(N11, Parts))
4901 return SDValue();
4902 } else {
4903 // (or (or (or (and), (and)), (and)), (and))
4904 if (!isBSwapHWordElement(N1, Parts))
4905 return SDValue();
4906 if (!isBSwapHWordElement(N01, Parts))
4907 return SDValue();
4908 if (N00.getOpcode() != ISD::OR)
4909 return SDValue();
4910 SDValue N000 = N00.getOperand(0);
4911 if (!isBSwapHWordElement(N000, Parts))
4912 return SDValue();
4913 SDValue N001 = N00.getOperand(1);
4914 if (!isBSwapHWordElement(N001, Parts))
4915 return SDValue();
4916 }
4917
4918 // Make sure the parts are all coming from the same node.
4919 if (Parts[0] != Parts[1] || Parts[0] != Parts[2] || Parts[0] != Parts[3])
4920 return SDValue();
4921
4922 SDLoc DL(N);
4923 SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT,
4924 SDValue(Parts[0], 0));
4925
4926 // Result of the bswap should be rotated by 16. If it's not legal, then
4927 // do (x << 16) | (x >> 16).
4928 SDValue ShAmt = DAG.getConstant(16, DL, getShiftAmountTy(VT));
4929 if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT))
4930 return DAG.getNode(ISD::ROTL, DL, VT, BSwap, ShAmt);
4931 if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT))
4932 return DAG.getNode(ISD::ROTR, DL, VT, BSwap, ShAmt);
4933 return DAG.getNode(ISD::OR, DL, VT,
4934 DAG.getNode(ISD::SHL, DL, VT, BSwap, ShAmt),
4935 DAG.getNode(ISD::SRL, DL, VT, BSwap, ShAmt));
4936}
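// Illustrative sketch (not part of DAGCombiner.cpp): hypothetical check that
// the four masked-shift terms above really equal rotl(bswap(x), 16) on i32:
static bool bswapHWordHolds(unsigned X) {
  unsigned Pattern = ((X & 0x000000FFu) << 8) | ((X & 0x0000FF00u) >> 8) |
                     ((X & 0x00FF0000u) << 8) | ((X & 0xFF000000u) >> 8);
  unsigned B = __builtin_bswap32(X);
  return Pattern == ((B << 16) | (B >> 16)); // rotl (bswap x), 16
}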
4937
4938/// This contains all DAGCombine rules which reduce two values combined by
4939/// an Or operation to a single value \see visitANDLike().
4940SDValue DAGCombiner::visitORLike(SDValue N0, SDValue N1, SDNode *N) {
4941 EVT VT = N1.getValueType();
4942 SDLoc DL(N);
4943
4944 // fold (or x, undef) -> -1
4945 if (!LegalOperations && (N0.isUndef() || N1.isUndef()))
4946 return DAG.getAllOnesConstant(DL, VT);
4947
4948 if (SDValue V = foldLogicOfSetCCs(false, N0, N1, DL))
4949 return V;
4950
4951 // (or (and X, C1), (and Y, C2)) -> (and (or X, Y), C3) if possible.
4952 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == ISD::AND &&
4953 // Don't increase # computations.
4954 (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
4955 // We can only do this xform if we know that bits from X that are set in C2
4956 // but not in C1 are already zero. Likewise for Y.
4957 if (const ConstantSDNode *N0O1C =
4958 getAsNonOpaqueConstant(N0.getOperand(1))) {
4959 if (const ConstantSDNode *N1O1C =
4960 getAsNonOpaqueConstant(N1.getOperand(1))) {
4961 // We can only do this xform if we know that bits from X that are set in
4962 // C2 but not in C1 are already zero. Likewise for Y.
4963 const APInt &LHSMask = N0O1C->getAPIntValue();
4964 const APInt &RHSMask = N1O1C->getAPIntValue();
4965
4966 if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
4967 DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
4968 SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
4969 N0.getOperand(0), N1.getOperand(0));
4970 return DAG.getNode(ISD::AND, DL, VT, X,
4971 DAG.getConstant(LHSMask | RHSMask, DL, VT));
4972 }
4973 }
4974 }
4975 }
4976
4977 // (or (and X, M), (and X, N)) -> (and X, (or M, N))
4978 if (N0.getOpcode() == ISD::AND &&
4979 N1.getOpcode() == ISD::AND &&
4980 N0.getOperand(0) == N1.getOperand(0) &&
4981 // Don't increase # computations.
4982 (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
4983 SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
4984 N0.getOperand(1), N1.getOperand(1));
4985 return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), X);
4986 }
4987
4988 return SDValue();
4989}
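// Illustrative sketch (not part of DAGCombiner.cpp): hypothetical check of
// the final fold above, assuming 32-bit unsigned:
static bool orOfAndsSharedOperandHolds(unsigned X, unsigned M, unsigned N) {
  return ((X & M) | (X & N)) == (X & (M | N)); // and X, (or M, N)
}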
4990
4991SDValue DAGCombiner::visitOR(SDNode *N) {
4992 SDValue N0 = N->getOperand(0);
4993 SDValue N1 = N->getOperand(1);
4994 EVT VT = N1.getValueType();
4995
4996 // x | x --> x
4997 if (N0 == N1)
4998 return N0;
4999
5000 // fold vector ops
5001 if (VT.isVector()) {
5002 if (SDValue FoldedVOp = SimplifyVBinOp(N))
5003 return FoldedVOp;
5004
5005 // fold (or x, 0) -> x, vector edition
5006 if (ISD::isBuildVectorAllZeros(N0.getNode()))
5007 return N1;
5008 if (ISD::isBuildVectorAllZeros(N1.getNode()))
5009 return N0;
5010
5011 // fold (or x, -1) -> -1, vector edition
5012 if (ISD::isBuildVectorAllOnes(N0.getNode()))
5013 // do not return N0, because undef node may exist in N0
5014 return DAG.getAllOnesConstant(SDLoc(N), N0.getValueType());
5015 if (ISD::isBuildVectorAllOnes(N1.getNode()))
5016 // do not return N1, because undef node may exist in N1
5017 return DAG.getAllOnesConstant(SDLoc(N), N1.getValueType());
5018
5019 // fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf A, B, Mask)
5020 // Do this only if the resulting shuffle is legal.
5021 if (isa<ShuffleVectorSDNode>(N0) &&
5022 isa<ShuffleVectorSDNode>(N1) &&
5023 // Avoid folding a node with illegal type.
5024 TLI.isTypeLegal(VT)) {
5025 bool ZeroN00 = ISD::isBuildVectorAllZeros(N0.getOperand(0).getNode());
5026 bool ZeroN01 = ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode());
5027 bool ZeroN10 = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode());
5028 bool ZeroN11 = ISD::isBuildVectorAllZeros(N1.getOperand(1).getNode());
5029 // Ensure each shuffle has exactly one zero input.
5030 if ((ZeroN00 != ZeroN01) && (ZeroN10 != ZeroN11)) {
5031 assert((!ZeroN00 || !ZeroN01) && "Both inputs zero!");
5032 assert((!ZeroN10 || !ZeroN11) && "Both inputs zero!");
5033 const ShuffleVectorSDNode *SV0 = cast<ShuffleVectorSDNode>(N0);
5034 const ShuffleVectorSDNode *SV1 = cast<ShuffleVectorSDNode>(N1);
5035 bool CanFold = true;
5036 int NumElts = VT.getVectorNumElements();
5037 SmallVector<int, 4> Mask(NumElts);
5038
5039 for (int i = 0; i != NumElts; ++i) {
5040 int M0 = SV0->getMaskElt(i);
5041 int M1 = SV1->getMaskElt(i);
5042
5043 // Determine if either index is pointing to a zero vector.
5044 bool M0Zero = M0 < 0 || (ZeroN00 == (M0 < NumElts));
5045 bool M1Zero = M1 < 0 || (ZeroN10 == (M1 < NumElts));
5046
5047 // If one element is zero and the other side is undef, keep undef.
5048 // This also handles the case that both are undef.
5049 if ((M0Zero && M1 < 0) || (M1Zero && M0 < 0)) {
5050 Mask[i] = -1;
5051 continue;
5052 }
5053
5054 // Make sure only one of the elements is zero.
5055 if (M0Zero == M1Zero) {
5056 CanFold = false;
5057 break;
5058 }
5059
5060 assert((M0 >= 0 || M1 >= 0) && "Undef index!");
5061
5062 // We have a zero and non-zero element. If the non-zero came from
5063 // SV0, make the index a LHS index. If it came from SV1, make it
5064 // a RHS index. We need to mod by NumElts because we don't care
5065 // which operand it came from in the original shuffles.
5066 Mask[i] = M1Zero ? M0 % NumElts : (M1 % NumElts) + NumElts;
5067 }
5068
5069 if (CanFold) {
5070 SDValue NewLHS = ZeroN00 ? N0.getOperand(1) : N0.getOperand(0);
5071 SDValue NewRHS = ZeroN10 ? N1.getOperand(1) : N1.getOperand(0);
5072
5073 bool LegalMask = TLI.isShuffleMaskLegal(Mask, VT);
5074 if (!LegalMask) {
5075 std::swap(NewLHS, NewRHS);
5076 ShuffleVectorSDNode::commuteMask(Mask);
5077 LegalMask = TLI.isShuffleMaskLegal(Mask, VT);
5078 }
5079
5080 if (LegalMask)
5081 return DAG.getVectorShuffle(VT, SDLoc(N), NewLHS, NewRHS, Mask);
5082 }
5083 }
5084 }
5085 }
5086
5087 // fold (or c1, c2) -> c1|c2
5088 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
5089 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
5090 if (N0C && N1C && !N1C->isOpaque())
5091 return DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N), VT, N0C, N1C);
5092 // canonicalize constant to RHS
5093 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
5094 !DAG.isConstantIntBuildVectorOrConstantInt(N1))
5095 return DAG.getNode(ISD::OR, SDLoc(N), VT, N1, N0);
5096 // fold (or x, 0) -> x
5097 if (isNullConstant(N1))
5098 return N0;
5099 // fold (or x, -1) -> -1
5100 if (isAllOnesConstant(N1))
5101 return N1;
5102
5103 if (SDValue NewSel = foldBinOpIntoSelect(N))
5104 return NewSel;
5105
5106 // fold (or x, c) -> c iff (x & ~c) == 0
5107 if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
5108 return N1;
5109
5110 if (SDValue Combined = visitORLike(N0, N1, N))
5111 return Combined;
5112
5113 // Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16)
5114 if (SDValue BSwap = MatchBSwapHWord(N, N0, N1))
5115 return BSwap;
5116 if (SDValue BSwap = MatchBSwapHWordLow(N, N0, N1))
5117 return BSwap;
5118
5119 // reassociate or
5120 if (SDValue ROR = ReassociateOps(ISD::OR, SDLoc(N), N0, N1))
5121 return ROR;
5122
5123 // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
5124 // iff (c1 & c2) != 0.
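 // Concrete example, assuming i8 values: with c1 = 0x0F and c2 = 0x0C
 // (note c1 & c2 == 0x0C != 0),
 //   (or (and X, 0x0F), 0x0C) -> (and (or X, 0x0C), 0x0F)
 // since 0x0F | 0x0C == 0x0F.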
5125 auto MatchIntersect = [](ConstantSDNode *LHS, ConstantSDNode *RHS) {
5126 return LHS->getAPIntValue().intersects(RHS->getAPIntValue());
5127 };
5128 if (N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
5129 ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchIntersect)) {
5130 if (SDValue COR = DAG.FoldConstantArithmetic(
5131 ISD::OR, SDLoc(N1), VT, N1.getNode(), N0.getOperand(1).getNode())) {
5132 SDValue IOR = DAG.getNode(ISD::OR, SDLoc(N0), VT, N0.getOperand(0), N1);
5133 AddToWorklist(IOR.getNode());
5134 return DAG.getNode(ISD::AND, SDLoc(N), VT, COR, IOR);
5135 }
5136 }
5137
5138 // Simplify: (or (op x...), (op y...)) -> (op (or x, y))
5139 if (N0.getOpcode() == N1.getOpcode())
5140 if (SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N))
5141 return Tmp;
5142
5143 // See if this is some rotate idiom.
5144 if (SDNode *Rot = MatchRotate(N0, N1, SDLoc(N)))
5145 return SDValue(Rot, 0);
5146
5147 if (SDValue Load = MatchLoadCombine(N))
5148 return Load;
5149
5150 // Simplify the operands using demanded-bits information.
5151 if (SimplifyDemandedBits(SDValue(N, 0)))
5152 return SDValue(N, 0);
5153
5154 return SDValue();
5155}
5156
5157/// Match "(X shl/srl V1) & V2" where V2 may not be present.
5158bool DAGCombiner::MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask) {
5159 if (Op.getOpcode() == ISD::AND) {
5160 if (DAG.isConstantIntBuildVectorOrConstantInt(Op.getOperand(1))) {
5161 Mask = Op.getOperand(1);
5162 Op = Op.getOperand(0);
5163 } else {
5164 return false;
5165 }
5166 }
5167
5168 if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) {
5169 Shift = Op;
5170 return true;
5171 }
5172
5173 return false;
5174}
5175
5176// Return true if we can prove that, whenever Neg and Pos are both in the
5177// range [0, EltSize), Neg == (Pos == 0 ? 0 : EltSize - Pos). This means that
5178// for two opposing shifts shift1 and shift2 and a value X with EltSize bits:
5179//
5180// (or (shift1 X, Neg), (shift2 X, Pos))
5181//
5182// reduces to a rotate in direction shift2 by Pos or (equivalently) a rotate
5183// in direction shift1 by Neg. The range [0, EltSize) means that we only need
5184// to consider shift amounts with defined behavior.
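// Illustrative instance, assuming EltSize == 32: Neg == (sub 32, Pos)
// satisfies this, since for Pos in [1, 31] we get Neg == 32 - Pos, and the
// Pos == 0 case puts Neg == 32 outside the range we have to reason about.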
5185static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize,
5186 SelectionDAG &DAG) {
5187 // If EltSize is a power of 2 then:
5188 //
5189 // (a) (Pos == 0 ? 0 : EltSize - Pos) == (EltSize - Pos) & (EltSize - 1)
5190 // (b) Neg == Neg & (EltSize - 1) whenever Neg is in [0, EltSize).
5191 //
5192 // So if EltSize is a power of 2 and Neg is (and Neg', EltSize-1), we check
5193 // for the stronger condition:
5194 //
5195 // Neg & (EltSize - 1) == (EltSize - Pos) & (EltSize - 1) [A]
5196 //
5197 // for all Neg and Pos. Since Neg & (EltSize - 1) == Neg' & (EltSize - 1)
5198 // we can just replace Neg with Neg' for the rest of the function.
5199 //
5200 // In other cases we check for the even stronger condition:
5201 //
5202 // Neg == EltSize - Pos [B]
5203 //
5204 // for all Neg and Pos. Note that the (or ...) then invokes undefined
5205 // behavior if Pos == 0 (and consequently Neg == EltSize).
5206 //
5207 // We could actually use [A] whenever EltSize is a power of 2, but the
5208 // only extra cases that it would match are those uninteresting ones
5209 // where Neg and Pos are never in range at the same time. E.g. for
5210 // EltSize == 32, using [A] would allow a Neg of the form (sub 64, Pos)
5211 // as well as (sub 32, Pos), but:
5212 //
5213 // (or (shift1 X, (sub 64, Pos)), (shift2 X, Pos))
5214 //
5215 // always invokes undefined behavior for 32-bit X.
5216 //
5217 // Below, Mask == EltSize - 1 when using [A] and is all-ones otherwise.
5218 unsigned MaskLoBits = 0;
5219 if (Neg.getOpcode() == ISD::AND && isPowerOf2_64(EltSize)) {
5220 if (ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(1))) {
5221 KnownBits Known;
5222 DAG.computeKnownBits(Neg.getOperand(0), Known);
5223 unsigned Bits = Log2_64(EltSize);
5224 if (NegC->getAPIntValue().getActiveBits() <= Bits &&
5225 ((NegC->getAPIntValue() | Known.Zero).countTrailingOnes() >= Bits)) {
5226 Neg = Neg.getOperand(0);
5227 MaskLoBits = Bits;
5228 }
5229 }
5230 }
5231
5232 // Check whether Neg has the form (sub NegC, NegOp1) for some NegC and NegOp1.
5233 if (Neg.getOpcode() != ISD::SUB)
5234 return false;
5235 ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(0));
5236 if (!NegC)
5237 return false;
5238 SDValue NegOp1 = Neg.getOperand(1);
5239
5240 // On the RHS of [A], if Pos is Pos' & (EltSize - 1), just replace Pos with
5241 // Pos'. The truncation is redundant for the purpose of the equality.
5242 if (MaskLoBits && Pos.getOpcode() == ISD::AND) {
5243 if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1))) {
5244 KnownBits Known;
5245 DAG.computeKnownBits(Pos.getOperand(0), Known);
5246 if (PosC->getAPIntValue().getActiveBits() <= MaskLoBits &&
5247 ((PosC->getAPIntValue() | Known.Zero).countTrailingOnes() >=
5248 MaskLoBits))
5249 Pos = Pos.getOperand(0);
5250 }
5251 }
5252
5253 // The condition we need is now:
5254 //
5255 // (NegC - NegOp1) & Mask == (EltSize - Pos) & Mask
5256 //
5257 // If NegOp1 == Pos then we need:
5258 //
5259 // EltSize & Mask == NegC & Mask
5260 //
5261 // (because "x & Mask" is a truncation and distributes through subtraction).
5262 APInt Width;
5263 if (Pos == NegOp1)
5264 Width = NegC->getAPIntValue();
5265
5266 // Check for cases where Pos has the form (add NegOp1, PosC) for some PosC.
5267 // Then the condition we want to prove becomes:
5268 //
5269 // (NegC - NegOp1) & Mask == (EltSize - (NegOp1 + PosC)) & Mask
5270 //
5271 // which, again because "x & Mask" is a truncation, becomes:
5272 //
5273 // NegC & Mask == (EltSize - PosC) & Mask
5274 // EltSize & Mask == (NegC + PosC) & Mask
5275 else if (Pos.getOpcode() == ISD::ADD && Pos.getOperand(0) == NegOp1) {
5276 if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1)))
5277 Width = PosC->getAPIntValue() + NegC->getAPIntValue();
5278 else
5279 return false;
5280 } else
5281 return false;
5282
5283 // Now we just need to check that EltSize & Mask == Width & Mask.
5284 if (MaskLoBits)
5285 // EltSize & Mask is 0 since Mask is EltSize - 1.
5286 return Width.getLoBits(MaskLoBits) == 0;
5287 return Width == EltSize;
5288}
5289
5290// A subroutine of MatchRotate used once we have found an OR of two opposite
5291// shifts of Shifted. If Neg == <operand size> - Pos then the OR reduces
5292// to both (PosOpcode Shifted, Pos) and (NegOpcode Shifted, Neg), with the
5293// former being preferred if supported. InnerPos and InnerNeg are Pos and
5294// Neg with outer conversions stripped away.
5295SDNode *DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos,
5296 SDValue Neg, SDValue InnerPos,
5297 SDValue InnerNeg, unsigned PosOpcode,
5298 unsigned NegOpcode, const SDLoc &DL) {
5299 // fold (or (shl x, (*ext y)),
5300 // (srl x, (*ext (sub 32, y)))) ->
5301 // (rotl x, y) or (rotr x, (sub 32, y))
5302 //
5303 // fold (or (shl x, (*ext (sub 32, y))),
5304 // (srl x, (*ext y))) ->
5305 // (rotr x, y) or (rotl x, (sub 32, y))
5306 EVT VT = Shifted.getValueType();
5307 if (matchRotateSub(InnerPos, InnerNeg, VT.getScalarSizeInBits(), DAG)) {
5308 bool HasPos = TLI.isOperationLegalOrCustom(PosOpcode, VT);
5309 return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, Shifted,
5310 HasPos ? Pos : Neg).getNode();
5311 }
5312
5313 return nullptr;
5314}
5315
5316// MatchRotate - Handle an 'or' of two operands. If this is one of the many
5317// idioms for rotate, and if the target supports rotation instructions, generate
5318// a rot[lr].
5319SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL) {
5320 // Must be a legal type. Expanded and promoted types won't work with rotates.
5321 EVT VT = LHS.getValueType();
5322 if (!TLI.isTypeLegal(VT)) return nullptr;
5323
5324 // The target must have at least one rotate flavor.
5325 bool HasROTL = hasOperation(ISD::ROTL, VT);
5326 bool HasROTR = hasOperation(ISD::ROTR, VT);
5327 if (!HasROTL && !HasROTR) return nullptr;
5328
5329 // Check for truncated rotate.
5330 if (LHS.getOpcode() == ISD::TRUNCATE && RHS.getOpcode() == ISD::TRUNCATE &&
5331 LHS.getOperand(0).getValueType() == RHS.getOperand(0).getValueType()) {
5332 assert(LHS.getValueType() == RHS.getValueType());
5333 if (SDNode *Rot = MatchRotate(LHS.getOperand(0), RHS.getOperand(0), DL)) {
5334 return DAG.getNode(ISD::TRUNCATE, SDLoc(LHS), LHS.getValueType(),
5335 SDValue(Rot, 0)).getNode();
5336 }
5337 }
5338
5339 // Match "(X shl/srl V1) & V2" where V2 may not be present.
5340 SDValue LHSShift; // The shift.
5341 SDValue LHSMask; // AND value if any.
5342 if (!MatchRotateHalf(LHS, LHSShift, LHSMask))
5343 return nullptr; // Not part of a rotate.
5344
5345 SDValue RHSShift; // The shift.
5346 SDValue RHSMask; // AND value if any.
5347 if (!MatchRotateHalf(RHS, RHSShift, RHSMask))
5348 return nullptr; // Not part of a rotate.
5349
5350 if (LHSShift.getOperand(0) != RHSShift.getOperand(0))
5351 return nullptr; // Not shifting the same value.
5352
5353 if (LHSShift.getOpcode() == RHSShift.getOpcode())
5354 return nullptr; // Shifts must disagree.
5355
5356 // Canonicalize shl to left side in a shl/srl pair.
5357 if (RHSShift.getOpcode() == ISD::SHL) {
5358 std::swap(LHS, RHS);
5359 std::swap(LHSShift, RHSShift);
5360 std::swap(LHSMask, RHSMask);
5361 }
5362
5363 unsigned EltSizeInBits = VT.getScalarSizeInBits();
5364 SDValue LHSShiftArg = LHSShift.getOperand(0);
5365 SDValue LHSShiftAmt = LHSShift.getOperand(1);
5366 SDValue RHSShiftArg = RHSShift.getOperand(0);
5367 SDValue RHSShiftAmt = RHSShift.getOperand(1);
5368
5369 // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
5370 // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2)
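 // Concrete example, assuming i32: (or (shl x, 8), (srl x, 24)) becomes
 // (rotl x, 8) (or, equivalently, (rotr x, 24)) because the shift amounts
 // sum to the element size of 32.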
5371 auto MatchRotateSum = [EltSizeInBits](ConstantSDNode *LHS,
5372 ConstantSDNode *RHS) {
5373 return (LHS->getAPIntValue() + RHS->getAPIntValue()) == EltSizeInBits;
5374 };
5375 if (ISD::matchBinaryPredicate(LHSShiftAmt, RHSShiftAmt, MatchRotateSum)) {
5376 SDValue Rot = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT,
5377 LHSShiftArg, HasROTL ? LHSShiftAmt : RHSShiftAmt);
5378
5379 // If there is an AND of either shifted operand, apply it to the result.
5380 if (LHSMask.getNode() || RHSMask.getNode()) {
5381 SDValue AllOnes = DAG.getAllOnesConstant(DL, VT);
5382 SDValue Mask = AllOnes;
5383
5384 if (LHSMask.getNode()) {
5385 SDValue RHSBits = DAG.getNode(ISD::SRL, DL, VT, AllOnes, RHSShiftAmt);
5386 Mask = DAG.getNode(ISD::AND, DL, VT, Mask,
5387 DAG.getNode(ISD::OR, DL, VT, LHSMask, RHSBits));
5388 }
5389 if (RHSMask.getNode()) {
5390 SDValue LHSBits = DAG.getNode(ISD::SHL, DL, VT, AllOnes, LHSShiftAmt);
5391 Mask = DAG.getNode(ISD::AND, DL, VT, Mask,
5392 DAG.getNode(ISD::OR, DL, VT, RHSMask, LHSBits));
5393 }
5394
5395 Rot = DAG.getNode(ISD::AND, DL, VT, Rot, Mask);
5396 }
5397
5398 return Rot.getNode();
5399 }
5400
5401 // If there is a mask here, and we have a variable shift, we can't be sure
5402 // that we're masking out the right bits.
5403 if (LHSMask.getNode() || RHSMask.getNode())
5404 return nullptr;
5405
5406 // If the shift amount is sign/zext/any-extended, just peel it off.
5407 SDValue LExtOp0 = LHSShiftAmt;
5408 SDValue RExtOp0 = RHSShiftAmt;
5409 if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
5410 LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
5411 LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
5412 LHSShiftAmt.getOpcode() == ISD::TRUNCATE) &&
5413 (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
5414 RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
5415 RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
5416 RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) {
5417 LExtOp0 = LHSShiftAmt.getOperand(0);
5418 RExtOp0 = RHSShiftAmt.getOperand(0);
5419 }
5420
5421 SDNode *TryL = MatchRotatePosNeg(LHSShiftArg, LHSShiftAmt, RHSShiftAmt,
5422 LExtOp0, RExtOp0, ISD::ROTL, ISD::ROTR, DL);
5423 if (TryL)
5424 return TryL;
5425
5426 SDNode *TryR = MatchRotatePosNeg(RHSShiftArg, RHSShiftAmt, LHSShiftAmt,
5427 RExtOp0, LExtOp0, ISD::ROTR, ISD::ROTL, DL);
5428 if (TryR)
5429 return TryR;
5430
5431 return nullptr;
5432}
5433
5434namespace {
5435
5436/// Represents the known origin of an individual byte in a load combine
5437/// pattern. The value of the byte is either constant zero or comes from memory.
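/// Illustrative example, assuming an i32 value (zext (load i8 *p)): byte 0 is
/// provided by the load at offset 0 and bytes 1-3 are constant zero.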
5438struct ByteProvider {
5439 // For constant zero providers Load is set to nullptr. For memory providers
5440 // Load represents the node which loads the byte from memory.
5441 // ByteOffset is the offset of the byte in the value produced by the load.
5442 LoadSDNode *Load = nullptr;
5443 unsigned ByteOffset = 0;
5444
5445 ByteProvider() = default;
5446
5447 static ByteProvider getMemory(LoadSDNode *Load, unsigned ByteOffset) {
5448 return ByteProvider(Load, ByteOffset);
5449 }
5450
5451 static ByteProvider getConstantZero() { return ByteProvider(nullptr, 0); }
5452
5453 bool isConstantZero() const { return !Load; }
5454 bool isMemory() const { return Load; }
5455
5456 bool operator==(const ByteProvider &Other) const {
5457 return Other.Load == Load && Other.ByteOffset == ByteOffset;
5458 }
5459
5460private:
5461 ByteProvider(LoadSDNode *Load, unsigned ByteOffset)
5462 : Load(Load), ByteOffset(ByteOffset) {}
5463};
5464
5465} // end anonymous namespace
5466
5467/// Recursively traverses the expression calculating the origin of the requested
5468/// byte of the given value. Returns None if the provider can't be calculated.
5469///
5470/// For all values except the root of the expression, this verifies that the
5471/// value has exactly one use and returns None otherwise. This way, if the
5472/// origin of the byte is returned, it's guaranteed that the values which
5473/// contribute to the byte are not used outside of this expression.
5474///
5475/// Because the parts of the expression are not allowed to have more than one
5476/// use, this function iterates over trees, not DAGs, so it never visits the
5477/// same node more than once.
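/// Illustrative example, assuming i32: requesting byte 1 of
/// (shl (zext (load i8 *p)), 8) recurses past the one-byte shift and yields
/// byte 0 of the load.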
5478static const Optional<ByteProvider>
5479calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
5480 bool Root = false) {
5481 // A typical i64-by-i8 pattern requires recursion up to a depth of 8 calls.
5482 if (Depth == 10)
5483 return None;
5484
5485 if (!Root && !Op.hasOneUse())
5486 return None;
5487
5488 assert(Op.getValueType().isScalarInteger() && "can't handle other types");
5489 unsigned BitWidth = Op.getValueSizeInBits();
5490 if (BitWidth % 8 != 0)
5491 return None;
5492 unsigned ByteWidth = BitWidth / 8;
5493 assert(Index < ByteWidth && "invalid index requested");
5494 (void) ByteWidth;
5495
5496 switch (Op.getOpcode()) {
5497 case ISD::OR: {
5498 auto LHS = calculateByteProvider(Op->getOperand(0), Index, Depth + 1);
5499 if (!LHS)
5500 return None;
5501 auto RHS = calculateByteProvider(Op->getOperand(1), Index, Depth + 1);
5502 if (!RHS)
5503 return None;
5504
5505 if (LHS->isConstantZero())
5506 return RHS;
5507 if (RHS->isConstantZero())
5508 return LHS;
5509 return None;
5510 }
5511 case ISD::SHL: {
5512 auto ShiftOp = dyn_cast<ConstantSDNode>(Op->getOperand(1));
5513 if (!ShiftOp)
5514 return None;
5515
5516 uint64_t BitShift = ShiftOp->getZExtValue();
5517 if (BitShift % 8 != 0)
5518 return None;
5519 uint64_t ByteShift = BitShift / 8;
5520
5521 return Index < ByteShift
5522 ? ByteProvider::getConstantZero()
5523 : calculateByteProvider(Op->getOperand(0), Index - ByteShift,
5524 Depth + 1);
5525 }
5526 case ISD::ANY_EXTEND:
5527 case ISD::SIGN_EXTEND:
5528 case ISD::ZERO_EXTEND: {
5529 SDValue NarrowOp = Op->getOperand(0);
5530 unsigned NarrowBitWidth = NarrowOp.getScalarValueSizeInBits();
5531 if (NarrowBitWidth % 8 != 0)
5532 return None;
5533 uint64_t NarrowByteWidth = NarrowBitWidth / 8;
5534
5535 if (Index >= NarrowByteWidth)
5536 return Op.getOpcode() == ISD::ZERO_EXTEND
5537 ? Optional<ByteProvider>(ByteProvider::getConstantZero())
5538 : None;
5539 return calculateByteProvider(NarrowOp, Index, Depth + 1);
5540 }
5541 case ISD::BSWAP:
5542 return calculateByteProvider(Op->getOperand(0), ByteWidth - Index - 1,
5543 Depth + 1);
5544 case ISD::LOAD: {
5545 auto L = cast<LoadSDNode>(Op.getNode());
5546 if (L->isVolatile() || L->isIndexed())
5547 return None;
5548
5549 unsigned NarrowBitWidth = L->getMemoryVT().getSizeInBits();
5550 if (NarrowBitWidth % 8 != 0)
5551 return None;
5552 uint64_t NarrowByteWidth = NarrowBitWidth / 8;
5553
5554 if (Index >= NarrowByteWidth)
5555 return L->getExtensionType() == ISD::ZEXTLOAD
5556 ? Optional<ByteProvider>(ByteProvider::getConstantZero())
5557 : None;
5558 return ByteProvider::getMemory(L, Index);
5559 }
5560 }
5561
5562 return None;
5563}
5564
5565/// Match a pattern where a wide type scalar value is loaded by several narrow
5566/// loads and combined by shifts and ors. Fold it into a single load or a load
5567/// and a BSWAP if the target supports it.
5568///
5569/// Assuming little endian target:
5570/// i8 *a = ...
5571/// i32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
5572/// =>
5573/// i32 val = *((i32)a)
5574///
5575/// i8 *a = ...
5576/// i32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
5577/// =>
5578/// i32 val = BSWAP(*((i32)a))
5579///
5580/// TODO: This rule matches complex patterns with OR node roots and doesn't
5581/// interact well with the worklist mechanism. When a part of the pattern is
5582/// updated (e.g. one of the loads), its direct users are put into the worklist,
5583/// but the root node of the pattern which triggers the load combine is not
5584/// necessarily a direct user of the changed node. For example, once the address
5585/// of the t28 load is reassociated, the load combine won't be triggered:
5586/// t25: i32 = add t4, Constant:i32<2>
5587/// t26: i64 = sign_extend t25
5588/// t27: i64 = add t2, t26
5589/// t28: i8,ch = load<LD1[%tmp9]> t0, t27, undef:i64
5590/// t29: i32 = zero_extend t28
5591/// t32: i32 = shl t29, Constant:i8<8>
5592/// t33: i32 = or t23, t32
5593/// As a possible fix, visitLoad can check whether the load is part of a load
5594/// combine pattern and add the corresponding OR roots to the worklist.
5595SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
5596 assert(N->getOpcode() == ISD::OR &&
5597        "Can only match load combining against OR nodes");
5598
5599 // Handles simple types only
5600 EVT VT = N->getValueType(0);
5601 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
5602 return SDValue();
5603 unsigned ByteWidth = VT.getSizeInBits() / 8;
5604
5605 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5606 // Before legalization we can introduce overly wide illegal loads, which will
5607 // later be split into legal-sized loads. This enables us to combine an i64
5608 // load-by-i8 pattern into a couple of i32 loads on 32-bit targets.
5609 if (LegalOperations && !TLI.isOperationLegal(ISD::LOAD, VT))
5610 return SDValue();
5611
5612 std::function<unsigned(unsigned, unsigned)> LittleEndianByteAt = [](
5613 unsigned BW, unsigned i) { return i; };
5614 std::function<unsigned(unsigned, unsigned)> BigEndianByteAt = [](
5615 unsigned BW, unsigned i) { return BW - i - 1; };
5616
5617 bool IsBigEndianTarget = DAG.getDataLayout().isBigEndian();
5618 auto MemoryByteOffset = [&] (ByteProvider P) {
5619 assert(P.isMemory() && "Must be a memory byte provider");
5620 unsigned LoadBitWidth = P.Load->getMemoryVT().getSizeInBits();
5621 assert(LoadBitWidth % 8 == 0 &&
5622        "can only analyze providers for individual bytes not bit");
5623 unsigned LoadByteWidth = LoadBitWidth / 8;
5624 return IsBigEndianTarget
5625 ? BigEndianByteAt(LoadByteWidth, P.ByteOffset)
5626 : LittleEndianByteAt(LoadByteWidth, P.ByteOffset);
5627 };
5628
5629 Optional<BaseIndexOffset> Base;
5630 SDValue Chain;
5631
5632 SmallPtrSet<LoadSDNode *, 8> Loads;
5633 Optional<ByteProvider> FirstByteProvider;
5634 int64_t FirstOffset = INT64_MAX;
5635
5636 // Check if all the bytes of the OR we are looking at are loaded from the same
5637 // base address. Collect byte offsets from the Base address in ByteOffsets.
5638 SmallVector<int64_t, 4> ByteOffsets(ByteWidth);
5639 for (unsigned i = 0; i < ByteWidth; i++) {
5640 auto P = calculateByteProvider(SDValue(N, 0), i, 0, /*Root=*/true);
5641 if (!P || !P->isMemory()) // All the bytes must be loaded from memory
5642 return SDValue();
5643
5644 LoadSDNode *L = P->Load;
5645 assert(L->hasNUsesOfValue(1, 0) && !L->isVolatile() && !L->isIndexed() &&
5646        "Must be enforced by calculateByteProvider");
5647 assert(L->getOffset().isUndef() && "Unindexed load must have undef offset");
5648
5649 // All loads must share the same chain
5650 SDValue LChain = L->getChain();
5651 if (!Chain)
5652 Chain = LChain;
5653 else if (Chain != LChain)
5654 return SDValue();
5655
5656 // Loads must share the same base address
5657 BaseIndexOffset Ptr = BaseIndexOffset::match(L, DAG);
5658 int64_t ByteOffsetFromBase = 0;
5659 if (!Base)
5660 Base = Ptr;
5661 else if (!Base->equalBaseIndex(Ptr, DAG, ByteOffsetFromBase))
5662 return SDValue();
5663
5664 // Calculate the offset of the current byte from the base address
5665 ByteOffsetFromBase += MemoryByteOffset(*P);
5666 ByteOffsets[i] = ByteOffsetFromBase;
5667
5668 // Remember the first byte load
5669 if (ByteOffsetFromBase < FirstOffset) {
5670 FirstByteProvider = P;
5671 FirstOffset = ByteOffsetFromBase;
5672 }
5673
5674 Loads.insert(L);
5675 }
5676 assert(!Loads.empty() && "All the bytes of the value must be loaded from "
5677        "memory, so there must be at least one load which produces the value");
5678 assert(Base && "Base address of the accessed memory location must be set");
5679 assert(FirstOffset != INT64_MAX && "First byte offset must be set");
5680
5681 // Check if the bytes of the OR we are looking at match either a big or a
5682 // little endian value load.
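 // Concrete example, assuming ByteWidth == 4: byte offsets {0, 1, 2, 3}
 // match a little endian load and {3, 2, 1, 0} match a big endian load.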
5683 bool BigEndian = true, LittleEndian = true;
5684 for (unsigned i = 0; i < ByteWidth; i++) {
5685 int64_t CurrentByteOffset = ByteOffsets[i] - FirstOffset;
5686 LittleEndian &= CurrentByteOffset == LittleEndianByteAt(ByteWidth, i);
5687 BigEndian &= CurrentByteOffset == BigEndianByteAt(ByteWidth, i);
5688 if (!BigEndian && !LittleEndian)
5689 return SDValue();
5690 }
5691 assert((BigEndian != LittleEndian) && "should be either or");
5692 assert(FirstByteProvider && "must be set");
5693
5694 // Ensure that the first byte is loaded from offset zero of the first load,
5695 // so the combined value can be loaded from the first load's address.
5696 if (MemoryByteOffset(*FirstByteProvider) != 0)
5697 return SDValue();
5698 LoadSDNode *FirstLoad = FirstByteProvider->Load;
5699
5700 // The node we are looking at matches the pattern; check if we can
5701 // replace it with a single load and bswap if needed.
5702
5703 // If the load needs a byte swap, check whether the target supports it
5704 bool NeedsBswap = IsBigEndianTarget != BigEndian;
5705
5706 // Before legalization we can introduce illegal bswaps, which will later be
5707 // converted to an explicit bswap sequence. This way we end up with a single
5708 // load and byte shuffling instead of several loads and several byte shuffles.
5709 if (NeedsBswap && LegalOperations && !TLI.isOperationLegal(ISD::BSWAP, VT))
5710 return SDValue();
5711
5712 // Check that a load of the wide type is both allowed and fast on the target
5713 bool Fast = false;
5714 bool Allowed = TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
5715 VT, FirstLoad->getAddressSpace(),
5716 FirstLoad->getAlignment(), &Fast);
5717 if (!Allowed || !Fast)
5718 return SDValue();
5719
5720 SDValue NewLoad =
5721 DAG.getLoad(VT, SDLoc(N), Chain, FirstLoad->getBasePtr(),
5722 FirstLoad->getPointerInfo(), FirstLoad->getAlignment());
5723
5724 // Transfer chain users from old loads to the new load.
5725 for (LoadSDNode *L : Loads)
5726 DAG.ReplaceAllUsesOfValueWith(SDValue(L, 1), SDValue(NewLoad.getNode(), 1));
5727
5728 return NeedsBswap ? DAG.getNode(ISD::BSWAP, SDLoc(N), VT, NewLoad) : NewLoad;
5729}
5730
5731// If the target has andn, bsl, or a similar bit-select instruction,
5732// we want to unfold masked merge, with canonical pattern of:
5733//   |        A        |  |B|
5734// ((x ^ y) & m) ^ y
5735//  |  D  |
5736// Into:
5737// (x & m) | (y & ~m)
5738// If y is a constant, and the 'andn' does not work with immediates,
5739// we unfold into a different pattern:
5740// ~(~x & m) & (m | y)
5741// NOTE: we don't unfold the pattern if 'xor' is actually a 'not', because at
5742// the very least that breaks andnpd / andnps patterns, and because those
5743// patterns are simplified in IR and shouldn't be created in the DAG.
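// Worked example, assuming 8-bit values x = 0b10101010, y = 0b01010101 and
// m = 0b11110000:
//   ((x ^ y) & m) ^ y == (x & m) | (y & ~m) == 0b10100101,
// i.e. the high nibble is selected from x and the low nibble from y.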
5744SDValue DAGCombiner::unfoldMaskedMerge(SDNode *N) {
5745 assert(N->getOpcode() == ISD::XOR);
5746
5747 // Don't touch 'not' (i.e. where y = -1).
5748 if (isAllOnesConstantOrAllOnesSplatConstant(N->getOperand(1)))
5749 return SDValue();
5750
5751 EVT VT = N->getValueType(0);
5752
5753 // There are 3 commutable operators in the pattern,
5754 // so we have to deal with 8 possible variants of the basic pattern.
5755 SDValue X, Y, M;
5756 auto matchAndXor = [&X, &Y, &M](SDValue And, unsigned XorIdx, SDValue Other) {
5757 if (And.getOpcode() != ISD::AND || !And.hasOneUse())
5758 return false;
5759 SDValue Xor = And.getOperand(XorIdx);
5760 if (Xor.getOpcode() != ISD::XOR || !Xor.hasOneUse())
5761 return false;
5762 SDValue Xor0 = Xor.getOperand(0);
5763 SDValue Xor1 = Xor.getOperand(1);
5764 // Don't touch 'not' (i.e. where y = -1).
5765 if (isAllOnesConstantOrAllOnesSplatConstant(Xor1))
5766 return false;
5767 if (Other == Xor0)
5768 std::swap(Xor0, Xor1);
5769 if (Other != Xor1)
5770 return false;
5771 X = Xor0;
5772 Y = Xor1;
5773 M = And.getOperand(XorIdx ? 0 : 1);
5774 return true;
5775 };
5776
5777 SDValue N0 = N->getOperand(0);
5778 SDValue N1 = N->getOperand(1);
5779 if (!matchAndXor(N0, 0, N1) && !matchAndXor(N0, 1, N1) &&
5780 !matchAndXor(N1, 0, N0) && !matchAndXor(N1, 1, N0))
5781 return SDValue();
5782
5783 // Don't do anything if the mask is constant. This should not be reachable.
5784 // InstCombine should have already unfolded this pattern, and DAGCombiner
5785 // probably shouldn't produce it either.
5786 if (isa<ConstantSDNode>(M.getNode()))
5787 return SDValue();
5788
5789 // We can transform if the target has AndNot
5790 if (!TLI.hasAndNot(M))
5791 return SDValue();
5792
5793 SDLoc DL(N);
5794
5795 // If Y is a constant, check that 'andn' works with immediates.
5796 if (!TLI.hasAndNot(Y)) {
5797 assert(TLI.hasAndNot(X) && "Only mask is a variable? Unreachable.");
5798 // If not, we need to do a bit more work to make sure andn is still used.
5799 SDValue NotX = DAG.getNOT(DL, X, VT);
5800 SDValue LHS = DAG.getNode(ISD::AND, DL, VT, NotX, M);
5801 SDValue NotLHS = DAG.getNOT(DL, LHS, VT);
5802 SDValue RHS = DAG.getNode(ISD::OR, DL, VT, M, Y);
5803 return DAG.getNode(ISD::AND, DL, VT, NotLHS, RHS);
5804 }
5805
5806 SDValue LHS = DAG.getNode(ISD::AND, DL, VT, X, M);
5807 SDValue NotM = DAG.getNOT(DL, M, VT);
5808 SDValue RHS = DAG.getNode(ISD::AND, DL, VT, Y, NotM);
5809
5810 return DAG.getNode(ISD::OR, DL, VT, LHS, RHS);
5811}
5812
5813SDValue DAGCombiner::visitXOR(SDNode *N) {
5814 SDValue N0 = N->getOperand(0);
5815 SDValue N1 = N->getOperand(1);
5816 EVT VT = N0.getValueType();
5817
5818 // fold vector ops
5819 if (VT.isVector()) {
5820 if (SDValue FoldedVOp = SimplifyVBinOp(N))
5821 return FoldedVOp;
5822
5823 // fold (xor x, 0) -> x, vector edition
5824 if (ISD::isBuildVectorAllZeros(N0.getNode()))
5825 return N1;
5826 if (ISD::isBuildVectorAllZeros(N1.getNode()))
5827 return N0;
5828 }
5829
5830 // fold (xor undef, undef) -> 0. This is a common idiom (misuse).
5831 if (N0.isUndef() && N1.isUndef())
5832 return DAG.getConstant(0, SDLoc(N), VT);
5833 // fold (xor x, undef) -> undef
5834 if (N0.isUndef())
5835 return N0;
5836 if (N1.isUndef())
5837 return N1;
5838 // fold (xor c1, c2) -> c1^c2
5839 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
5840 ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
5841 if (N0C && N1C)
5842 return DAG.FoldConstantArithmetic(ISD::XOR, SDLoc(N), VT, N0C, N1C);
5843 // canonicalize constant to RHS
5844 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
5845 !DAG.isConstantIntBuildVectorOrConstantInt(N1))
5846 return DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0);
5847 // fold (xor x, 0) -> x
5848 if (isNullConstant(N1))
5849 return N0;
5850
5851 if (SDValue NewSel = foldBinOpIntoSelect(N))
5852 return NewSel;
5853
5854 // reassociate xor
5855 if (SDValue RXOR = ReassociateOps(ISD::XOR, SDLoc(N), N0, N1))
5856 return RXOR;
5857
5858 // fold !(x cc y) -> (x !cc y)
5859 SDValue LHS, RHS, CC;
5860 if (TLI.isConstTrueVal(N1.getNode()) && isSetCCEquivalent(N0, LHS, RHS, CC)) {
5861 bool isInt = LHS.getValueType().isInteger();
5862 ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
5863 isInt);
5864
5865 if (!LegalOperations ||
5866 TLI.isCondCodeLegal(NotCC, LHS.getSimpleValueType())) {
5867 switch (N0.getOpcode()) {
5868 default:
5869 llvm_unreachable("Unhandled SetCC Equivalent!");
5870 case ISD::SETCC:
5871 return DAG.getSetCC(SDLoc(N0), VT, LHS, RHS, NotCC);
5872 case ISD::SELECT_CC:
5873 return DAG.getSelectCC(SDLoc(N0), LHS, RHS, N0.getOperand(2),
5874 N0.getOperand(3), NotCC);
5875 }
5876 }
5877 }
5878
5879 // fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y)))
5880 if (isOneConstant(N1) && N0.getOpcode() == ISD::ZERO_EXTEND &&
5881 N0.getNode()->hasOneUse() &&
5882 isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){
5883 SDValue V = N0.getOperand(0);
5884 SDLoc DL(N0);
5885 V = DAG.getNode(ISD::XOR, DL, V.getValueType(), V,
5886 DAG.getConstant(1, DL, V.getValueType()));
5887 AddToWorklist(V.getNode());
5888 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, V);
5889 }
5890
5891 // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc
5892 if (isOneConstant(N1) && VT == MVT::i1 && N0.hasOneUse() &&
5893 (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
5894 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
5895 if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) {
5896 unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
5897 LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS
5898 RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS
5899 AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode());
5900 return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
5901 }
5902 }
5903 // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants
5904 if (isAllOnesConstant(N1) && N0.hasOneUse() &&
5905 (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
5906 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
5907 if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) {
5908 unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
5909 LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS
5910 RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS
5911 AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode());
5912 return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
5913 }
5914 }
5915 // fold (xor (and x, y), y) -> (and (not x), y)
5916 if (N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
5917 N0->getOperand(1) == N1) {
5918 SDValue X = N0->getOperand(0);
5919 SDValue NotX = DAG.getNOT(SDLoc(X), X, VT);
5920 AddToWorklist(NotX.getNode());
5921 return DAG.getNode(ISD::AND, SDLoc(N), VT, NotX, N1);
5922 }
5923
5924 // fold Y = sra (X, size(X)-1); xor (add (X, Y), Y) -> (abs X)
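 // This is the classic branchless abs, assuming two's complement: Y is the
 // sign mask (0 or -1), so (add (X, Y)) ^ Y is X when X >= 0 and -X when
 // X < 0 (e.g. for i8 X = -5: Y = -1, (-5 + -1) ^ -1 == ~(-6) == 5).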
5925 if (TLI.isOperationLegalOrCustom(ISD::ABS, VT)) {
5926 SDValue A = N0.getOpcode() == ISD::ADD ? N0 : N1;
5927 SDValue S = N0.getOpcode() == ISD::SRA ? N0 : N1;
5928 if (A.getOpcode() == ISD::ADD && S.getOpcode() == ISD::SRA) {
5929 SDValue A0 = A.getOperand(0), A1 = A.getOperand(1);
5930 SDValue S0 = S.getOperand(0);
5931 if ((A0 == S && A1 == S0) || (A1 == S && A0 == S0)) {
5932 unsigned OpSizeInBits = VT.getScalarSizeInBits();
5933 if (ConstantSDNode *C = isConstOrConstSplat(S.getOperand(1)))
5934 if (C->getAPIntValue() == (OpSizeInBits - 1))
5935 return DAG.getNode(ISD::ABS, SDLoc(N), VT, S0);
5936 }
5937 }
5938 }
5939
5940 // fold (xor x, x) -> 0
5941 if (N0 == N1)
5942 return tryFoldToZero(SDLoc(N), TLI, VT, DAG, LegalOperations, LegalTypes);
5943
5944 // fold (xor (shl 1, x), -1) -> (rotl ~1, x)
5945 // Here is a concrete example of this equivalence:
5946 // i16 x == 14
5947 // i16 shl == 1 << 14 == 16384 == 0b0100000000000000
5948 // i16 xor == ~(1 << 14) == 49151 == 0b1011111111111111
5949 //
5950 // =>
5951 //
5952 // i16 ~1 == 0b1111111111111110
5953 // i16 rol(~1, 14) == 0b1011111111111111
5954 //
5955 // Some additional tips to help conceptualize this transform:
5956 // - Try to see the operation as placing a single zero in a value of all ones.
5957 // - There exists no value for x which would allow the result to contain zero.
5958 // - Values of x larger than the bitwidth are undefined and do not require a
5959 // consistent result.
5960 // - Pushing the zero left requires shifting one bits in from the right.
5961 // A rotate left of ~1 is a nice way of achieving the desired result.
5962 if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT) && N0.getOpcode() == ISD::SHL
5963 && isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0))) {
5964 SDLoc DL(N);
5965 return DAG.getNode(ISD::ROTL, DL, VT, DAG.getConstant(~1, DL, VT),
5966 N0.getOperand(1));
5967 }
5968
5969 // Simplify: xor (op x...), (op y...) -> (op (xor x, y))
5970 if (N0.getOpcode() == N1.getOpcode())
5971 if (SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N))
5972 return Tmp;
5973
5974 // Unfold ((x ^ y) & m) ^ y into (x & m) | (y & ~m) if profitable
5975 if (SDValue MM = unfoldMaskedMerge(N))
5976 return MM;
5977
5978 // Simplify the expression using non-local knowledge.
5979 if (SimplifyDemandedBits(SDValue(N, 0)))
5980 return SDValue(N, 0);
5981
5982 return SDValue();
5983}
5984
5985/// Handle transforms common to the three shifts, when the shift amount is a
5986/// constant.
5987SDValue DAGCombiner::visitShiftByConstant(SDNode *N, ConstantSDNode *Amt) {
5988 SDNode *LHS = N->getOperand(0).getNode();
5989 if (!LHS->hasOneUse()) return SDValue();
5990
5991 // We want to pull some binops through shifts, so that we have (and (shift))
5992 // instead of (shift (and)), likewise for add, or, xor, etc. This sort of
5993 // thing happens with address calculations, so it's important to canonicalize
5994 // it.
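 // Concrete example, assuming i32: (shl (and x, 255), 2) becomes
 // (and (shl x, 2), 1020), exposing the shift for address folding.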
5995 bool HighBitSet = false; // Can we transform this if the high bit is set?
5996
5997 switch (LHS->getOpcode()) {
5998 default: return SDValue();
5999 case ISD::OR:
6000 case ISD::XOR:
6001 HighBitSet = false; // We can only transform sra if the high bit is clear.
6002 break;
6003 case ISD::AND:
6004 HighBitSet = true; // We can only transform sra if the high bit is set.
6005 break;
6006 case ISD::ADD:
6007 if (N->getOpcode() != ISD::SHL)
6008 return SDValue(); // only shl(add) not sr[al](add).
6009 HighBitSet = false; // We can only transform sra if the high bit is clear.
6010 break;
6011 }
6012
6013 // We require the RHS of the binop to be a constant and not opaque as well.
6014 ConstantSDNode *BinOpCst = getAsNonOpaqueConstant(LHS->getOperand(1));
6015 if (!BinOpCst) return SDValue();
6016
6017 // FIXME: disable this unless the input to the binop is a shift by a constant
6018 // or is a copy/select. Enable this in other cases once it's known profitable.
6019 SDNode *BinOpLHSVal = LHS->getOperand(0).getNode();
6020 bool isShift = BinOpLHSVal->getOpcode() == ISD::SHL ||
6021 BinOpLHSVal->getOpcode() == ISD::SRA ||
6022 BinOpLHSVal->getOpcode() == ISD::SRL;
6023 bool isCopyOrSelect = BinOpLHSVal->getOpcode() == ISD::CopyFromReg ||
6024 BinOpLHSVal->getOpcode() == ISD::SELECT;
6025
6026 if ((!isShift || !isa<ConstantSDNode>(BinOpLHSVal->getOperand(1))) &&
6027 !isCopyOrSelect)
6028 return SDValue();
6029
6030 if (isCopyOrSelect && N->hasOneUse())
6031 return SDValue();
6032
6033 EVT VT = N->getValueType(0);
6034
6035 // If this is a signed shift right, and the high bit is modified by the
6036 // logical operation, do not perform the transformation. The HighBitSet
6037 // boolean indicates the value of the high bit of the constant which would
6038 // cause it to be modified for this operation.
6039 if (N->getOpcode() == ISD::SRA) {
6040 bool BinOpRHSSignSet = BinOpCst->getAPIntValue().isNegative();
6041 if (BinOpRHSSignSet != HighBitSet)
6042 return SDValue();
6043 }
6044
6045 if (!TLI.isDesirableToCommuteWithShift(LHS))
6046 return SDValue();
6047
6048 // Fold the constants, shifting the binop RHS by the shift amount.
6049 SDValue NewRHS = DAG.getNode(N->getOpcode(), SDLoc(LHS->getOperand(1)),
6050 N->getValueType(0),
6051 LHS->getOperand(1), N->getOperand(1));
6052 assert(isa<ConstantSDNode>(NewRHS) && "Folding was not successful!");
6053
6054 // Create the new shift.
6055 SDValue NewShift = DAG.getNode(N->getOpcode(),
6056 SDLoc(LHS->getOperand(0)),
6057 VT, LHS->getOperand(0), N->getOperand(1));
6058
6059 // Create the new binop.
6060 return DAG.getNode(LHS->getOpcode(), SDLoc(N), VT, NewShift, NewRHS);
6061}
6062
6063SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) {
6064 assert(N->getOpcode() == ISD::TRUNCATE);
6065 assert(N->getOperand(0).getOpcode() == ISD::AND);
6066
6067 // (truncate:TruncVT (and N00, N01C)) -> (and (truncate:TruncVT N00), TruncC)
6068 if (N->hasOneUse() && N->getOperand(0).hasOneUse()) {
6069 SDValue N01 = N->getOperand(0).getOperand(1);
6070 if (isConstantOrConstantVector(N01, /* NoOpaques */ true)) {
6071 SDLoc DL(N);
6072 EVT TruncVT = N->getValueType(0);
6073 SDValue N00 = N->getOperand(0).getOperand(0);
6074 SDValue Trunc00 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N00);
6075 SDValue Trunc01 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N01);
6076 AddToWorklist(Trunc00.getNode());
6077 AddToWorklist(Trunc01.getNode());
6078 return DAG.getNode(ISD::AND, DL, TruncVT, Trunc00, Trunc01);
6079 }
6080 }
6081
6082 return SDValue();
6083}
6084
6085SDValue DAGCombiner::visitRotate(SDNode *N) {
6086 SDLoc dl(N);
6087 SDValue N0 = N->getOperand(0);
6088 SDValue N1 = N->getOperand(1);
6089 EVT VT = N->getValueType(0);
6090 unsigned Bitsize = VT.getScalarSizeInBits();
6091
6092 // fold (rot x, 0) -> x
6093 if (isNullConstantOrNullSplatConstant(N1))
6094 return N0;
6095
6096 // fold (rot x, c) -> (rot x, c % BitSize)
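 // e.g., assuming i32: (rotl x, 40) -> (rotl x, 8), since 40 % 32 == 8.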
6097 if (ConstantSDNode *Cst = isConstOrConstSplat(N1)) {
6098 if (Cst->getAPIntValue().uge(Bitsize)) {
6099 uint64_t RotAmt = Cst->getAPIntValue().urem(Bitsize);
6100 return DAG.getNode(N->getOpcode(), dl, VT, N0,
6101 DAG.getConstant(RotAmt, dl, N1.getValueType()));
6102 }
6103 }
6104
6105 // fold (rot* x, (trunc (and y, c))) -> (rot* x, (and (trunc y), (trunc c))).
6106 if (N1.getOpcode() == ISD::TRUNCATE &&
6107 N1.getOperand(0).getOpcode() == ISD::AND) {
6108 if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
6109 return DAG.getNode(N->getOpcode(), dl, VT, N0, NewOp1);
6110 }
6111
6112 unsigned NextOp = N0.getOpcode();
6113 // fold (rot* (rot* x, c2), c1) -> (rot* x, c1 +- c2 % bitsize)
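 // Concrete examples, assuming i32: (rotl (rotl x, 3), 5) -> (rotl x, 8),
 // while mixed directions subtract: (rotl (rotr x, 3), 5) -> (rotl x, 2).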
6114 if (NextOp == ISD::ROTL || NextOp == ISD::ROTR) {
6115 SDNode *C1 = DAG.isConstantIntBuildVectorOrConstantInt(N1);
6116 SDNode *C2 = DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1));
6117 if (C1 && C2 && C1->getValueType(0) == C2->getValueType(0)) {
6118 EVT ShiftVT = C1->getValueType(0);
6119 bool SameSide = (N->getOpcode() == NextOp);
6120 unsigned CombineOp = SameSide ? ISD::ADD : ISD::SUB;
6121 if (SDValue CombinedShift =
6122 DAG.FoldConstantArithmetic(CombineOp, dl, ShiftVT, C1, C2)) {
6123 SDValue BitsizeC = DAG.getConstant(Bitsize, dl, ShiftVT);
6124 SDValue CombinedShiftNorm = DAG.FoldConstantArithmetic(
6125 ISD::SREM, dl, ShiftVT, CombinedShift.getNode(),
6126 BitsizeC.getNode());
6127 return DAG.getNode(N->getOpcode(), dl, VT, N0->getOperand(0),
6128 CombinedShiftNorm);
6129 }
6130 }
6131 }
6132 return SDValue();
6133}
6134
6135SDValue DAGCombiner::visitSHL(SDNode *N) {
6136 SDValue N0 = N->getOperand(0);
6137 SDValue N1 = N->getOperand(1);
6138 EVT VT = N0.getValueType();
6139 unsigned OpSizeInBits = VT.getScalarSizeInBits();
6140
6141 // fold vector ops
6142 if (VT.isVector()) {
6143 if (SDValue FoldedVOp = SimplifyVBinOp(N))
6144 return FoldedVOp;
6145
6146 BuildVectorSDNode *N1CV = dyn_cast<BuildVectorSDNode>(N1);
6147 // If setcc produces an all-ones true value then:
6148 // (shl (and (setcc) N01CV) N1CV) -> (and (setcc) N01CV<<N1CV)
6149 if (N1CV && N1CV->isConstant()) {
6150 if (N0.getOpcode() == ISD::AND) {
6151 SDValue N00 = N0->getOperand(0);
6152 SDValue N01 = N0->getOperand(1);
6153 BuildVectorSDNode *N01CV = dyn_cast<BuildVectorSDNode>(N01);
6154
6155 if (N01CV && N01CV->isConstant() && N00.getOpcode() == ISD::SETCC &&
6156 TLI.getBooleanContents(N00.getOperand(0).getValueType()) ==
6157 TargetLowering::ZeroOrNegativeOneBooleanContent) {
6158 if (SDValue C = DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT,
6159 N01CV, N1CV))
6160 return DAG.getNode(ISD::AND, SDLoc(N), VT, N00, C);
6161 }
6162 }
6163 }
6164 }
6165
6166 ConstantSDNode *N1C = isConstOrConstSplat(N1);
6167
6168 // fold (shl c1, c2) -> c1<<c2
6169 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
6170 if (N0C && N1C && !N1C->isOpaque())
6171 return DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT, N0C, N1C);
6172 // fold (shl 0, x) -> 0
6173 if (isNullConstantOrNullSplatConstant(N0))
6174 return N0;
6175 // fold (shl x, c >= size(x)) -> undef
6176 // NOTE: ALL vector element shift amounts must be too big to avoid partial UNDEFs.
6177 auto MatchShiftTooBig = [OpSizeInBits](ConstantSDNode *Val) {
6178 return Val->getAPIntValue().uge(OpSizeInBits);
6179 };
6180 if (ISD::matchUnaryPredicate(N1, MatchShiftTooBig))
6181 return DAG.getUNDEF(VT);
6182 // fold (shl x, 0) -> x
6183 if (N1C && N1C->isNullValue())
6184 return N0;
6185 // fold (shl undef, x) -> 0
6186 if (N0.isUndef())
6187 return DAG.getConstant(0, SDLoc(N), VT);
6188
6189 if (SDValue NewSel = foldBinOpIntoSelect(N))
6190 return NewSel;
6191
6192 // if (shl x, c) is known to be zero, return 0
6193 if (DAG.MaskedValueIsZero(SDValue(N, 0),
6194 APInt::getAllOnesValue(OpSizeInBits)))
6195 return DAG.getConstant(0, SDLoc(N), VT);
6196 // fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
6197 if (N1.getOpcode() == ISD::TRUNCATE &&
6198 N1.getOperand(0).getOpcode() == ISD::AND) {
6199 if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
6200 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, NewOp1);
6201 }
6202
6203 if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
6204 return SDValue(N, 0);
6205
6206 // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2))
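 // Concrete example, assuming i8: (shl (shl x, 3), 4) -> (shl x, 7), but
 // (shl (shl x, 5), 4) -> 0 because 5 + 4 >= 8.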
6207 if (N0.getOpcode() == ISD::SHL) {
6208 auto MatchOutOfRange = [OpSizeInBits](ConstantSDNode *LHS,
6209 ConstantSDNode *RHS) {
6210 APInt c1 = LHS->getAPIntValue();
6211 APInt c2 = RHS->getAPIntValue();
6212 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
6213 return (c1 + c2).uge(OpSizeInBits);
6214 };
6215 if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchOutOfRange))
6216 return DAG.getConstant(0, SDLoc(N), VT);
6217
6218 auto MatchInRange = [OpSizeInBits](ConstantSDNode *LHS,
6219 ConstantSDNode *RHS) {
6220 APInt c1 = LHS->getAPIntValue();
6221 APInt c2 = RHS->getAPIntValue();
6222 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
6223 return (c1 + c2).ult(OpSizeInBits);
6224 };
6225 if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchInRange)) {
6226 SDLoc DL(N);
6227 EVT ShiftVT = N1.getValueType();
6228 SDValue Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, N1, N0.getOperand(1));
6229 return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0), Sum);
6230 }
6231 }
6232
6233 // fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2)))
6234 // For this to be valid, the second form must not preserve any of the bits
6235 // that are shifted out by the inner shift in the first form. This means
6236 // the outer shift size must be >= the number of bits added by the ext.
6237 // As a corollary, we don't care what kind of ext it is.
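 // Concrete example, assuming i16 extended to i32:
 //   (shl (zext (shl x, 4)), 20) -> (shl (zext x), 24)
 // is valid because the outer shift of 20 >= the 16 bits added by the ext.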
6238 if (N1C && (N0.getOpcode() == ISD::ZERO_EXTEND ||
6239 N0.getOpcode() == ISD::ANY_EXTEND ||
6240 N0.getOpcode() == ISD::SIGN_EXTEND) &&
6241 N0.getOperand(0).getOpcode() == ISD::SHL) {
6242 SDValue N0Op0 = N0.getOperand(0);
6243 if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) {
6244 APInt c1 = N0Op0C1->getAPIntValue();
6245 APInt c2 = N1C->getAPIntValue();
6246 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
6247
6248 EVT InnerShiftVT = N0Op0.getValueType();
6249 uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits();
6250 if (c2.uge(OpSizeInBits - InnerShiftSize)) {
6251 SDLoc DL(N0);
6252 APInt Sum = c1 + c2;
6253 if (Sum.uge(OpSizeInBits))
6254 return DAG.getConstant(0, DL, VT);
6255
6256 return DAG.getNode(
6257 ISD::SHL, DL, VT,
6258 DAG.getNode(N0.getOpcode(), DL, VT, N0Op0->getOperand(0)),
6259 DAG.getConstant(Sum.getZExtValue(), DL, N1.getValueType()));
6260 }
6261 }
6262 }
6263
6264 // fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C))
6265 // Only fold this if the inner zext has no other uses to avoid increasing
6266 // the total number of instructions.
6267 if (N1C && N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() &&
6268 N0.getOperand(0).getOpcode() == ISD::SRL) {
6269 SDValue N0Op0 = N0.getOperand(0);
6270 if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) {
6271 if (N0Op0C1->getAPIntValue().ult(VT.getScalarSizeInBits())) {
6272 uint64_t c1 = N0Op0C1->getZExtValue();
6273 uint64_t c2 = N1C->getZExtValue();
6274 if (c1 == c2) {
6275 SDValue NewOp0 = N0.getOperand(0);
6276 EVT CountVT = NewOp0.getOperand(1).getValueType();
6277 SDLoc DL(N);
6278 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, NewOp0.getValueType(),
6279 NewOp0,
6280 DAG.getConstant(c2, DL, CountVT));
6281 AddToWorklist(NewSHL.getNode());
6282 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N0), VT, NewSHL);
6283 }
6284 }
6285 }
6286 }
6287
6288 // fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2
6289 // fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C2-C1)) if C1 > C2
6290 if (N1C && (N0.getOpcode() == ISD::SRL || N0.getOpcode() == ISD::SRA) &&
6291 N0->getFlags().hasExact()) {
6292 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
6293 uint64_t C1 = N0C1->getZExtValue();
6294 uint64_t C2 = N1C->getZExtValue();
6295 SDLoc DL(N);
6296 if (C1 <= C2)
6297 return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
6298 DAG.getConstant(C2 - C1, DL, N1.getValueType()));
6299 return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0),
6300 DAG.getConstant(C1 - C2, DL, N1.getValueType()));
6301 }
6302 }
6303
6304  // fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) or
6305  //                               (and (srl x, (sub c1, c2)), MASK)
6306 // Only fold this if the inner shift has no other uses -- if it does, folding
6307 // this will increase the total number of instructions.
6308 if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
6309 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
6310 uint64_t c1 = N0C1->getZExtValue();
6311 if (c1 < OpSizeInBits) {
6312 uint64_t c2 = N1C->getZExtValue();
6313 APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1);
6314 SDValue Shift;
6315 if (c2 > c1) {
6316 Mask <<= c2 - c1;
6317 SDLoc DL(N);
6318 Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
6319 DAG.getConstant(c2 - c1, DL, N1.getValueType()));
6320 } else {
6321 Mask.lshrInPlace(c1 - c2);
6322 SDLoc DL(N);
6323 Shift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0),
6324 DAG.getConstant(c1 - c2, DL, N1.getValueType()));
6325 }
6326 SDLoc DL(N0);
6327 return DAG.getNode(ISD::AND, DL, VT, Shift,
6328 DAG.getConstant(Mask, DL, VT));
6329 }
6330 }
6331 }
6332
6333 // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
6334 if (N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1) &&
6335 isConstantOrConstantVector(N1, /* No Opaques */ true)) {
6336 SDLoc DL(N);
6337 SDValue AllBits = DAG.getAllOnesConstant(DL, VT);
6338 SDValue HiBitsMask = DAG.getNode(ISD::SHL, DL, VT, AllBits, N1);
6339 return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), HiBitsMask);
6340 }
6341
6342 // fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
6343 // fold (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
6344 // Variant of version done on multiply, except mul by a power of 2 is turned
6345 // into a shift.
6346 if ((N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR) &&
6347 N0.getNode()->hasOneUse() &&
6348 isConstantOrConstantVector(N1, /* No Opaques */ true) &&
6349 isConstantOrConstantVector(N0.getOperand(1), /* No Opaques */ true)) {
6350 SDValue Shl0 = DAG.getNode(ISD::SHL, SDLoc(N0), VT, N0.getOperand(0), N1);
6351 SDValue Shl1 = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1);
6352 AddToWorklist(Shl0.getNode());
6353 AddToWorklist(Shl1.getNode());
6354 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, Shl0, Shl1);
6355 }
6356
6357 // fold (shl (mul x, c1), c2) -> (mul x, c1 << c2)
6358 if (N0.getOpcode() == ISD::MUL && N0.getNode()->hasOneUse() &&
6359 isConstantOrConstantVector(N1, /* No Opaques */ true) &&
6360 isConstantOrConstantVector(N0.getOperand(1), /* No Opaques */ true)) {
6361 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1);
6362 if (isConstantOrConstantVector(Shl))
6363 return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), Shl);
6364 }
6365
6366 if (N1C && !N1C->isOpaque())
6367 if (SDValue NewSHL = visitShiftByConstant(N, N1C))
6368 return NewSHL;
6369
6370 return SDValue();
6371}
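// Worked example of the (shl (shl x, c1), c2) folds above -- an
// illustrative sketch with i8 constants chosen for demonstration only:
//   (shl (shl x, 3), 6) -> 0           ; 3 + 6 = 9 >= 8, everything shifts out
//   (shl (shl x, 3), 2) -> (shl x, 5)  ; 3 + 2 = 5 <  8, the shifts combine
// The extra bit passed to zeroExtendToMatch keeps the c1 + c2 addition in
// the predicates from wrapping when both constants are near the type width.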
6372
6373SDValue DAGCombiner::visitSRA(SDNode *N) {
6374 SDValue N0 = N->getOperand(0);
6375 SDValue N1 = N->getOperand(1);
6376 EVT VT = N0.getValueType();
6377 unsigned OpSizeInBits = VT.getScalarSizeInBits();
6378
6379 // Arithmetic shifting an all-sign-bit value is a no-op.
6380 // fold (sra 0, x) -> 0
6381 // fold (sra -1, x) -> -1
6382 if (DAG.ComputeNumSignBits(N0) == OpSizeInBits)
6383 return N0;
6384
6385 // fold vector ops
6386 if (VT.isVector())
6387 if (SDValue FoldedVOp = SimplifyVBinOp(N))
6388 return FoldedVOp;
6389
6390 ConstantSDNode *N1C = isConstOrConstSplat(N1);
6391
6392  // fold (sra c1, c2) -> c1 >>s c2
6393 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
6394 if (N0C && N1C && !N1C->isOpaque())
6395 return DAG.FoldConstantArithmetic(ISD::SRA, SDLoc(N), VT, N0C, N1C);
6396 // fold (sra x, c >= size(x)) -> undef
6397 // NOTE: ALL vector elements must be too big to avoid partial UNDEFs.
6398 auto MatchShiftTooBig = [OpSizeInBits](ConstantSDNode *Val) {
6399 return Val->getAPIntValue().uge(OpSizeInBits);
6400 };
6401 if (ISD::matchUnaryPredicate(N1, MatchShiftTooBig))
6402 return DAG.getUNDEF(VT);
6403 // fold (sra x, 0) -> x
6404 if (N1C && N1C->isNullValue())
6405 return N0;
6406
6407 if (SDValue NewSel = foldBinOpIntoSelect(N))
6408 return NewSel;
6409
6410  // fold (sra (shl x, c1), c1) -> sext_inreg, for some c1, if the target
6411  // supports sext_inreg.
6412 if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
6413 unsigned LowBits = OpSizeInBits - (unsigned)N1C->getZExtValue();
6414 EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
6415 if (VT.isVector())
6416 ExtVT = EVT::getVectorVT(*DAG.getContext(),
6417 ExtVT, VT.getVectorNumElements());
6418 if ((!LegalOperations ||
6419 TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, ExtVT)))
6420 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
6421 N0.getOperand(0), DAG.getValueType(ExtVT));
6422 }
6423
6424 // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
6425 if (N0.getOpcode() == ISD::SRA) {
6426 SDLoc DL(N);
6427 EVT ShiftVT = N1.getValueType();
6428
6429 auto MatchOutOfRange = [OpSizeInBits](ConstantSDNode *LHS,
6430 ConstantSDNode *RHS) {
6431 APInt c1 = LHS->getAPIntValue();
6432 APInt c2 = RHS->getAPIntValue();
6433 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
6434 return (c1 + c2).uge(OpSizeInBits);
6435 };
6436 if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchOutOfRange))
6437 return DAG.getNode(ISD::SRA, DL, VT, N0.getOperand(0),
6438 DAG.getConstant(OpSizeInBits - 1, DL, ShiftVT));
6439
6440 auto MatchInRange = [OpSizeInBits](ConstantSDNode *LHS,
6441 ConstantSDNode *RHS) {
6442 APInt c1 = LHS->getAPIntValue();
6443 APInt c2 = RHS->getAPIntValue();
6444 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
6445 return (c1 + c2).ult(OpSizeInBits);
6446 };
6447 if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchInRange)) {
6448 SDValue Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, N1, N0.getOperand(1));
6449 return DAG.getNode(ISD::SRA, DL, VT, N0.getOperand(0), Sum);
6450 }
6451 }
6452
6453 // fold (sra (shl X, m), (sub result_size, n))
6454 // -> (sign_extend (trunc (shl X, (sub (sub result_size, n), m)))) for
6455 // result_size - n != m.
6456  // If truncate is free for the target, sext(shl) is likely to result in better
6457 // code.
6458 if (N0.getOpcode() == ISD::SHL && N1C) {
6459    // Get the two constants of the shifts, CN0 = m, CN = n.
6460 const ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1));
6461 if (N01C) {
6462 LLVMContext &Ctx = *DAG.getContext();
6463 // Determine what the truncate's result bitsize and type would be.
6464 EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - N1C->getZExtValue());
6465
6466 if (VT.isVector())
6467 TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorNumElements());
6468
6469 // Determine the residual right-shift amount.
6470 int ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();
6471
6472 // If the shift is not a no-op (in which case this should be just a sign
6473      // extend already), the truncated-to type is legal, sign_extend is legal
6474 // on that type, and the truncate to that type is both legal and free,
6475 // perform the transform.
6476 if ((ShiftAmt > 0) &&
6477 TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) &&
6478 TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
6479 TLI.isTruncateFree(VT, TruncVT)) {
6480 SDLoc DL(N);
6481 SDValue Amt = DAG.getConstant(ShiftAmt, DL,
6482 getShiftAmountTy(N0.getOperand(0).getValueType()));
6483 SDValue Shift = DAG.getNode(ISD::SRL, DL, VT,
6484 N0.getOperand(0), Amt);
6485 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT,
6486 Shift);
6487 return DAG.getNode(ISD::SIGN_EXTEND, DL,
6488 N->getValueType(0), Trunc);
6489 }
6490 }
6491 }
6492
6493 // fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
6494 if (N1.getOpcode() == ISD::TRUNCATE &&
6495 N1.getOperand(0).getOpcode() == ISD::AND) {
6496 if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
6497 return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0, NewOp1);
6498 }
6499
6500 // fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
6501 // if c1 is equal to the number of bits the trunc removes
6502 if (N0.getOpcode() == ISD::TRUNCATE &&
6503 (N0.getOperand(0).getOpcode() == ISD::SRL ||
6504 N0.getOperand(0).getOpcode() == ISD::SRA) &&
6505 N0.getOperand(0).hasOneUse() &&
6506 N0.getOperand(0).getOperand(1).hasOneUse() &&
6507 N1C) {
6508 SDValue N0Op0 = N0.getOperand(0);
6509 if (ConstantSDNode *LargeShift = isConstOrConstSplat(N0Op0.getOperand(1))) {
6510 unsigned LargeShiftVal = LargeShift->getZExtValue();
6511 EVT LargeVT = N0Op0.getValueType();
6512
6513 if (LargeVT.getScalarSizeInBits() - OpSizeInBits == LargeShiftVal) {
6514 SDLoc DL(N);
6515 SDValue Amt =
6516 DAG.getConstant(LargeShiftVal + N1C->getZExtValue(), DL,
6517 getShiftAmountTy(N0Op0.getOperand(0).getValueType()));
6518 SDValue SRA = DAG.getNode(ISD::SRA, DL, LargeVT,
6519 N0Op0.getOperand(0), Amt);
6520 return DAG.getNode(ISD::TRUNCATE, DL, VT, SRA);
6521 }
6522 }
6523 }
6524
6525 // Simplify, based on bits shifted out of the LHS.
6526 if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
6527 return SDValue(N, 0);
6528
6529 // If the sign bit is known to be zero, switch this to a SRL.
6530 if (DAG.SignBitIsZero(N0))
6531 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, N1);
6532
6533 if (N1C && !N1C->isOpaque())
6534 if (SDValue NewSRA = visitShiftByConstant(N, N1C))
6535 return NewSRA;
6536
6537 return SDValue();
6538}
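// Worked example of the (sra (shl x, c1), c1) fold above -- an
// illustrative sketch with i32 constants chosen for demonstration only:
//   (sra (shl x, 24), 24) -> (sign_extend_inreg x, i8)
// Shifting left and then arithmetically right by 24 bits leaves the low
// 8 bits of x sign-extended to 32 bits, which is exactly what
// SIGN_EXTEND_INREG with an i8 extension type computes.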
6539
6540SDValue DAGCombiner::visitSRL(SDNode *N) {
6541 SDValue N0 = N->getOperand(0);
6542 SDValue N1 = N->getOperand(1);
6543 EVT VT = N0.getValueType();
6544 unsigned OpSizeInBits = VT.getScalarSizeInBits();
6545
6546 // fold vector ops
6547 if (VT.isVector())
6548 if (SDValue FoldedVOp = SimplifyVBinOp(N))
6549 return FoldedVOp;
6550
6551 ConstantSDNode *N1C = isConstOrConstSplat(N1);
6552
6553 // fold (srl c1, c2) -> c1 >>u c2
6554 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
6555 if (N0C && N1C && !N1C->isOpaque())
6556 return DAG.FoldConstantArithmetic(ISD::SRL, SDLoc(N), VT, N0C, N1C);
6557 // fold (srl 0, x) -> 0
6558 if (isNullConstantOrNullSplatConstant(N0))
6559 return N0;
6560 // fold (srl x, c >= size(x)) -> undef
6561 // NOTE: ALL vector elements must be too big to avoid partial UNDEFs.
6562 auto MatchShiftTooBig = [OpSizeInBits](ConstantSDNode *Val) {
6563 return Val->getAPIntValue().uge(OpSizeInBits);
6564 };
6565 if (ISD::matchUnaryPredicate(N1, MatchShiftTooBig))
6566 return DAG.getUNDEF(VT);
6567 // fold (srl x, 0) -> x
6568 if (N1C && N1C->isNullValue())
6569 return N0;
6570
6571 if (SDValue NewSel = foldBinOpIntoSelect(N))
6572 return NewSel;
6573
6574 // if (srl x, c) is known to be zero, return 0
6575 if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
6576 APInt::getAllOnesValue(OpSizeInBits)))
6577 return DAG.getConstant(0, SDLoc(N), VT);
6578
6579 // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
6580 if (N0.getOpcode() == ISD::SRL) {
6581 auto MatchOutOfRange = [OpSizeInBits](ConstantSDNode *LHS,
6582 ConstantSDNode *RHS) {
6583 APInt c1 = LHS->getAPIntValue();
6584 APInt c2 = RHS->getAPIntValue();
6585 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
6586 return (c1 + c2).uge(OpSizeInBits);
6587 };
6588 if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchOutOfRange))
6589 return DAG.getConstant(0, SDLoc(N), VT);
6590
6591 auto MatchInRange = [OpSizeInBits](ConstantSDNode *LHS,
6592 ConstantSDNode *RHS) {
6593 APInt c1 = LHS->getAPIntValue();
6594 APInt c2 = RHS->getAPIntValue();
6595 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
6596 return (c1 + c2).ult(OpSizeInBits);
6597 };
6598 if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchInRange)) {
6599 SDLoc DL(N);
6600 EVT ShiftVT = N1.getValueType();
6601 SDValue Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, N1, N0.getOperand(1));
6602 return DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), Sum);
6603 }
6604 }
6605
6606 // fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2)))
6607 if (N1C && N0.getOpcode() == ISD::TRUNCATE &&
6608 N0.getOperand(0).getOpcode() == ISD::SRL) {
6609 if (auto N001C = isConstOrConstSplat(N0.getOperand(0).getOperand(1))) {
6610 uint64_t c1 = N001C->getZExtValue();
6611 uint64_t c2 = N1C->getZExtValue();
6612 EVT InnerShiftVT = N0.getOperand(0).getValueType();
6613 EVT ShiftCountVT = N0.getOperand(0).getOperand(1).getValueType();
6614 uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits();
6615      // This is only valid if OpSizeInBits + c1 == the size of the inner shift.
6616 if (c1 + OpSizeInBits == InnerShiftSize) {
6617 SDLoc DL(N0);
6618 if (c1 + c2 >= InnerShiftSize)
6619 return DAG.getConstant(0, DL, VT);
6620 return DAG.getNode(ISD::TRUNCATE, DL, VT,
6621 DAG.getNode(ISD::SRL, DL, InnerShiftVT,
6622 N0.getOperand(0).getOperand(0),
6623 DAG.getConstant(c1 + c2, DL,
6624 ShiftCountVT)));
6625 }
6626 }
6627 }
6628
6629 // fold (srl (shl x, c), c) -> (and x, cst2)
6630 if (N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 &&
6631 isConstantOrConstantVector(N1, /* NoOpaques */ true)) {
6632 SDLoc DL(N);
6633 SDValue Mask =
6634 DAG.getNode(ISD::SRL, DL, VT, DAG.getAllOnesConstant(DL, VT), N1);
6635 AddToWorklist(Mask.getNode());
6636 return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), Mask);
6637 }
6638
6639 // fold (srl (anyextend x), c) -> (and (anyextend (srl x, c)), mask)
6640 if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
6641 // Shifting in all undef bits?
6642 EVT SmallVT = N0.getOperand(0).getValueType();
6643 unsigned BitSize = SmallVT.getScalarSizeInBits();
6644 if (N1C->getZExtValue() >= BitSize)
6645 return DAG.getUNDEF(VT);
6646
6647 if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) {
6648 uint64_t ShiftAmt = N1C->getZExtValue();
6649 SDLoc DL0(N0);
6650 SDValue SmallShift = DAG.getNode(ISD::SRL, DL0, SmallVT,
6651 N0.getOperand(0),
6652 DAG.getConstant(ShiftAmt, DL0,
6653 getShiftAmountTy(SmallVT)));
6654 AddToWorklist(SmallShift.getNode());
6655 APInt Mask = APInt::getLowBitsSet(OpSizeInBits, OpSizeInBits - ShiftAmt);
6656 SDLoc DL(N);
6657 return DAG.getNode(ISD::AND, DL, VT,
6658 DAG.getNode(ISD::ANY_EXTEND, DL, VT, SmallShift),
6659 DAG.getConstant(Mask, DL, VT));
6660 }
6661 }
6662
6663 // fold (srl (sra X, Y), 31) -> (srl X, 31). This srl only looks at the sign
6664 // bit, which is unmodified by sra.
6665 if (N1C && N1C->getZExtValue() + 1 == OpSizeInBits) {
6666 if (N0.getOpcode() == ISD::SRA)
6667 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0), N1);
6668 }
6669
6670 // fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit).
6671 if (N1C && N0.getOpcode() == ISD::CTLZ &&
6672 N1C->getAPIntValue() == Log2_32(OpSizeInBits)) {
6673 KnownBits Known;
6674 DAG.computeKnownBits(N0.getOperand(0), Known);
6675
6676 // If any of the input bits are KnownOne, then the input couldn't be all
6677 // zeros, thus the result of the srl will always be zero.
6678 if (Known.One.getBoolValue()) return DAG.getConstant(0, SDLoc(N0), VT);
6679
6680    // If all of the bits input to the ctlz node are known to be zero, then
6681 // the result of the ctlz is "32" and the result of the shift is one.
6682 APInt UnknownBits = ~Known.Zero;
6683 if (UnknownBits == 0) return DAG.getConstant(1, SDLoc(N0), VT);
6684
6685 // Otherwise, check to see if there is exactly one bit input to the ctlz.
6686 if (UnknownBits.isPowerOf2()) {
6687      // Okay, we know that only the single bit specified by UnknownBits
6688 // could be set on input to the CTLZ node. If this bit is set, the SRL
6689 // will return 0, if it is clear, it returns 1. Change the CTLZ/SRL pair
6690 // to an SRL/XOR pair, which is likely to simplify more.
6691 unsigned ShAmt = UnknownBits.countTrailingZeros();
6692 SDValue Op = N0.getOperand(0);
6693
6694 if (ShAmt) {
6695 SDLoc DL(N0);
6696 Op = DAG.getNode(ISD::SRL, DL, VT, Op,
6697 DAG.getConstant(ShAmt, DL,
6698 getShiftAmountTy(Op.getValueType())));
6699 AddToWorklist(Op.getNode());
6700 }
6701
6702 SDLoc DL(N);
6703 return DAG.getNode(ISD::XOR, DL, VT,
6704 Op, DAG.getConstant(1, DL, VT));
6705 }
6706 }
6707
6708 // fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
6709 if (N1.getOpcode() == ISD::TRUNCATE &&
6710 N1.getOperand(0).getOpcode() == ISD::AND) {
6711 if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
6712 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, NewOp1);
6713 }
6714
6715 // fold operands of srl based on knowledge that the low bits are not
6716 // demanded.
6717 if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
6718 return SDValue(N, 0);
6719
6720 if (N1C && !N1C->isOpaque())
6721 if (SDValue NewSRL = visitShiftByConstant(N, N1C))
6722 return NewSRL;
6723
6724 // Attempt to convert a srl of a load into a narrower zero-extending load.
6725 if (SDValue NarrowLoad = ReduceLoadWidth(N))
6726 return NarrowLoad;
6727
6728 // Here is a common situation. We want to optimize:
6729 //
6730 // %a = ...
6731 // %b = and i32 %a, 2
6732 // %c = srl i32 %b, 1
6733 // brcond i32 %c ...
6734 //
6735 // into
6736 //
6737 // %a = ...
6738 // %b = and %a, 2
6739 // %c = setcc eq %b, 0
6740 // brcond %c ...
6741 //
6742  // However, after the source operand of SRL is optimized into AND, the SRL
6743 // itself may not be optimized further. Look for it and add the BRCOND into
6744 // the worklist.
6745 if (N->hasOneUse()) {
6746 SDNode *Use = *N->use_begin();
6747 if (Use->getOpcode() == ISD::BRCOND)
6748 AddToWorklist(Use);
6749 else if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse()) {
6750      // Also look past the truncate.
6751 Use = *Use->use_begin();
6752 if (Use->getOpcode() == ISD::BRCOND)
6753 AddToWorklist(Use);
6754 }
6755 }
6756
6757 return SDValue();
6758}
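// Worked example of the CTLZ/SRL fold above -- an illustrative sketch
// assuming i32 and an input known to be either 0 or 16 (values chosen
// for demonstration only, so UnknownBits == 16 is a power of two):
//   x == 0  -> (ctlz x) == 32 -> (srl 32, 5) == 1
//   x == 16 -> (ctlz x) == 27 -> (srl 27, 5) == 0
// which matches the emitted (xor (srl x, 4), 1): bit 4 set yields 0,
// bit 4 clear yields 1.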
6759
6760SDValue DAGCombiner::visitABS(SDNode *N) {
6761 SDValue N0 = N->getOperand(0);
6762 EVT VT = N->getValueType(0);
6763
6764 // fold (abs c1) -> c2
6765 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
6766 return DAG.getNode(ISD::ABS, SDLoc(N), VT, N0);
6767 // fold (abs (abs x)) -> (abs x)
6768 if (N0.getOpcode() == ISD::ABS)
6769 return N0;
6770 // fold (abs x) -> x iff not-negative
6771 if (DAG.SignBitIsZero(N0))
6772 return N0;
6773 return SDValue();
6774}
6775
6776SDValue DAGCombiner::visitBSWAP(SDNode *N) {
6777 SDValue N0 = N->getOperand(0);
6778 EVT VT = N->getValueType(0);
6779
6780 // fold (bswap c1) -> c2
6781 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
6782 return DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N0);
6783 // fold (bswap (bswap x)) -> x
6784 if (N0.getOpcode() == ISD::BSWAP)
6785 return N0->getOperand(0);
6786 return SDValue();
6787}
6788
6789SDValue DAGCombiner::visitBITREVERSE(SDNode *N) {
6790 SDValue N0 = N->getOperand(0);
6791 EVT VT = N->getValueType(0);
6792
6793 // fold (bitreverse c1) -> c2
6794 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
6795 return DAG.getNode(ISD::BITREVERSE, SDLoc(N), VT, N0);
6796 // fold (bitreverse (bitreverse x)) -> x
6797 if (N0.getOpcode() == ISD::BITREVERSE)
6798 return N0.getOperand(0);
6799 return SDValue();
6800}
6801
6802SDValue DAGCombiner::visitCTLZ(SDNode *N) {
6803 SDValue N0 = N->getOperand(0);
6804 EVT VT = N->getValueType(0);
6805
6806 // fold (ctlz c1) -> c2
6807 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
6808 return DAG.getNode(ISD::CTLZ, SDLoc(N), VT, N0);
6809
6810 // If the value is known never to be zero, switch to the undef version.
6811 if (!LegalOperations || TLI.isOperationLegal(ISD::CTLZ_ZERO_UNDEF, VT)) {
6812 if (DAG.isKnownNeverZero(N0))
6813 return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SDLoc(N), VT, N0);
6814 }
6815
6816 return SDValue();
6817}
6818
6819SDValue DAGCombiner::visitCTLZ_ZERO_UNDEF(SDNode *N) {
6820 SDValue N0 = N->getOperand(0);
6821 EVT VT = N->getValueType(0);
6822
6823 // fold (ctlz_zero_undef c1) -> c2
6824 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
6825 return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SDLoc(N), VT, N0);
6826 return SDValue();
6827}
6828
6829SDValue DAGCombiner::visitCTTZ(SDNode *N) {
6830 SDValue N0 = N->getOperand(0);
6831 EVT VT = N->getValueType(0);
6832
6833 // fold (cttz c1) -> c2
6834 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
6835 return DAG.getNode(ISD::CTTZ, SDLoc(N), VT, N0);
6836
6837 // If the value is known never to be zero, switch to the undef version.
6838 if (!LegalOperations || TLI.isOperationLegal(ISD::CTTZ_ZERO_UNDEF, VT)) {
6839 if (DAG.isKnownNeverZero(N0))
6840 return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0);
6841 }
6842
6843 return SDValue();
6844}
6845
6846SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) {
6847 SDValue N0 = N->getOperand(0);
6848 EVT VT = N->getValueType(0);
6849
6850 // fold (cttz_zero_undef c1) -> c2
6851 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
6852 return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0);
6853 return SDValue();
6854}
6855
6856SDValue DAGCombiner::visitCTPOP(SDNode *N) {
6857 SDValue N0 = N->getOperand(0);
6858 EVT VT = N->getValueType(0);
6859
6860 // fold (ctpop c1) -> c2
6861 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
6862 return DAG.getNode(ISD::CTPOP, SDLoc(N), VT, N0);
6863 return SDValue();
6864}
6865
6866/// Generate Min/Max node
6867static SDValue combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS,
6868 SDValue RHS, SDValue True, SDValue False,
6869 ISD::CondCode CC, const TargetLowering &TLI,
6870 SelectionDAG &DAG) {
6871 if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
6872 return SDValue();
6873
6874 switch (CC) {
6875 case ISD::SETOLT:
6876 case ISD::SETOLE:
6877 case ISD::SETLT:
6878 case ISD::SETLE:
6879 case ISD::SETULT:
6880 case ISD::SETULE: {
6881 unsigned Opcode = (LHS == True) ? ISD::FMINNUM : ISD::FMAXNUM;
6882 if (TLI.isOperationLegal(Opcode, VT))
6883 return DAG.getNode(Opcode, DL, VT, LHS, RHS);
6884 return SDValue();
6885 }
6886 case ISD::SETOGT:
6887 case ISD::SETOGE:
6888 case ISD::SETGT:
6889 case ISD::SETGE:
6890 case ISD::SETUGT:
6891 case ISD::SETUGE: {
6892 unsigned Opcode = (LHS == True) ? ISD::FMAXNUM : ISD::FMINNUM;
6893 if (TLI.isOperationLegal(Opcode, VT))
6894 return DAG.getNode(Opcode, DL, VT, LHS, RHS);
6895 return SDValue();
6896 }
6897 default:
6898 return SDValue();
6899 }
6900}
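// Worked example of combineMinNumMaxNum -- an illustrative sketch,
// assuming FMINNUM/FMAXNUM are legal for the type:
//   select (setolt x, y), x, y -> fminnum x, y   ; LHS == True
//   select (setogt x, y), x, y -> fmaxnum x, y   ; LHS == True
//   select (setolt x, y), y, x -> fmaxnum x, y   ; LHS == False flips it
// When the operands do not line up with the compare in either order, the
// function bails out before inspecting the condition code.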
6901
6902SDValue DAGCombiner::foldSelectOfConstants(SDNode *N) {
6903 SDValue Cond = N->getOperand(0);
6904 SDValue N1 = N->getOperand(1);
6905 SDValue N2 = N->getOperand(2);
6906 EVT VT = N->getValueType(0);
6907 EVT CondVT = Cond.getValueType();
6908 SDLoc DL(N);
6909
6910 if (!VT.isInteger())
6911 return SDValue();
6912
6913 auto *C1 = dyn_cast<ConstantSDNode>(N1);
6914 auto *C2 = dyn_cast<ConstantSDNode>(N2);
6915 if (!C1 || !C2)
6916 return SDValue();
6917
6918 // Only do this before legalization to avoid conflicting with target-specific
6919 // transforms in the other direction (create a select from a zext/sext). There
6920 // is also a target-independent combine here in DAGCombiner in the other
6921 // direction for (select Cond, -1, 0) when the condition is not i1.
6922 if (CondVT == MVT::i1 && !LegalOperations) {
6923 if (C1->isNullValue() && C2->isOne()) {
6924 // select Cond, 0, 1 --> zext (!Cond)
6925 SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1);
6926 if (VT != MVT::i1)
6927 NotCond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, NotCond);
6928 return NotCond;
6929 }
6930 if (C1->isNullValue() && C2->isAllOnesValue()) {
6931 // select Cond, 0, -1 --> sext (!Cond)
6932 SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1);
6933 if (VT != MVT::i1)
6934 NotCond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, NotCond);
6935 return NotCond;
6936 }
6937 if (C1->isOne() && C2->isNullValue()) {
6938 // select Cond, 1, 0 --> zext (Cond)
6939 if (VT != MVT::i1)
6940 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
6941 return Cond;
6942 }
6943 if (C1->isAllOnesValue() && C2->isNullValue()) {
6944 // select Cond, -1, 0 --> sext (Cond)
6945 if (VT != MVT::i1)
6946 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
6947 return Cond;
6948 }
6949
6950 // For any constants that differ by 1, we can transform the select into an
6951 // extend and add. Use a target hook because some targets may prefer to
6952 // transform in the other direction.
6953 if (TLI.convertSelectOfConstantsToMath(VT)) {
6954 if (C1->getAPIntValue() - 1 == C2->getAPIntValue()) {
6955 // select Cond, C1, C1-1 --> add (zext Cond), C1-1
6956 if (VT != MVT::i1)
6957 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
6958 return DAG.getNode(ISD::ADD, DL, VT, Cond, N2);
6959 }
6960 if (C1->getAPIntValue() + 1 == C2->getAPIntValue()) {
6961 // select Cond, C1, C1+1 --> add (sext Cond), C1+1
6962 if (VT != MVT::i1)
6963 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
6964 return DAG.getNode(ISD::ADD, DL, VT, Cond, N2);
6965 }
6966 }
6967
6968 return SDValue();
6969 }
6970
6971 // fold (select Cond, 0, 1) -> (xor Cond, 1)
6972 // We can't do this reliably if integer based booleans have different contents
6973 // to floating point based booleans. This is because we can't tell whether we
6974 // have an integer-based boolean or a floating-point-based boolean unless we
6975 // can find the SETCC that produced it and inspect its operands. This is
6976 // fairly easy if C is the SETCC node, but it can potentially be
6977 // undiscoverable (or not reasonably discoverable). For example, it could be
6978 // in another basic block or it could require searching a complicated
6979 // expression.
6980 if (CondVT.isInteger() &&
6981 TLI.getBooleanContents(/*isVec*/false, /*isFloat*/true) ==
6982 TargetLowering::ZeroOrOneBooleanContent &&
6983 TLI.getBooleanContents(/*isVec*/false, /*isFloat*/false) ==
6984 TargetLowering::ZeroOrOneBooleanContent &&
6985 C1->isNullValue() && C2->isOne()) {
6986 SDValue NotCond =
6987 DAG.getNode(ISD::XOR, DL, CondVT, Cond, DAG.getConstant(1, DL, CondVT));
6988 if (VT.bitsEq(CondVT))
6989 return NotCond;
6990 return DAG.getZExtOrTrunc(NotCond, DL, VT);
6991 }
6992
6993 return SDValue();
6994}
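// Worked example of the constants-differing-by-one fold above -- an
// illustrative sketch with an i1 condition and i32 constants chosen for
// demonstration only:
//   select Cond, 6, 5 -> add (zext Cond), 5   ; C1 == C2 + 1, zext is 1/0
//   select Cond, 4, 5 -> add (sext Cond), 5   ; C1 == C2 - 1, sext is -1/0
// Either way the variable select becomes an extend plus an add of N2.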
6995
6996SDValue DAGCombiner::visitSELECT(SDNode *N) {
6997 SDValue N0 = N->getOperand(0);
6998 SDValue N1 = N->getOperand(1);
6999 SDValue N2 = N->getOperand(2);
7000 EVT VT = N->getValueType(0);
7001 EVT VT0 = N0.getValueType();
7002 SDLoc DL(N);
7003
7004 // fold (select C, X, X) -> X
7005 if (N1 == N2)
7006 return N1;
7007
7008 if (const ConstantSDNode *N0C = dyn_cast<const ConstantSDNode>(N0)) {
7009 // fold (select true, X, Y) -> X
7010 // fold (select false, X, Y) -> Y
7011 return !N0C->isNullValue() ? N1 : N2;
7012 }
7013
7014 // fold (select X, X, Y) -> (or X, Y)
7015 // fold (select X, 1, Y) -> (or C, Y)
7016 if (VT == VT0 && VT == MVT::i1 && (N0 == N1 || isOneConstant(N1)))
7017 return DAG.getNode(ISD::OR, DL, VT, N0, N2);
7018
7019 if (SDValue V = foldSelectOfConstants(N))
7020 return V;
7021
7022 // fold (select C, 0, X) -> (and (not C), X)
7023 if (VT == VT0 && VT == MVT::i1 && isNullConstant(N1)) {
7024 SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
7025 AddToWorklist(NOTNode.getNode());
7026 return DAG.getNode(ISD::AND, DL, VT, NOTNode, N2);
7027 }
7028 // fold (select C, X, 1) -> (or (not C), X)
7029 if (VT == VT0 && VT == MVT::i1 && isOneConstant(N2)) {
7030 SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
7031 AddToWorklist(NOTNode.getNode());
7032 return DAG.getNode(ISD::OR, DL, VT, NOTNode, N1);
7033 }
7034 // fold (select X, Y, X) -> (and X, Y)
7035 // fold (select X, Y, 0) -> (and X, Y)
7036 if (VT == VT0 && VT == MVT::i1 && (N0 == N2 || isNullConstant(N2)))
7037 return DAG.getNode(ISD::AND, DL, VT, N0, N1);
7038
7039 // If we can fold this based on the true/false value, do so.
7040 if (SimplifySelectOps(N, N1, N2))
7041 return SDValue(N, 0); // Don't revisit N.
7042
7043 if (VT0 == MVT::i1) {
7044 // The code in this block deals with the following 2 equivalences:
7045 // select(C0|C1, x, y) <=> select(C0, x, select(C1, x, y))
7046 // select(C0&C1, x, y) <=> select(C0, select(C1, x, y), y)
7047 // The target can specify its preferred form with the
7048    // shouldNormalizeToSelectSequence() callback. However, we always transform
7049    // to the right form if the inner select already exists in the DAG, and we
7050    // always transform to the left form if we know that we can further
7051    // optimize the combination of the conditions.
7052 bool normalizeToSequence =
7053 TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT);
7054 // select (and Cond0, Cond1), X, Y
7055 // -> select Cond0, (select Cond1, X, Y), Y
7056 if (N0->getOpcode() == ISD::AND && N0->hasOneUse()) {
7057 SDValue Cond0 = N0->getOperand(0);
7058 SDValue Cond1 = N0->getOperand(1);
7059 SDValue InnerSelect =
7060 DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond1, N1, N2);
7061 if (normalizeToSequence || !InnerSelect.use_empty())
7062 return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond0,
7063 InnerSelect, N2);
7064 }
7065 // select (or Cond0, Cond1), X, Y -> select Cond0, X, (select Cond1, X, Y)
7066 if (N0->getOpcode() == ISD::OR && N0->hasOneUse()) {
7067 SDValue Cond0 = N0->getOperand(0);
7068 SDValue Cond1 = N0->getOperand(1);
7069 SDValue InnerSelect =
7070 DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond1, N1, N2);
7071 if (normalizeToSequence || !InnerSelect.use_empty())
7072 return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond0, N1,
7073 InnerSelect);
7074 }
7075
7076 // select Cond0, (select Cond1, X, Y), Y -> select (and Cond0, Cond1), X, Y
7077 if (N1->getOpcode() == ISD::SELECT && N1->hasOneUse()) {
7078 SDValue N1_0 = N1->getOperand(0);
7079 SDValue N1_1 = N1->getOperand(1);
7080 SDValue N1_2 = N1->getOperand(2);
7081 if (N1_2 == N2 && N0.getValueType() == N1_0.getValueType()) {
7082 // Create the actual and node if we can generate good code for it.
7083 if (!normalizeToSequence) {
7084 SDValue And = DAG.getNode(ISD::AND, DL, N0.getValueType(), N0, N1_0);
7085 return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), And, N1_1, N2);
7086 }
7087 // Otherwise see if we can optimize the "and" to a better pattern.
7088 if (SDValue Combined = visitANDLike(N0, N1_0, N))
7089 return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Combined, N1_1,
7090 N2);
7091 }
7092 }
7093 // select Cond0, X, (select Cond1, X, Y) -> select (or Cond0, Cond1), X, Y
7094 if (N2->getOpcode() == ISD::SELECT && N2->hasOneUse()) {
7095 SDValue N2_0 = N2->getOperand(0);
7096 SDValue N2_1 = N2->getOperand(1);
7097 SDValue N2_2 = N2->getOperand(2);
7098 if (N2_1 == N1 && N0.getValueType() == N2_0.getValueType()) {
7099 // Create the actual or node if we can generate good code for it.
7100 if (!normalizeToSequence) {
7101 SDValue Or = DAG.getNode(ISD::OR, DL, N0.getValueType(), N0, N2_0);
7102 return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Or, N1, N2_2);
7103 }
7104 // Otherwise see if we can optimize to a better pattern.
7105 if (SDValue Combined = visitORLike(N0, N2_0, N))
7106 return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Combined, N1,
7107 N2_2);
7108 }
7109 }
7110 }
7111
7112 if (VT0 == MVT::i1) {
7113 // select (not Cond), N1, N2 -> select Cond, N2, N1
7114 if (isBitwiseNot(N0))
7115 return DAG.getNode(ISD::SELECT, DL, VT, N0->getOperand(0), N2, N1);
7116 }
7117
7118 // fold selects based on a setcc into other things, such as min/max/abs
7119 if (N0.getOpcode() == ISD::SETCC) {
7120    // select (fcmp lt x, y), x, y -> fminnum x, y
7121    // select (fcmp gt x, y), x, y -> fmaxnum x, y
7122 //
7123 // This is OK if we don't care about what happens if either operand is a
7124 // NaN.
7125 //
7126
7127 // FIXME: Instead of testing for UnsafeFPMath, this should be checking for
7128 // no signed zeros as well as no nans.
7129 const TargetOptions &Options = DAG.getTarget().Options;
7130 if (Options.UnsafeFPMath && VT.isFloatingPoint() && N0.hasOneUse() &&
7131 DAG.isKnownNeverNaN(N1) && DAG.isKnownNeverNaN(N2)) {
7132 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
7133
7134 if (SDValue FMinMax = combineMinNumMaxNum(
7135 DL, VT, N0.getOperand(0), N0.getOperand(1), N1, N2, CC, TLI, DAG))
7136 return FMinMax;
7137 }
7138
7139 if ((!LegalOperations &&
7140 TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT)) ||
7141 TLI.isOperationLegal(ISD::SELECT_CC, VT))
7142 return DAG.getNode(ISD::SELECT_CC, DL, VT, N0.getOperand(0),
7143 N0.getOperand(1), N1, N2, N0.getOperand(2));
7144 return SimplifySelect(DL, N0, N1, N2);
7145 }
7146
7147 return SDValue();
7148}
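// Worked example of the i1 select normalization above -- an illustrative
// sketch of the two equivalences being traded off:
//   select (and c0, c1), x, y -> select c0, (select c1, x, y), y
//   select (or  c0, c1), x, y -> select c0, x, (select c1, x, y)
// and, when the target does not prefer a select sequence, the reverse
// direction folds a nested select back into a single and/or condition.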
7149
7150static
7151std::pair<SDValue, SDValue> SplitVSETCC(const SDNode *N, SelectionDAG &DAG) {
7152 SDLoc DL(N);
7153 EVT LoVT, HiVT;
7154 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
7155
7156 // Split the inputs.
7157 SDValue Lo, Hi, LL, LH, RL, RH;
7158 std::tie(LL, LH) = DAG.SplitVectorOperand(N, 0);
7159 std::tie(RL, RH) = DAG.SplitVectorOperand(N, 1);
7160
7161 Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
7162 Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));
7163
7164 return std::make_pair(Lo, Hi);
7165}
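// Worked example of SplitVSETCC -- an illustrative sketch, assuming the
// compare operands are v8i32 (types chosen for demonstration only):
//   (setcc %a, %b, setlt), %a/%b : v8i32
// splits into two half-width nodes
//   Lo = (setcc %a.lo, %b.lo, setlt)
//   Hi = (setcc %a.hi, %b.hi, setlt)
// with the condition-code operand N->getOperand(2) shared by both halves.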
7166
7167// This function assumes all the vselect's arguments are CONCAT_VECTOR
7168// nodes and that the condition is a BV of ConstantSDNodes (or undefs).
7169static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {
7170 SDLoc DL(N);
7171 SDValue Cond = N->getOperand(0);
7172 SDValue LHS = N->getOperand(1);
7173 SDValue RHS = N->getOperand(2);
7174 EVT VT = N->getValueType(0);
7175 int NumElems = VT.getVectorNumElements();
7176  assert(LHS.getOpcode() == ISD::CONCAT_VECTORS &&
7177         RHS.getOpcode() == ISD::CONCAT_VECTORS &&
7178         Cond.getOpcode() == ISD::BUILD_VECTOR);
7179
7180 // CONCAT_VECTOR can take an arbitrary number of arguments. We only care about
7181 // binary ones here.
7182 if (LHS->getNumOperands() != 2 || RHS->getNumOperands() != 2)
7183 return SDValue();
7184
7185 // We're sure we have an even number of elements due to the
7186 // concat_vectors we have as arguments to vselect.
7187  // Skip BV elements until we find one that's not an UNDEF.
7188  // After we find a non-UNDEF element, keep looping until we get to half the
7189  // length of the BV and check that all the non-undef nodes are the same.
7190 ConstantSDNode *BottomHalf = nullptr;
7191 for (int i = 0; i < NumElems / 2; ++i) {
7192 if (Cond->getOperand(i)->isUndef())
7193 continue;
7194
7195 if (BottomHalf == nullptr)
7196 BottomHalf = cast<ConstantSDNode>(Cond.getOperand(i));
7197 else if (Cond->getOperand(i).getNode() != BottomHalf)
7198 return SDValue();
7199 }
7200
7201 // Do the same for the second half of the BuildVector
7202 ConstantSDNode *TopHalf = nullptr;
7203 for (int i = NumElems / 2; i < NumElems; ++i) {
7204 if (Cond->getOperand(i)->isUndef())
7205 continue;
7206
7207 if (TopHalf == nullptr)
7208 TopHalf = cast<ConstantSDNode>(Cond.getOperand(i));
7209 else if (Cond->getOperand(i).getNode() != TopHalf)
7210 return SDValue();
7211 }
7212
7213  assert(TopHalf && BottomHalf &&
7214         "One half of the selector was all UNDEFs and the other was all the "
7215         "same value. This should have been addressed before this function.");
7216 return DAG.getNode(
7217 ISD::CONCAT_VECTORS, DL, VT,
7218 BottomHalf->isNullValue() ? RHS->getOperand(0) : LHS->getOperand(0),
7219 TopHalf->isNullValue() ? RHS->getOperand(1) : LHS->getOperand(1));
7220}
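// Worked example of ConvertSelectToConcatVector -- an illustrative sketch
// with a v8i32 vselect (values chosen for demonstration only):
//   vselect <-1,-1,undef,-1,0,0,undef,0>,
//           (concat_vectors A, B), (concat_vectors C, D)
// The bottom half of the condition is all ones (or undef) and the top
// half is all zeros (or undef), so the node becomes
//   (concat_vectors A, D)
// picking whole subvectors instead of selecting per element.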
7221
7222SDValue DAGCombiner::visitMSCATTER(SDNode *N) {
7223 if (Level >= AfterLegalizeTypes)
7224 return SDValue();
7225
7226 MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(N);
7227 SDValue Mask = MSC->getMask();
7228 SDValue Data = MSC->getValue();
7229 SDLoc DL(N);
7230
7231 // If the MSCATTER data type requires splitting and the mask is provided by a
7232  // SETCC, then split both nodes and their operands before legalization. This
7233 // prevents the type legalizer from unrolling SETCC into scalar comparisons
7234 // and enables future optimizations (e.g. min/max pattern matching on X86).
7235 if (Mask.getOpcode() != ISD::SETCC)
7236 return SDValue();
7237
7238 // Check if any splitting is required.
7239 if (TLI.getTypeAction(*DAG.getContext(), Data.getValueType()) !=
7240 TargetLowering::TypeSplitVector)
7241 return SDValue();
7242 SDValue MaskLo, MaskHi, Lo, Hi;
7243 std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
7244
7245 EVT LoVT, HiVT;
7246 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MSC->getValueType(0));
7247
7248 SDValue Chain = MSC->getChain();
7249
7250 EVT MemoryVT = MSC->getMemoryVT();
7251 unsigned Alignment = MSC->getOriginalAlignment();
7252
7253 EVT LoMemVT, HiMemVT;
7254 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
7255
7256 SDValue DataLo, DataHi;
7257 std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);
7258
7259 SDValue Scale = MSC->getScale();
7260 SDValue BasePtr = MSC->getBasePtr();
7261 SDValue IndexLo, IndexHi;
7262 std::tie(IndexLo, IndexHi) = DAG.SplitVector(MSC->getIndex(), DL);
7263
7264 MachineMemOperand *MMO = DAG.getMachineFunction().
7265 getMachineMemOperand(MSC->getPointerInfo(),
7266 MachineMemOperand::MOStore, LoMemVT.getStoreSize(),
7267 Alignment, MSC->getAAInfo(), MSC->getRanges());
7268
7269 SDValue OpsLo[] = { Chain, DataLo, MaskLo, BasePtr, IndexLo, Scale };
7270 Lo = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataLo.getValueType(),
7271 DL, OpsLo, MMO);
7272
7273 SDValue OpsHi[] = { Chain, DataHi, MaskHi, BasePtr, IndexHi, Scale };
7274 Hi = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataHi.getValueType(),
7275 DL, OpsHi, MMO);
7276
7277 AddToWorklist(Lo.getNode());
7278 AddToWorklist(Hi.getNode());
7279
7280 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
7281}
7282
7283SDValue DAGCombiner::visitMSTORE(SDNode *N) {
7284 if (Level >= AfterLegalizeTypes)
7285 return SDValue();
7286
7287  MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
7288 SDValue Mask = MST->getMask();
7289 SDValue Data = MST->getValue();
7290 EVT VT = Data.getValueType();
7291 SDLoc DL(N);
7292
7293 // If the MSTORE data type requires splitting and the mask is provided by a
7294  // SETCC, then split both nodes and their operands before legalization. This
7295 // prevents the type legalizer from unrolling SETCC into scalar comparisons
7296 // and enables future optimizations (e.g. min/max pattern matching on X86).
7297 if (Mask.getOpcode() == ISD::SETCC) {
7298 // Check if any splitting is required.
7299 if (TLI.getTypeAction(*DAG.getContext(), VT) !=
7300 TargetLowering::TypeSplitVector)
7301 return SDValue();
7302
7303 SDValue MaskLo, MaskHi, Lo, Hi;
7304 std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
7305
7306 SDValue Chain = MST->getChain();
7307 SDValue Ptr = MST->getBasePtr();
7308
7309 EVT MemoryVT = MST->getMemoryVT();
7310 unsigned Alignment = MST->getOriginalAlignment();
7311
7312    // If the alignment is equal to the vector size, take half of it for the
7313    // second part.
7314 unsigned SecondHalfAlignment =
7315 (Alignment == VT.getSizeInBits() / 8) ? Alignment / 2 : Alignment;
7316
7317 EVT LoMemVT, HiMemVT;
7318 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
7319
7320 SDValue DataLo, DataHi;
7321 std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL);
7322
7323 MachineMemOperand *MMO = DAG.getMachineFunction().
7324 getMachineMemOperand(MST->getPointerInfo(),
7325 MachineMemOperand::MOStore, LoMemVT.getStoreSize(),
7326 Alignment, MST->getAAInfo(), MST->getRanges());
7327
7328 Lo = DAG.getMaskedStore(Chain, DL, DataLo, Ptr, MaskLo, LoMemVT, MMO,
7329 MST->isTruncatingStore(),
7330 MST->isCompressingStore());
7331
7332 Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG,
7333 MST->isCompressingStore());
7334 unsigned HiOffset = LoMemVT.getStoreSize();
7335
7336 MMO = DAG.getMachineFunction().getMachineMemOperand(
7337 MST->getPointerInfo().getWithOffset(HiOffset),
7338 MachineMemOperand::MOStore, HiMemVT.getStoreSize(), SecondHalfAlignment,
7339 MST->getAAInfo(), MST->getRanges());
7340
7341 Hi = DAG.getMaskedStore(Chain, DL, DataHi, Ptr, MaskHi, HiMemVT, MMO,
7342 MST->isTruncatingStore(),
7343 MST->isCompressingStore());
7344
7345 AddToWorklist(Lo.getNode());
7346 AddToWorklist(Hi.getNode());
7347
7348 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
7349 }
7350 return SDValue();
7351}
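// Worked example of the masked-store split above -- an illustrative
// sketch with a v16i32 store on a target that splits that type (types
// chosen for demonstration only): the store becomes two v8i32 masked
// stores, the low half at the original pointer and the high half at the
// pointer advanced by LoMemVT.getStoreSize() bytes, joined by a
// TokenFactor. If the original alignment equaled the full vector size,
// only half of it can be claimed for the high half, which is what
// SecondHalfAlignment accounts for.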
7352
7353SDValue DAGCombiner::visitMGATHER(SDNode *N) {
7354 if (Level >= AfterLegalizeTypes)
7355 return SDValue();
7356
7357 MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(N);
7358 SDValue Mask = MGT->getMask();
7359 SDLoc DL(N);
7360
7361 // If the MGATHER result requires splitting and the mask is provided by a
7362  // SETCC, then split both nodes and their operands before legalization. This
7363 // prevents the type legalizer from unrolling SETCC into scalar comparisons
7364 // and enables future optimizations (e.g. min/max pattern matching on X86).
7365
7366 if (Mask.getOpcode() != ISD::SETCC)
7367 return SDValue();
7368
7369 EVT VT = N->getValueType(0);
7370
7371 // Check if any splitting is required.
7372 if (TLI.getTypeAction(*DAG.getContext(), VT) !=
7373 TargetLowering::TypeSplitVector)
7374 return SDValue();
7375
7376 SDValue MaskLo, MaskHi, Lo, Hi;
7377 std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
7378
7379 SDValue Src0 = MGT->getValue();
7380 SDValue Src0Lo, Src0Hi;
7381 std::tie(Src0Lo, Src0Hi) = DAG.SplitVector(Src0, DL);
7382
7383 EVT LoVT, HiVT;
7384 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
7385
7386 SDValue Chain = MGT->getChain();
7387 EVT MemoryVT = MGT->getMemoryVT();
7388 unsigned Alignment = MGT->getOriginalAlignment();
7389
7390 EVT LoMemVT, HiMemVT;
7391 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
7392
7393 SDValue Scale = MGT->getScale();
7394 SDValue BasePtr = MGT->getBasePtr();
7395 SDValue Index = MGT->getIndex();
7396 SDValue IndexLo, IndexHi;
7397 std::tie(IndexLo, IndexHi) = DAG.SplitVector(Index, DL);
7398
7399 MachineMemOperand *MMO = DAG.getMachineFunction().
7400 getMachineMemOperand(MGT->getPointerInfo(),
7401 MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
7402 Alignment, MGT->getAAInfo(), MGT->getRanges());
7403
7404 SDValue OpsLo[] = { Chain, Src0Lo, MaskLo, BasePtr, IndexLo, Scale };
7405 Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoVT, DL, OpsLo,
7406 MMO);
7407
7408 SDValue OpsHi[] = { Chain, Src0Hi, MaskHi, BasePtr, IndexHi, Scale };
7409 Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiVT, DL, OpsHi,
7410 MMO);
7411
7412 AddToWorklist(Lo.getNode());
7413 AddToWorklist(Hi.getNode());
7414
7415 // Build a factor node to remember that this load is independent of the
7416 // other one.
7417 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo.getValue(1),
7418 Hi.getValue(1));
7419
7420 // Legalized the chain result - switch anything that used the old chain to
7421 // use the new one.
7422 DAG.ReplaceAllUsesOfValueWith(SDValue(MGT, 1), Chain);
7423
7424 SDValue GatherRes = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
7425
7426 SDValue RetOps[] = { GatherRes, Chain };
7427 return DAG.getMergeValues(RetOps, DL);
7428}
7429
7430SDValue DAGCombiner::visitMLOAD(SDNode *N) {
7431 if (Level >= AfterLegalizeTypes)
7432 return SDValue();
7433
7434  MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
7435 SDValue Mask = MLD->getMask();
7436 SDLoc DL(N);
7437
7438 // If the MLOAD result requires splitting and the mask is provided by a
7439  // SETCC, then split both nodes and their operands before legalization. This
7440 // prevents the type legalizer from unrolling SETCC into scalar comparisons
7441 // and enables future optimizations (e.g. min/max pattern matching on X86).
7442 if (Mask.getOpcode() == ISD::SETCC) {
7443 EVT VT = N->getValueType(0);
7444
7445 // Check if any splitting is required.
7446 if (TLI.getTypeAction(*DAG.getContext(), VT) !=
7447 TargetLowering::TypeSplitVector)
7448 return SDValue();
7449
7450 SDValue MaskLo, MaskHi, Lo, Hi;
7451 std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG);
7452
7453 SDValue Src0 = MLD->getSrc0();
7454 SDValue Src0Lo, Src0Hi;
7455 std::tie(Src0Lo, Src0Hi) = DAG.SplitVector(Src0, DL);
7456
7457 EVT LoVT, HiVT;
7458 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MLD->getValueType(0));
7459
7460 SDValue Chain = MLD->getChain();
7461 SDValue Ptr = MLD->getBasePtr();
7462 EVT MemoryVT = MLD->getMemoryVT();
7463 unsigned Alignment = MLD->getOriginalAlignment();
7464
7465    // If the alignment is equal to the vector size, take half of it for the
7466    // second part.
7467 unsigned SecondHalfAlignment =
7468 (Alignment == MLD->getValueType(0).getSizeInBits()/8) ?
7469 Alignment/2 : Alignment;
7470
7471 EVT LoMemVT, HiMemVT;
7472 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT);
7473
7474 MachineMemOperand *MMO = DAG.getMachineFunction().
7475 getMachineMemOperand(MLD->getPointerInfo(),
7476 MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
7477 Alignment, MLD->getAAInfo(), MLD->getRanges());
7478
7479 Lo = DAG.getMaskedLoad(LoVT, DL, Chain, Ptr, MaskLo, Src0Lo, LoMemVT, MMO,
7480 ISD::NON_EXTLOAD, MLD->isExpandingLoad());
7481
7482 Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG,
7483 MLD->isExpandingLoad());
7484 unsigned HiOffset = LoMemVT.getStoreSize();
7485
7486 MMO = DAG.getMachineFunction().getMachineMemOperand(
7487 MLD->getPointerInfo().getWithOffset(HiOffset),
7488 MachineMemOperand::MOLoad, HiMemVT.getStoreSize(), SecondHalfAlignment,
7489 MLD->getAAInfo(), MLD->getRanges());
7490
7491 Hi = DAG.getMaskedLoad(HiVT, DL, Chain, Ptr, MaskHi, Src0Hi, HiMemVT, MMO,
7492 ISD::NON_EXTLOAD, MLD->isExpandingLoad());
7493
7494 AddToWorklist(Lo.getNode());
7495 AddToWorklist(Hi.getNode());
7496
7497 // Build a factor node to remember that this load is independent of the
7498 // other one.
7499 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo.getValue(1),
7500 Hi.getValue(1));
7501
7502 // Legalized the chain result - switch anything that used the old chain to
7503 // use the new one.
7504 DAG.ReplaceAllUsesOfValueWith(SDValue(MLD, 1), Chain);
7505
7506 SDValue LoadRes = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
7507
7508 SDValue RetOps[] = { LoadRes, Chain };
7509 return DAG.getMergeValues(RetOps, DL);
7510 }
7511 return SDValue();
7512}
7513
7514/// A vector select of 2 constant vectors can be simplified to math/logic to
7515/// avoid a variable select instruction and possibly avoid constant loads.
7516SDValue DAGCombiner::foldVSelectOfConstants(SDNode *N) {
7517 SDValue Cond = N->getOperand(0);
7518 SDValue N1 = N->getOperand(1);
7519 SDValue N2 = N->getOperand(2);
7520 EVT VT = N->getValueType(0);
7521 if (!Cond.hasOneUse() || Cond.getScalarValueSizeInBits() != 1 ||
7522 !TLI.convertSelectOfConstantsToMath(VT) ||
7523 !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()) ||
7524 !ISD::isBuildVectorOfConstantSDNodes(N2.getNode()))
7525 return SDValue();
7526
7527 // Check if we can use the condition value to increment/decrement a single
7528 // constant value. This simplifies a select to an add and removes a constant
7529 // load/materialization from the general case.
7530 bool AllAddOne = true;
7531 bool AllSubOne = true;
7532 unsigned Elts = VT.getVectorNumElements();
7533 for (unsigned i = 0; i != Elts; ++i) {
7534 SDValue N1Elt = N1.getOperand(i);
7535 SDValue N2Elt = N2.getOperand(i);
7536 if (N1Elt.isUndef() || N2Elt.isUndef())
7537 continue;
7538
7539 const APInt &C1 = cast<ConstantSDNode>(N1Elt)->getAPIntValue();
7540 const APInt &C2 = cast<ConstantSDNode>(N2Elt)->getAPIntValue();
7541 if (C1 != C2 + 1)
7542 AllAddOne = false;
7543 if (C1 != C2 - 1)
7544 AllSubOne = false;
7545 }
7546
7547 // Further simplifications for the extra-special cases where the constants are
7548 // all 0 or all -1 should be implemented as folds of these patterns.
7549 SDLoc DL(N);
7550 if (AllAddOne || AllSubOne) {
7551 // vselect <N x i1> Cond, C+1, C --> add (zext Cond), C
7552 // vselect <N x i1> Cond, C-1, C --> add (sext Cond), C
7553 auto ExtendOpcode = AllAddOne ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
7554 SDValue ExtendedCond = DAG.getNode(ExtendOpcode, DL, VT, Cond);
7555 return DAG.getNode(ISD::ADD, DL, VT, ExtendedCond, N2);
7556 }
7557
7558 // The general case for select-of-constants:
7559 // vselect <N x i1> Cond, C1, C2 --> xor (and (sext Cond), (C1^C2)), C2
7560 // ...but that only makes sense if a vselect is slower than 2 logic ops, so
7561 // leave that to a machine-specific pass.
7562 return SDValue();
7563}
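// Worked example of the vselect-of-constants fold above -- an
// illustrative sketch with v4i32 operands (values chosen for
// demonstration only):
//   vselect <4 x i1> %c, <6,3,9,2>, <5,2,8,1>
// Every N1 element equals the matching N2 element plus one, so the node
// becomes (add (zext %c to v4i32), <5,2,8,1>), trading the variable
// select and one constant vector for an extend and an add.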
7564
7565SDValue DAGCombiner::visitVSELECT(SDNode *N) {
7566 SDValue N0 = N->getOperand(0);
7567 SDValue N1 = N->getOperand(1);
7568 SDValue N2 = N->getOperand(2);
7569 SDLoc DL(N);
7570
7571 // fold (vselect C, X, X) -> X
7572 if (N1 == N2)
7573 return N1;
7574
7575 // Canonicalize integer abs.
7576 // vselect (setg[te] X, 0), X, -X ->
7577 // vselect (setgt X, -1), X, -X ->
7578 // vselect (setl[te] X, 0), -X, X ->
7579 // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
7580 if (N0.getOpcode() == ISD::SETCC) {
7581 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
7582 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
7583 bool isAbs = false;
7584 bool RHSIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
7585
7586 if (((RHSIsAllZeros && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
7587 (ISD::isBuildVectorAllOnes(RHS.getNode()) && CC == ISD::SETGT)) &&
7588 N1 == LHS && N2.getOpcode() == ISD::SUB && N1 == N2.getOperand(1))
7589 isAbs = ISD::isBuildVectorAllZeros(N2.getOperand(0).getNode());
7590 else if ((RHSIsAllZeros && (CC == ISD::SETLT || CC == ISD::SETLE)) &&
7591 N2 == LHS && N1.getOpcode() == ISD::SUB && N2 == N1.getOperand(1))
7592 isAbs = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode());
7593
7594 if (isAbs) {
7595 EVT VT = LHS.getValueType();
7596 if (TLI.isOperationLegalOrCustom(ISD::ABS, VT))
7597 return DAG.getNode(ISD::ABS, DL, VT, LHS);
7598
7599 SDValue Shift = DAG.getNode(
7600 ISD::SRA, DL, VT, LHS,
7601 DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
7602 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, LHS, Shift);
7603 AddToWorklist(Shift.getNode());
7604 AddToWorklist(Add.getNode());
7605 return DAG.getNode(ISD::XOR, DL, VT, Add, Shift);
7606 }
7607
7608 // If this select has a condition (setcc) with narrower operands than the
7609 // select, try to widen the compare to match the select width.
7610 // TODO: This should be extended to handle any constant.
7611 // TODO: This could be extended to handle non-loading patterns, but that
7612 // requires thorough testing to avoid regressions.
7613 if (isNullConstantOrNullSplatConstant(RHS)) {
7614 EVT NarrowVT = LHS.getValueType();
7615 EVT WideVT = N1.getValueType().changeVectorElementTypeToInteger();
7616 EVT SetCCVT = getSetCCResultType(LHS.getValueType());
7617 unsigned SetCCWidth = SetCCVT.getScalarSizeInBits();
7618 unsigned WideWidth = WideVT.getScalarSizeInBits();
7619 bool IsSigned = isSignedIntSetCC(CC);
7620 auto LoadExtOpcode = IsSigned ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
7621 if (LHS.getOpcode() == ISD::LOAD && LHS.hasOneUse() &&
7622 SetCCWidth != 1 && SetCCWidth < WideWidth &&
7623 TLI.isLoadExtLegalOrCustom(LoadExtOpcode, WideVT, NarrowVT) &&
7624 TLI.isOperationLegalOrCustom(ISD::SETCC, WideVT)) {
7625 // Both compare operands can be widened for free. The LHS can use an
7626 // extended load, and the RHS is a constant:
7627 // vselect (ext (setcc load(X), C)), N1, N2 -->
7628 // vselect (setcc extload(X), C'), N1, N2
7629 auto ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
7630 SDValue WideLHS = DAG.getNode(ExtOpcode, DL, WideVT, LHS);
7631 SDValue WideRHS = DAG.getNode(ExtOpcode, DL, WideVT, RHS);
7632 EVT WideSetCCVT = getSetCCResultType(WideVT);
7633 SDValue WideSetCC = DAG.getSetCC(DL, WideSetCCVT, WideLHS, WideRHS, CC);
7634 return DAG.getSelect(DL, N1.getValueType(), WideSetCC, N1, N2);
7635 }
7636 }
7637 }
7638
7639 if (SimplifySelectOps(N, N1, N2))
7640 return SDValue(N, 0); // Don't revisit N.
7641
7642 // Fold (vselect (build_vector all_ones), N1, N2) -> N1
7643 if (ISD::isBuildVectorAllOnes(N0.getNode()))
7644 return N1;
7645 // Fold (vselect (build_vector all_zeros), N1, N2) -> N2
7646 if (ISD::isBuildVectorAllZeros(N0.getNode()))
7647 return N2;
7648
7649 // The ConvertSelectToConcatVector function is assuming both the above
7650  // checks for (vselect (build_vector all{ones,zeros}) ...) have been made
7651 // and addressed.
7652 if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
7653 N2.getOpcode() == ISD::CONCAT_VECTORS &&
7654 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
7655 if (SDValue CV = ConvertSelectToConcatVector(N, DAG))
7656 return CV;
7657 }
7658
7659 if (SDValue V = foldVSelectOfConstants(N))
7660 return V;
7661
7662 return SDValue();
7663}
7664
7665SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
7666 SDValue N0 = N->getOperand(0);
7667 SDValue N1 = N->getOperand(1);
7668 SDValue N2 = N->getOperand(2);
7669 SDValue N3 = N->getOperand(3);
7670 SDValue N4 = N->getOperand(4);
7671 ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get();
7672
7673 // fold select_cc lhs, rhs, x, x, cc -> x
7674 if (N2 == N3)
7675 return N2;
7676
7677 // Determine if the condition we're dealing with is constant
7678 if (SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()), N0, N1,
7679 CC, SDLoc(N), false)) {
7680 AddToWorklist(SCC.getNode());
7681
7682 if (ConstantSDNode *SCCC = dyn_cast<ConstantSDNode>(SCC.getNode())) {
7683 if (!SCCC->isNullValue())
7684 return N2; // cond always true -> true val
7685 else
7686 return N3; // cond always false -> false val
7687 } else if (SCC->isUndef()) {
7688 // When the condition is UNDEF, just return the first operand. This is
7689 // coherent with DAG creation; no setcc node is created in this case.
7690 return N2;
7691 } else if (SCC.getOpcode() == ISD::SETCC) {
7692 // Fold to a simpler select_cc
7693 return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N2.getValueType(),
7694 SCC.getOperand(0), SCC.getOperand(1), N2, N3,
7695 SCC.getOperand(2));
7696 }
7697 }
7698
7699 // If we can fold this based on the true/false value, do so.
7700 if (SimplifySelectOps(N, N2, N3))
7701 return SDValue(N, 0); // Don't revisit N.
7702
7703 // fold select_cc into other things, such as min/max/abs
7704 return SimplifySelectCC(SDLoc(N), N0, N1, N2, N3, CC);
7705}
7706
7707SDValue DAGCombiner::visitSETCC(SDNode *N) {
7708 // setcc is very commonly used as an argument to brcond. This pattern
7709 // also lends itself to numerous combines and, as a result, it is desirable
7710 // to keep the argument to a brcond as a setcc as much as possible.
7711 bool PreferSetCC =
7712 N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BRCOND;
7713
7714 SDValue Combined = SimplifySetCC(
7715 N->getValueType(0), N->getOperand(0), N->getOperand(1),
7716 cast<CondCodeSDNode>(N->getOperand(2))->get(), SDLoc(N), !PreferSetCC);
7717
7718 if (!Combined)
7719 return SDValue();
7720
7721 // If we prefer to have a setcc, and we don't, we'll try our best to
7722 // recreate one using rebuildSetCC.
7723 if (PreferSetCC && Combined.getOpcode() != ISD::SETCC) {
7724 SDValue NewSetCC = rebuildSetCC(Combined);
7725
7726 // We don't have anything interesting to combine to.
7727 if (NewSetCC.getNode() == N)
7728 return SDValue();
7729
7730 if (NewSetCC)
7731 return NewSetCC;
7732 }
7733
7734 return Combined;
7735}
7736
7737SDValue DAGCombiner::visitSETCCCARRY(SDNode *N) {
7738 SDValue LHS = N->getOperand(0);
7739 SDValue RHS = N->getOperand(1);
7740 SDValue Carry = N->getOperand(2);
7741 SDValue Cond = N->getOperand(3);
7742
7743 // If Carry is false, fold to a regular SETCC.
7744 if (isNullConstant(Carry))
7745 return DAG.getNode(ISD::SETCC, SDLoc(N), N->getVTList(), LHS, RHS, Cond);
7746
7747 return SDValue();
7748}
7749
7750/// Try to fold a sext/zext/aext dag node into a ConstantSDNode or
7751/// a build_vector of constants.
7752/// This function is called by the DAGCombiner when visiting sext/zext/aext
7753/// dag nodes (see for example method DAGCombiner::visitSIGN_EXTEND).
7754/// Vector extends are not folded if operations are legal; this is to
7755/// avoid introducing illegal build_vector dag nodes.
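/// For example, assuming v4i32 is a legal type, this turns
/// (v4i32 (sext (v4i16 build_vector <1, -2, undef, 4>))) into
/// (v4i32 build_vector <1, -2, undef, 4>), sign-extending each constant
/// element and preserving undef lanes.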
7756static SDNode *tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI,
7757 SelectionDAG &DAG, bool LegalTypes,
7758 bool LegalOperations) {
7759 unsigned Opcode = N->getOpcode();
7760 SDValue N0 = N->getOperand(0);
7761 EVT VT = N->getValueType(0);
7762
7763 assert((Opcode == ISD::SIGN_EXTEND || Opcode == ISD::ZERO_EXTEND ||
7764 Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
7765 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG)
7766 && "Expected EXTEND dag node in input!");
7767
7768 // fold (sext c1) -> c1
7769 // fold (zext c1) -> c1
7770 // fold (aext c1) -> c1
7771 if (isa<ConstantSDNode>(N0))
7772 return DAG.getNode(Opcode, SDLoc(N), VT, N0).getNode();
7773
7774 // fold (sext (build_vector AllConstants) -> (build_vector AllConstants)
7775 // fold (zext (build_vector AllConstants) -> (build_vector AllConstants)
7776 // fold (aext (build_vector AllConstants) -> (build_vector AllConstants)
7777 EVT SVT = VT.getScalarType();
7778 if (!(VT.isVector() &&
7779 (!LegalTypes || (!LegalOperations && TLI.isTypeLegal(SVT))) &&
7780 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())))
7781 return nullptr;
7782
7783 // We can fold this node into a build_vector.
7784 unsigned VTBits = SVT.getSizeInBits();
7785 unsigned EVTBits = N0->getValueType(0).getScalarSizeInBits();
7786 SmallVector<SDValue, 8> Elts;
7787 unsigned NumElts = VT.getVectorNumElements();
7788 SDLoc DL(N);
7789
7790 for (unsigned i=0; i != NumElts; ++i) {
7791 SDValue Op = N0->getOperand(i);
7792 if (Op->isUndef()) {
7793 Elts.push_back(DAG.getUNDEF(SVT));
7794 continue;
7795 }
7796
7797 SDLoc DL(Op);
7798 // Get the constant value and if needed trunc it to the size of the type.
7799 // Nodes like build_vector might have constants wider than the scalar type.
7800 APInt C = cast<ConstantSDNode>(Op)->getAPIntValue().zextOrTrunc(EVTBits);
7801 if (Opcode == ISD::SIGN_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG)
7802 Elts.push_back(DAG.getConstant(C.sext(VTBits), DL, SVT));
7803 else
7804 Elts.push_back(DAG.getConstant(C.zext(VTBits), DL, SVT));
7805 }
7806
7807 return DAG.getBuildVector(VT, DL, Elts).getNode();
7808}
7809
7810 // ExtendUsesToFormExtLoad - Try to extend uses of a load to enable this:
7811 // "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))"
7812 // transformation. Returns true if extensions are possible and the
7813 // above-mentioned transformation is profitable.
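// For example, if (load x) also feeds (setcc (load x), 42, eq), that setcc
// is recorded in ExtendNodes so it can later be rewritten to compare the
// extended load against the extended constant, leaving no remaining use of
// the narrow value. (The constant 42 here is purely illustrative.)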
7814static bool ExtendUsesToFormExtLoad(EVT VT, SDNode *N, SDValue N0,
7815 unsigned ExtOpc,
7816 SmallVectorImpl<SDNode *> &ExtendNodes,
7817 const TargetLowering &TLI) {
7818 bool HasCopyToRegUses = false;
7819 bool isTruncFree = TLI.isTruncateFree(VT, N0.getValueType());
7820 for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
7821 UE = N0.getNode()->use_end();
7822 UI != UE; ++UI) {
7823 SDNode *User = *UI;
7824 if (User == N)
7825 continue;
7826 if (UI.getUse().getResNo() != N0.getResNo())
7827 continue;
7828 // FIXME: Only extend SETCC N, N and SETCC N, c for now.
7829 if (ExtOpc != ISD::ANY_EXTEND && User->getOpcode() == ISD::SETCC) {
7830 ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get();
7831 if (ExtOpc == ISD::ZERO_EXTEND && ISD::isSignedIntSetCC(CC))
7832 // Sign bits will be lost after a zext.
7833 return false;
7834 bool Add = false;
7835 for (unsigned i = 0; i != 2; ++i) {
7836 SDValue UseOp = User->getOperand(i);
7837 if (UseOp == N0)
7838 continue;
7839 if (!isa<ConstantSDNode>(UseOp))
7840 return false;
7841 Add = true;
7842 }
7843 if (Add)
7844 ExtendNodes.push_back(User);
7845 continue;
7846 }
7847 // If truncates aren't free and there are users we can't
7848 // extend, it isn't worthwhile.
7849 if (!isTruncFree)
7850 return false;
7851 // Remember if this value is live-out.
7852 if (User->getOpcode() == ISD::CopyToReg)
7853 HasCopyToRegUses = true;
7854 }
7855
7856 if (HasCopyToRegUses) {
7857 bool BothLiveOut = false;
7858 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
7859 UI != UE; ++UI) {
7860 SDUse &Use = UI.getUse();
7861 if (Use.getResNo() == 0 && Use.getUser()->getOpcode() == ISD::CopyToReg) {
7862 BothLiveOut = true;
7863 break;
7864 }
7865 }
7866 if (BothLiveOut)
7867 // Both unextended and extended values are live out. There had better be
7868 // a good reason for the transformation.
7869 return ExtendNodes.size();
7870 }
7871 return true;
7872}
7873
7874void DAGCombiner::ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
7875 SDValue OrigLoad, SDValue ExtLoad,
7876 ISD::NodeType ExtType) {
7877 // Extend SetCC uses if necessary.
7878 SDLoc DL(ExtLoad);
7879 for (SDNode *SetCC : SetCCs) {
7880 SmallVector<SDValue, 4> Ops;
7881
7882 for (unsigned j = 0; j != 2; ++j) {
7883 SDValue SOp = SetCC->getOperand(j);
7884 if (SOp == OrigLoad)
7885 Ops.push_back(ExtLoad);
7886 else
7887 Ops.push_back(DAG.getNode(ExtType, DL, ExtLoad->getValueType(0), SOp));
7888 }
7889
7890 Ops.push_back(SetCC->getOperand(2));
7891 CombineTo(SetCC, DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
7892 }
7893}
7894
7895// FIXME: Bring more similar combines here, common to sext/zext (maybe aext?).
7896SDValue DAGCombiner::CombineExtLoad(SDNode *N) {
7897 SDValue N0 = N->getOperand(0);
7898 EVT DstVT = N->getValueType(0);
7899 EVT SrcVT = N0.getValueType();
7900
7901 assert((N->getOpcode() == ISD::SIGN_EXTEND ||
7902 N->getOpcode() == ISD::ZERO_EXTEND) &&
7903 "Unexpected node type (not an extend)!");
7904
7905 // fold (sext (load x)) to multiple smaller sextloads; same for zext.
7906 // For example, on a target with legal v4i32, but illegal v8i32, turn:
7907 // (v8i32 (sext (v8i16 (load x))))
7908 // into:
7909 // (v8i32 (concat_vectors (v4i32 (sextload x)),
7910 // (v4i32 (sextload (x + 16)))))
7911 // Where uses of the original load, i.e.:
7912 // (v8i16 (load x))
7913 // are replaced with:
7914 // (v8i16 (truncate
7915 // (v8i32 (concat_vectors (v4i32 (sextload x)),
7916 // (v4i32 (sextload (x + 16)))))))
7917 //
7918 // This combine is only applicable to illegal, but splittable, vectors.
7919 // All legal types, and illegal non-vector types, are handled elsewhere.
7920 // This combine is controlled by TargetLowering::isVectorLoadExtDesirable.
7921 //
7922 if (N0->getOpcode() != ISD::LOAD)
7923 return SDValue();
7924
7925 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
7926
7927 if (!ISD::isNON_EXTLoad(LN0) || !ISD::isUNINDEXEDLoad(LN0) ||
7928 !N0.hasOneUse() || LN0->isVolatile() || !DstVT.isVector() ||
7929 !DstVT.isPow2VectorType() || !TLI.isVectorLoadExtDesirable(SDValue(N, 0)))
7930 return SDValue();
7931
7932 SmallVector<SDNode *, 4> SetCCs;
7933 if (!ExtendUsesToFormExtLoad(DstVT, N, N0, N->getOpcode(), SetCCs, TLI))
7934 return SDValue();
7935
7936 ISD::LoadExtType ExtType =
7937 N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
7938
7939 // Try to split the vector types to get down to legal types.
7940 EVT SplitSrcVT = SrcVT;
7941 EVT SplitDstVT = DstVT;
7942 while (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT) &&
7943 SplitSrcVT.getVectorNumElements() > 1) {
7944 SplitDstVT = DAG.GetSplitDestVTs(SplitDstVT).first;
7945 SplitSrcVT = DAG.GetSplitDestVTs(SplitSrcVT).first;
7946 }
7947
7948 if (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT))
7949 return SDValue();
7950
7951 SDLoc DL(N);
7952 const unsigned NumSplits =
7953 DstVT.getVectorNumElements() / SplitDstVT.getVectorNumElements();
7954 const unsigned Stride = SplitSrcVT.getStoreSize();
7955 SmallVector<SDValue, 4> Loads;
7956 SmallVector<SDValue, 4> Chains;
7957
7958 SDValue BasePtr = LN0->getBasePtr();
7959 for (unsigned Idx = 0; Idx < NumSplits; Idx++) {
7960 const unsigned Offset = Idx * Stride;
7961 const unsigned Align = MinAlign(LN0->getAlignment(), Offset);
7962
7963 SDValue SplitLoad = DAG.getExtLoad(
7964 ExtType, SDLoc(LN0), SplitDstVT, LN0->getChain(), BasePtr,
7965 LN0->getPointerInfo().getWithOffset(Offset), SplitSrcVT, Align,
7966 LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
7967
7968 BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr,
7969 DAG.getConstant(Stride, DL, BasePtr.getValueType()));
7970
7971 Loads.push_back(SplitLoad.getValue(0));
7972 Chains.push_back(SplitLoad.getValue(1));
7973 }
7974
7975 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
7976 SDValue NewValue = DAG.getNode(ISD::CONCAT_VECTORS, DL, DstVT, Loads);
7977
7978 // Simplify TF.
7979 AddToWorklist(NewChain.getNode());
7980
7981 CombineTo(N, NewValue);
7982
7983 // Replace uses of the original load (before extension)
7984 // with a truncate of the concatenated sextloaded vectors.
7985 SDValue Trunc =
7986 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), NewValue);
7987 ExtendSetCCUses(SetCCs, N0, NewValue, (ISD::NodeType)N->getOpcode());
7988 CombineTo(N0.getNode(), Trunc, NewChain);
7989 return SDValue(N, 0); // Return N so it doesn't get rechecked!
7990}
7991
7992// fold (zext (and/or/xor (shl/shr (load x), cst), cst)) ->
7993// (and/or/xor (shl/shr (zextload x), (zext cst)), (zext cst))
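// For example, extending i32 to i64 (illustrative types), this rewrites
// (i64 (zext (and (srl (i32 (load x)), 8), 255))) into
// (i64 (and (srl (i64 (zextload x)), 8), 255)), absorbing the zext into
// the load.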
7994SDValue DAGCombiner::CombineZExtLogicopShiftLoad(SDNode *N) {
7995 assert(N->getOpcode() == ISD::ZERO_EXTEND);
7996 EVT VT = N->getValueType(0);
7997
7998 // and/or/xor
7999 SDValue N0 = N->getOperand(0);
8000 if (!(N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
8001 N0.getOpcode() == ISD::XOR) ||
8002 N0.getOperand(1).getOpcode() != ISD::Constant ||
8003 (LegalOperations && !TLI.isOperationLegal(N0.getOpcode(), VT)))
8004 return SDValue();
8005
8006 // shl/shr
8007 SDValue N1 = N0->getOperand(0);
8008 if (!(N1.getOpcode() == ISD::SHL || N1.getOpcode() == ISD::SRL) ||
8009 N1.getOperand(1).getOpcode() != ISD::Constant ||
8010 (LegalOperations && !TLI.isOperationLegal(N1.getOpcode(), VT)))
8011 return SDValue();
8012
8013 // load
8014 if (!isa<LoadSDNode>(N1.getOperand(0)))
8015 return SDValue();
8016 LoadSDNode *Load = cast<LoadSDNode>(N1.getOperand(0));
8017 EVT MemVT = Load->getMemoryVT();
8018 if (!TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT) ||
8019 Load->getExtensionType() == ISD::SEXTLOAD || Load->isIndexed())
8020 return SDValue();
8021
8022
8023 // If the shift op is SHL, the logic op must be AND, otherwise the result
8024 // will be wrong.
8025 if (N1.getOpcode() == ISD::SHL && N0.getOpcode() != ISD::AND)
8026 return SDValue();
8027
8028 if (!N0.hasOneUse() || !N1.hasOneUse())
8029 return SDValue();
8030
8031 SmallVector<SDNode*, 4> SetCCs;
8032 if (!ExtendUsesToFormExtLoad(VT, N1.getNode(), N1.getOperand(0),
8033 ISD::ZERO_EXTEND, SetCCs, TLI))
8034 return SDValue();
8035
8036 // Actually do the transformation.
8037 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(Load), VT,
8038 Load->getChain(), Load->getBasePtr(),
8039 Load->getMemoryVT(), Load->getMemOperand());
8040
8041 SDLoc DL1(N1);
8042 SDValue Shift = DAG.getNode(N1.getOpcode(), DL1, VT, ExtLoad,
8043 N1.getOperand(1));
8044
8045 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
8046 Mask = Mask.zext(VT.getSizeInBits());
8047 SDLoc DL0(N0);
8048 SDValue And = DAG.getNode(N0.getOpcode(), DL0, VT, Shift,
8049 DAG.getConstant(Mask, DL0, VT));
8050
8051 ExtendSetCCUses(SetCCs, N1.getOperand(0), ExtLoad, ISD::ZERO_EXTEND);
8052 CombineTo(N, And);
8053 if (SDValue(Load, 0).hasOneUse()) {
8054 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), ExtLoad.getValue(1));
8055 } else {
8056 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(Load),
8057 Load->getValueType(0), ExtLoad);
8058 CombineTo(Load, Trunc, ExtLoad.getValue(1));
8059 }
8060 return SDValue(N,0); // Return N so it doesn't get rechecked!
8061}
8062
8063/// If we're narrowing or widening the result of a vector select and the final
8064/// size is the same size as a setcc (compare) feeding the select, then try to
8065/// apply the cast operation to the select's operands because matching vector
8066/// sizes for a select condition and other operands should be more efficient.
8067SDValue DAGCombiner::matchVSelectOpSizesWithSetCC(SDNode *Cast) {
8068 unsigned CastOpcode = Cast->getOpcode();
8069 assert((CastOpcode == ISD::SIGN_EXTEND || CastOpcode == ISD::ZERO_EXTEND ||
8070 CastOpcode == ISD::TRUNCATE || CastOpcode == ISD::FP_EXTEND ||
8071 CastOpcode == ISD::FP_ROUND) &&
8072 "Unexpected opcode for vector select narrowing/widening");
8073
8074 // We only do this transform before legal ops because the pattern may be
8075 // obfuscated by target-specific operations after legalization. Do not create
8076 // an illegal select op, however, because that may be difficult to lower.
8077 EVT VT = Cast->getValueType(0);
8078 if (LegalOperations || !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
8079 return SDValue();
8080
8081 SDValue VSel = Cast->getOperand(0);
8082 if (VSel.getOpcode() != ISD::VSELECT || !VSel.hasOneUse() ||
8083 VSel.getOperand(0).getOpcode() != ISD::SETCC)
8084 return SDValue();
8085
8086 // Does the setcc have the same vector size as the casted select?
8087 SDValue SetCC = VSel.getOperand(0);
8088 EVT SetCCVT = getSetCCResultType(SetCC.getOperand(0).getValueType());
8089 if (SetCCVT.getSizeInBits() != VT.getSizeInBits())
8090 return SDValue();
8091
8092 // cast (vsel (setcc X), A, B) --> vsel (setcc X), (cast A), (cast B)
8093 SDValue A = VSel.getOperand(1);
8094 SDValue B = VSel.getOperand(2);
8095 SDValue CastA, CastB;
8096 SDLoc DL(Cast);
8097 if (CastOpcode == ISD::FP_ROUND) {
8098 // FP_ROUND (fptrunc) has an extra flag operand to pass along.
8099 CastA = DAG.getNode(CastOpcode, DL, VT, A, Cast->getOperand(1));
8100 CastB = DAG.getNode(CastOpcode, DL, VT, B, Cast->getOperand(1));
8101 } else {
8102 CastA = DAG.getNode(CastOpcode, DL, VT, A);
8103 CastB = DAG.getNode(CastOpcode, DL, VT, B);
8104 }
8105 return DAG.getNode(ISD::VSELECT, DL, VT, SetCC, CastA, CastB);
8106}
8107
8108// fold ([s|z]ext ([s|z]extload x)) -> ([s|z]ext (truncate ([s|z]extload x)))
8109// fold ([s|z]ext ( extload x)) -> ([s|z]ext (truncate ([s|z]extload x)))
8110static SDValue tryToFoldExtOfExtload(SelectionDAG &DAG, DAGCombiner &Combiner,
8111 const TargetLowering &TLI, EVT VT,
8112 bool LegalOperations, SDNode *N,
8113 SDValue N0, ISD::LoadExtType ExtLoadType) {
8114 SDNode *N0Node = N0.getNode();
8115 bool isAExtLoad = (ExtLoadType == ISD::SEXTLOAD) ? ISD::isSEXTLoad(N0Node)
8116 : ISD::isZEXTLoad(N0Node);
8117 if ((!isAExtLoad && !ISD::isEXTLoad(N0Node)) ||
8118 !ISD::isUNINDEXEDLoad(N0Node) || !N0.hasOneUse())
8119 return {};
8120
8121 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
8122 EVT MemVT = LN0->getMemoryVT();
8123 if ((LegalOperations || LN0->isVolatile()) &&
8124 !TLI.isLoadExtLegal(ExtLoadType, VT, MemVT))
8125 return {};
8126
8127 SDValue ExtLoad =
8128 DAG.getExtLoad(ExtLoadType, SDLoc(LN0), VT, LN0->getChain(),
8129 LN0->getBasePtr(), MemVT, LN0->getMemOperand());
8130 Combiner.CombineTo(N, ExtLoad);
8131 DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
8132 return SDValue(N, 0); // Return N so it doesn't get rechecked!
8133}
8134
8135// fold ([s|z]ext (load x)) -> ([s|z]ext (truncate ([s|z]extload x)))
8136// Only generate vector extloads when 1) they're legal, and 2) they are
8137// deemed desirable by the target.
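// For example, in the scalar case, (i32 (sext (i16 (load x)))) becomes a
// single i32 sextload of the i16 memory value when that extending load is
// legal or custom on the target.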
8138static SDValue tryToFoldExtOfLoad(SelectionDAG &DAG, DAGCombiner &Combiner,
8139 const TargetLowering &TLI, EVT VT,
8140 bool LegalOperations, SDNode *N, SDValue N0,
8141 ISD::LoadExtType ExtLoadType,
8142 ISD::NodeType ExtOpc) {
8143 if (!ISD::isNON_EXTLoad(N0.getNode()) ||
8144 !ISD::isUNINDEXEDLoad(N0.getNode()) ||
8145 ((LegalOperations || VT.isVector() ||
8146 cast<LoadSDNode>(N0)->isVolatile()) &&
8147 !TLI.isLoadExtLegal(ExtLoadType, VT, N0.getValueType())))
8148 return {};
8149
8150 bool DoXform = true;
8151 SmallVector<SDNode *, 4> SetCCs;
8152 if (!N0.hasOneUse())
8153 DoXform = ExtendUsesToFormExtLoad(VT, N, N0, ExtOpc, SetCCs, TLI);
8154 if (VT.isVector())
8155 DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0));
8156 if (!DoXform)
8157 return {};
8158
8159 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
8160 SDValue ExtLoad = DAG.getExtLoad(ExtLoadType, SDLoc(LN0), VT, LN0->getChain(),
8161 LN0->getBasePtr(), N0.getValueType(),
8162 LN0->getMemOperand());
8163 Combiner.ExtendSetCCUses(SetCCs, N0, ExtLoad, ExtOpc);
8164 // If the load value is used only by N, replace it via CombineTo N.
8165 bool NoReplaceTrunc = SDValue(LN0, 0).hasOneUse();
8166 Combiner.CombineTo(N, ExtLoad);
8167 if (NoReplaceTrunc) {
8168 DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
8169 } else {
8170 SDValue Trunc =
8171 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), ExtLoad);
8172 Combiner.CombineTo(LN0, Trunc, ExtLoad.getValue(1));
8173 }
8174 return SDValue(N, 0); // Return N so it doesn't get rechecked!
8175}
8176
8177static SDValue foldExtendedSignBitTest(SDNode *N, SelectionDAG &DAG,
8178 bool LegalOperations) {
8179 assert((N->getOpcode() == ISD::SIGN_EXTEND ||
8180 N->getOpcode() == ISD::ZERO_EXTEND) && "Expected sext or zext");
8181
8182 SDValue SetCC = N->getOperand(0);
8183 if (LegalOperations || SetCC.getOpcode() != ISD::SETCC ||
8184 !SetCC.hasOneUse() || SetCC.getValueType() != MVT::i1)
8185 return SDValue();
8186
8187 SDValue X = SetCC.getOperand(0);
8188 SDValue Ones = SetCC.getOperand(1);
8189 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
8190 EVT VT = N->getValueType(0);
8191 EVT XVT = X.getValueType();
8192 // setge X, C is canonicalized to setgt, so we do not need to match that
8193 // pattern. The setlt sibling is folded in SimplifySelectCC() because it does
8194 // not require the 'not' op.
8195 if (CC == ISD::SETGT && isAllOnesConstant(Ones) && VT == XVT) {
8196 // Invert and smear/shift the sign bit:
8197 // sext i1 (setgt iN X, -1) --> sra (not X), (N - 1)
8198 // zext i1 (setgt iN X, -1) --> srl (not X), (N - 1)
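// For example, with i32 X = 7: setgt 7, -1 is true, so the sext yields -1,
// and sra (not 7), 31 = sra -8, 31 = -1 as well. With X = -7: the setcc is
// false (result 0), and sra (not -7), 31 = sra 6, 31 = 0.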
8199 SDLoc DL(N);
8200 SDValue NotX = DAG.getNOT(DL, X, VT);
8201 SDValue ShiftAmount = DAG.getConstant(VT.getSizeInBits() - 1, DL, VT);
8202 auto ShiftOpcode = N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SRA : ISD::SRL;
8203 return DAG.getNode(ShiftOpcode, DL, VT, NotX, ShiftAmount);
8204 }
8205 return SDValue();
8206}
8207
8208SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
8209 SDValue N0 = N->getOperand(0);
8210 EVT VT = N->getValueType(0);
8211 SDLoc DL(N);
8212
8213 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
8214 LegalOperations))
8215 return SDValue(Res, 0);
8216
8217 // fold (sext (sext x)) -> (sext x)
8218 // fold (sext (aext x)) -> (sext x)
8219 if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
8220 return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, N0.getOperand(0));
8221
8222 if (N0.getOpcode() == ISD::TRUNCATE) {
8223 // fold (sext (truncate (load x))) -> (sext (smaller load x))
8224 // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
8225 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
8226 SDNode *oye = N0.getOperand(0).getNode();
8227 if (NarrowLoad.getNode() != N0.getNode()) {
8228 CombineTo(N0.getNode(), NarrowLoad);
8229 // CombineTo deleted the truncate, if needed, but not what's under it.
8230 AddToWorklist(oye);
8231 }
8232 return SDValue(N, 0); // Return N so it doesn't get rechecked!
8233 }
8234
8235 // See if the value being truncated is already sign extended. If so, just
8236 // eliminate the trunc/sext pair.
8237 SDValue Op = N0.getOperand(0);
8238 unsigned OpBits = Op.getScalarValueSizeInBits();
8239 unsigned MidBits = N0.getScalarValueSizeInBits();
8240 unsigned DestBits = VT.getScalarSizeInBits();
8241 unsigned NumSignBits = DAG.ComputeNumSignBits(Op);
8242
8243 if (OpBits == DestBits) {
8244 // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign
8245 // bits, it is already fully sign-extended and can be used directly.
8246 if (NumSignBits > DestBits-MidBits)
8247 return Op;
8248 } else if (OpBits < DestBits) {
8249 // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign
8250 // bits, just sext from i32.
8251 if (NumSignBits > OpBits-MidBits)
8252 return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op);
8253 } else {
8254 // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign
8255 // bits, just truncate to i32.
8256 if (NumSignBits > OpBits-MidBits)
8257 return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
8258 }
8259
8260 // fold (sext (truncate x)) -> (sextinreg x).
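// For example, (sext (i8 (trunc X:i32)) to i32) becomes
// (sign_extend_inreg X:i32, i8), which keeps the value in i32 throughout.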
8261 if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG,
8262 N0.getValueType())) {
8263 if (OpBits < DestBits)
8264 Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N0), VT, Op);
8265 else if (OpBits > DestBits)
8266 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), VT, Op);
8267 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Op,
8268 DAG.getValueType(N0.getValueType()));
8269 }
8270 }
8271
8272 // Try to simplify (sext (load x)).
8273 if (SDValue foldedExt =
8274 tryToFoldExtOfLoad(DAG, *this, TLI, VT, LegalOperations, N, N0,
8275 ISD::SEXTLOAD, ISD::SIGN_EXTEND))
8276 return foldedExt;
8277
8278 // fold (sext (load x)) to multiple smaller sextloads.
8279 // Only on illegal but splittable vectors.
8280 if (SDValue ExtLoad = CombineExtLoad(N))
8281 return ExtLoad;
8282
8283 // Try to simplify (sext (sextload x)).
8284 if (SDValue foldedExt = tryToFoldExtOfExtload(
8285 DAG, *this, TLI, VT, LegalOperations, N, N0, ISD::SEXTLOAD))
8286 return foldedExt;
8287
8288 // fold (sext (and/or/xor (load x), cst)) ->
8289 // (and/or/xor (sextload x), (sext cst))
8290 if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
8291 N0.getOpcode() == ISD::XOR) &&
8292 isa<LoadSDNode>(N0.getOperand(0)) &&
8293 N0.getOperand(1).getOpcode() == ISD::Constant &&
8294 (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) {
8295 LoadSDNode *LN00 = cast<LoadSDNode>(N0.getOperand(0));
8296 EVT MemVT = LN00->getMemoryVT();
8297 if (TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, MemVT) &&
8298 LN00->getExtensionType() != ISD::ZEXTLOAD && LN00->isUnindexed()) {
8299 SmallVector<SDNode*, 4> SetCCs;
8300 bool DoXform = ExtendUsesToFormExtLoad(VT, N0.getNode(), N0.getOperand(0),
8301 ISD::SIGN_EXTEND, SetCCs, TLI);
8302 if (DoXform) {
8303 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(LN00), VT,
8304 LN00->getChain(), LN00->getBasePtr(),
8305 LN00->getMemoryVT(),
8306 LN00->getMemOperand());
8307 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
8308 Mask = Mask.sext(VT.getSizeInBits());
8309 SDValue And = DAG.getNode(N0.getOpcode(), DL, VT,
8310 ExtLoad, DAG.getConstant(Mask, DL, VT));
8311 ExtendSetCCUses(SetCCs, N0.getOperand(0), ExtLoad, ISD::SIGN_EXTEND);
8312 bool NoReplaceTruncAnd = !N0.hasOneUse();
8313 bool NoReplaceTrunc = SDValue(LN00, 0).hasOneUse();
8314 CombineTo(N, And);
8315 // If N0 has multiple uses, change other uses as well.
8316 if (NoReplaceTruncAnd) {
8317 SDValue TruncAnd =
8318 DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), And);
8319 CombineTo(N0.getNode(), TruncAnd);
8320 }
8321 if (NoReplaceTrunc) {
8322 DAG.ReplaceAllUsesOfValueWith(SDValue(LN00, 1), ExtLoad.getValue(1));
8323 } else {
8324 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(LN00),
8325 LN00->getValueType(0), ExtLoad);
8326 CombineTo(LN00, Trunc, ExtLoad.getValue(1));
8327 }
8328 return SDValue(N,0); // Return N so it doesn't get rechecked!
8329 }
8330 }
8331 }
8332
8333 if (SDValue V = foldExtendedSignBitTest(N, DAG, LegalOperations))
8334 return V;
8335
8336 if (N0.getOpcode() == ISD::SETCC) {
8337 SDValue N00 = N0.getOperand(0);
8338 SDValue N01 = N0.getOperand(1);
8339 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
8340 EVT N00VT = N0.getOperand(0).getValueType();
8341
8342 // sext(setcc) -> sext_in_reg(vsetcc) for vectors.
8343 // Only do this before legalize for now.
8344 if (VT.isVector() && !LegalOperations &&
8345 TLI.getBooleanContents(N00VT) ==
8346 TargetLowering::ZeroOrNegativeOneBooleanContent) {
8347 // On some architectures (such as SSE/NEON/etc) the SETCC result type is
8348 // of the same size as the compared operands. Only optimize sext(setcc())
8349 // if this is the case.
8350 EVT SVT = getSetCCResultType(N00VT);
8351
8352 // We know that the # elements of the results is the same as the
8353 // # elements of the compare (and the # elements of the compare result
8354 // for that matter). Check to see that they are the same size. If so,
8355 // we know that the element size of the sext'd result matches the
8356 // element size of the compare operands.
8357 if (VT.getSizeInBits() == SVT.getSizeInBits())
8358 return DAG.getSetCC(DL, VT, N00, N01, CC);
8359
8360 // If the desired elements are smaller or larger than the source
8361 // elements, we can use a matching integer vector type and then
8362 // truncate/sign extend.
8363 EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
8364 if (SVT == MatchingVecType) {
8365 SDValue VsetCC = DAG.getSetCC(DL, MatchingVecType, N00, N01, CC);
8366 return DAG.getSExtOrTrunc(VsetCC, DL, VT);
8367 }
8368 }
8369
8370 // sext(setcc x, y, cc) -> (select (setcc x, y, cc), T, 0)
8371 // Here, T can be 1 or -1, depending on the type of the setcc and
8372 // getBooleanContents().
8373 unsigned SetCCWidth = N0.getScalarValueSizeInBits();
8374
8375 // To determine the "true" side of the select, we need to know the high bit
8376 // of the value returned by the setcc if it evaluates to true.
8377 // If the type of the setcc is i1, then the true case of the select is just
8378 // sext(i1 1), that is, -1.
8379 // If the type of the setcc is larger (say, i8) then the value of the high
8380 // bit depends on getBooleanContents(), so ask TLI for a real "true" value
8381 // of the appropriate width.
8382 SDValue ExtTrueVal = (SetCCWidth == 1)
8383 ? DAG.getAllOnesConstant(DL, VT)
8384 : DAG.getBoolConstant(true, DL, VT, N00VT);
8385 SDValue Zero = DAG.getConstant(0, DL, VT);
8386 if (SDValue SCC =
8387 SimplifySelectCC(DL, N00, N01, ExtTrueVal, Zero, CC, true))
8388 return SCC;
8389
8390 if (!VT.isVector() && !TLI.convertSelectOfConstantsToMath(VT)) {
8391 EVT SetCCVT = getSetCCResultType(N00VT);
8392 // Don't do this transform for i1 because there's a select transform
8393 // that would reverse it.
8394 // TODO: We should not do this transform at all without a target hook
8395 // because a sext is likely cheaper than a select?
8396 if (SetCCVT.getScalarSizeInBits() != 1 &&
8397 (!LegalOperations || TLI.isOperationLegal(ISD::SETCC, N00VT))) {
8398 SDValue SetCC = DAG.getSetCC(DL, SetCCVT, N00, N01, CC);
8399 return DAG.getSelect(DL, VT, SetCC, ExtTrueVal, Zero);
8400 }
8401 }
8402 }
8403
8404 // fold (sext x) -> (zext x) if the sign bit is known zero.
8405 if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) &&
8406 DAG.SignBitIsZero(N0))
8407 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0);
8408
8409 if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
8410 return NewVSel;
8411
8412 return SDValue();
8413}
8414
8415 // isTruncateOf - If N is a truncate of some other value, return true and
8416 // record the value being truncated in Op and which of Op's bits are zero/one
8417 // in Known. This function computes KnownBits to avoid a duplicated call to
8418 // computeKnownBits in the caller.
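// For example, (setne X, 0) where the known bits prove X is either 0 or 1
// acts like (truncate X) to i1, so a zext of that setcc can reuse X directly.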
8419static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op,
8420 KnownBits &Known) {
8421 if (N->getOpcode() == ISD::TRUNCATE) {
8422 Op = N->getOperand(0);
8423 DAG.computeKnownBits(Op, Known);
8424 return true;
8425 }
8426
8427 if (N->getOpcode() != ISD::SETCC || N->getValueType(0) != MVT::i1 ||
8428 cast<CondCodeSDNode>(N->getOperand(2))->get() != ISD::SETNE)
8429 return false;
8430
8431 SDValue Op0 = N->getOperand(0);
8432 SDValue Op1 = N->getOperand(1);
8433 assert(Op0.getValueType() == Op1.getValueType());
8434
8435 if (isNullConstant(Op0))
8436 Op = Op1;
8437 else if (isNullConstant(Op1))
8438 Op = Op0;
8439 else
8440 return false;
8441
8442 DAG.computeKnownBits(Op, Known);
8443
8444 if (!(Known.Zero | 1).isAllOnesValue())
8445 return false;
8446
8447 return true;
8448}
8449
8450SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
8451 SDValue N0 = N->getOperand(0);
8452 EVT VT = N->getValueType(0);
8453
8454 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
8455 LegalOperations))
8456 return SDValue(Res, 0);
8457
8458 // fold (zext (zext x)) -> (zext x)
8459 // fold (zext (aext x)) -> (zext x)
8460 if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
8461 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT,
8462 N0.getOperand(0));
8463
8464 // fold (zext (truncate x)) -> (zext x) or
8465 // (zext (truncate x)) -> (truncate x)
8466 // This is valid when the truncated bits of x are already zero.
8467 // FIXME: We should extend this to work for vectors too.
8468 SDValue Op;
8469 KnownBits Known;
8470 if (!VT.isVector() && isTruncateOf(DAG, N0, Op, Known)) {
8471 APInt TruncatedBits =
8472 (Op.getValueSizeInBits() == N0.getValueSizeInBits()) ?
8473 APInt(Op.getValueSizeInBits(), 0) :
8474 APInt::getBitsSet(Op.getValueSizeInBits(),
8475 N0.getValueSizeInBits(),
8476 std::min(Op.getValueSizeInBits(),
8477 VT.getSizeInBits()));
8478 if (TruncatedBits.isSubsetOf(Known.Zero))
8479 return DAG.getZExtOrTrunc(Op, SDLoc(N), VT);
8480 }
8481
8482 // fold (zext (truncate x)) -> (and x, mask)
8483 if (N0.getOpcode() == ISD::TRUNCATE) {
8484 // fold (zext (truncate (load x))) -> (zext (smaller load x))
8485 // fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n)))
8486 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
8487 SDNode *oye = N0.getOperand(0).getNode();
8488 if (NarrowLoad.getNode() != N0.getNode()) {
8489 CombineTo(N0.getNode(), NarrowLoad);
8490 // CombineTo deleted the truncate, if needed, but not what's under it.
8491 AddToWorklist(oye);
8492 }
8493 return SDValue(N, 0); // Return N so it doesn't get rechecked!
8494 }
8495
8496 EVT SrcVT = N0.getOperand(0).getValueType();
8497 EVT MinVT = N0.getValueType();
8498
8499 // Try to mask before the extension to avoid having to generate a larger mask,
8500 // possibly over several sub-vectors.
8501 if (SrcVT.bitsLT(VT) && VT.isVector()) {
8502 if (!LegalOperations || (TLI.isOperationLegal(ISD::AND, SrcVT) &&
8503 TLI.isOperationLegal(ISD::ZERO_EXTEND, VT))) {
8504 SDValue Op = N0.getOperand(0);
8505 Op = DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT.getScalarType());
8506 AddToWorklist(Op.getNode());
8507 SDValue ZExtOrTrunc = DAG.getZExtOrTrunc(Op, SDLoc(N), VT);
8508 // Transfer the debug info; the new node is equivalent to N0.
8509 DAG.transferDbgValues(N0, ZExtOrTrunc);
8510 return ZExtOrTrunc;
8511 }
8512 }
8513
8514 if (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT)) {
8515 SDValue Op = DAG.getAnyExtOrTrunc(N0.getOperand(0), SDLoc(N), VT);
8516 AddToWorklist(Op.getNode());
8517 SDValue And = DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT.getScalarType());
8518 // We may safely transfer the debug info describing the truncate node over
8519 // to the equivalent and operation.
8520 DAG.transferDbgValues(N0, And);
8521 return And;
8522 }
8523 }
8524
8525 // Fold (zext (and (trunc x), cst)) -> (and x, cst),
8526 // if either of the casts is not free.
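// For example, (zext (and (trunc X:i64 to i32), 255) to i64) can become
// (and X, 255), assuming the truncate or the zext is not free on the target.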
8527 if (N0.getOpcode() == ISD::AND &&
8528 N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
8529 N0.getOperand(1).getOpcode() == ISD::Constant &&
8530 (!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
8531 N0.getValueType()) ||
8532 !TLI.isZExtFree(N0.getValueType(), VT))) {
8533 SDValue X = N0.getOperand(0).getOperand(0);
8534 X = DAG.getAnyExtOrTrunc(X, SDLoc(X), VT);
8535 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
8536 Mask = Mask.zext(VT.getSizeInBits());
8537 SDLoc DL(N);
8538 return DAG.getNode(ISD::AND, DL, VT,
8539 X, DAG.getConstant(Mask, DL, VT));
8540 }
8541
8542 // Try to simplify (zext (load x)).
8543 if (SDValue foldedExt =
8544 tryToFoldExtOfLoad(DAG, *this, TLI, VT, LegalOperations, N, N0,
8545 ISD::ZEXTLOAD, ISD::ZERO_EXTEND))
8546 return foldedExt;
8547
8548 // fold (zext (load x)) to multiple smaller zextloads.
8549 // Only on illegal but splittable vectors.
8550 if (SDValue ExtLoad = CombineExtLoad(N))
8551 return ExtLoad;
8552
8553 // fold (zext (and/or/xor (load x), cst)) ->
8554 // (and/or/xor (zextload x), (zext cst))
8555 // Unless (and (load x) cst) will match as a zextload already and has
8556 // additional users.
8557 if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
8558 N0.getOpcode() == ISD::XOR) &&
8559 isa<LoadSDNode>(N0.getOperand(0)) &&
8560 N0.getOperand(1).getOpcode() == ISD::Constant &&
8561 (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) {
8562 LoadSDNode *LN00 = cast<LoadSDNode>(N0.getOperand(0));
8563 EVT MemVT = LN00->getMemoryVT();
8564 if (TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT) &&
8565 LN00->getExtensionType() != ISD::SEXTLOAD && LN00->isUnindexed()) {
8566 bool DoXform = true;
8567 SmallVector<SDNode*, 4> SetCCs;
8568 if (!N0.hasOneUse()) {
8569 if (N0.getOpcode() == ISD::AND) {
8570 auto *AndC = cast<ConstantSDNode>(N0.getOperand(1));
8571 EVT LoadResultTy = AndC->getValueType(0);
8572 EVT ExtVT;
8573 if (isAndLoadExtLoad(AndC, LN00, LoadResultTy, ExtVT))
8574 DoXform = false;
8575 }
8576 }
8577 if (DoXform)
8578 DoXform = ExtendUsesToFormExtLoad(VT, N0.getNode(), N0.getOperand(0),
8579 ISD::ZERO_EXTEND, SetCCs, TLI);
8580 if (DoXform) {
8581 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN00), VT,
8582 LN00->getChain(), LN00->getBasePtr(),
8583 LN00->getMemoryVT(),
8584 LN00->getMemOperand());
8585 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
8586 Mask = Mask.zext(VT.getSizeInBits());
8587 SDLoc DL(N);
8588 SDValue And = DAG.getNode(N0.getOpcode(), DL, VT,
8589 ExtLoad, DAG.getConstant(Mask, DL, VT));
8590 ExtendSetCCUses(SetCCs, N0.getOperand(0), ExtLoad, ISD::ZERO_EXTEND);
8591 bool NoReplaceTruncAnd = !N0.hasOneUse();
8592 bool NoReplaceTrunc = SDValue(LN00, 0).hasOneUse();
8593 CombineTo(N, And);
8594 // If N0 has multiple uses, change other uses as well.
8595 if (NoReplaceTruncAnd) {
8596 SDValue TruncAnd =
8597 DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), And);
8598 CombineTo(N0.getNode(), TruncAnd);
8599 }
8600 if (NoReplaceTrunc) {
8601 DAG.ReplaceAllUsesOfValueWith(SDValue(LN00, 1), ExtLoad.getValue(1));
8602 } else {
8603 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(LN00),
8604 LN00->getValueType(0), ExtLoad);
8605 CombineTo(LN00, Trunc, ExtLoad.getValue(1));
8606 }
8607 return SDValue(N,0); // Return N so it doesn't get rechecked!
8608 }
8609 }
8610 }
8611
8612 // fold (zext (and/or/xor (shl/shr (load x), cst), cst)) ->
8613 // (and/or/xor (shl/shr (zextload x), (zext cst)), (zext cst))
8614 if (SDValue ZExtLoad = CombineZExtLogicopShiftLoad(N))
8615 return ZExtLoad;
8616
8617 // Try to simplify (zext (zextload x)).
8618 if (SDValue foldedExt = tryToFoldExtOfExtload(
8619 DAG, *this, TLI, VT, LegalOperations, N, N0, ISD::ZEXTLOAD))
8620 return foldedExt;
8621
8622 if (SDValue V = foldExtendedSignBitTest(N, DAG, LegalOperations))
8623 return V;
8624
8625 if (N0.getOpcode() == ISD::SETCC) {
8626 // Only do this before legalize for now.
8627 if (!LegalOperations && VT.isVector() &&
8628 N0.getValueType().getVectorElementType() == MVT::i1) {
8629 EVT N00VT = N0.getOperand(0).getValueType();
8630 if (getSetCCResultType(N00VT) == N0.getValueType())
8631 return SDValue();
8632
8633 // We know that the # elements of the results is the same as the #
8634 // elements of the compare (and the # elements of the compare result for
8635 // that matter). Check to see that they are the same size. If so, we know
8636 // that the element size of the sext'd result matches the element size of
8637 // the compare operands.
8638 SDLoc DL(N);
8639 SDValue VecOnes = DAG.getConstant(1, DL, VT);
8640 if (VT.getSizeInBits() == N00VT.getSizeInBits()) {
8641 // zext(setcc) -> (and (vsetcc), (1, 1, ...) for vectors.
8642 SDValue VSetCC = DAG.getNode(ISD::SETCC, DL, VT, N0.getOperand(0),
8643 N0.getOperand(1), N0.getOperand(2));
8644 return DAG.getNode(ISD::AND, DL, VT, VSetCC, VecOnes);
8645 }
8646
8647 // If the desired elements are smaller or larger than the source
8648 // elements we can use a matching integer vector type and then
8649 // truncate/sign extend.
8650 EVT MatchingVectorType = N00VT.changeVectorElementTypeToInteger();
8651 SDValue VsetCC =
8652 DAG.getNode(ISD::SETCC, DL, MatchingVectorType, N0.getOperand(0),
8653 N0.getOperand(1), N0.getOperand(2));
8654 return DAG.getNode(ISD::AND, DL, VT, DAG.getSExtOrTrunc(VsetCC, DL, VT),
8655 VecOnes);
8656 }
8657
8658 // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
8659 SDLoc DL(N);
8660 if (SDValue SCC = SimplifySelectCC(
8661 DL, N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, DL, VT),
8662 DAG.getConstant(0, DL, VT),
8663 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true))
8664 return SCC;
8665 }
8666
8667 // (zext (shl (zext x), cst)) -> (shl (zext x), cst)
8668 if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL) &&
8669 isa<ConstantSDNode>(N0.getOperand(1)) &&
8670 N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND &&
8671 N0.hasOneUse()) {
8672 SDValue ShAmt = N0.getOperand(1);
8673 unsigned ShAmtVal = cast<ConstantSDNode>(ShAmt)->getZExtValue();
8674 if (N0.getOpcode() == ISD::SHL) {
8675 SDValue InnerZExt = N0.getOperand(0);
8676 // If the original shl may be shifting out bits, do not perform this
8677 // transformation.
8678 unsigned KnownZeroBits = InnerZExt.getValueSizeInBits() -
8679 InnerZExt.getOperand(0).getValueSizeInBits();
8680 if (ShAmtVal > KnownZeroBits)
8681 return SDValue();
8682 }
8683
8684 SDLoc DL(N);
8685
8686 // Ensure that the shift amount is wide enough for the shifted value.
8687 if (VT.getSizeInBits() >= 256)
8688 ShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShAmt);
8689
8690 return DAG.getNode(N0.getOpcode(), DL, VT,
8691 DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)),
8692 ShAmt);
8693 }
8694
8695 if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
8696 return NewVSel;
8697
8698 return SDValue();
8699}
8700
8701SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
8702 SDValue N0 = N->getOperand(0);
8703 EVT VT = N->getValueType(0);
8704
8705 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
8706 LegalOperations))
8707 return SDValue(Res, 0);
8708
8709 // fold (aext (aext x)) -> (aext x)
8710 // fold (aext (zext x)) -> (zext x)
8711 // fold (aext (sext x)) -> (sext x)
8712 if (N0.getOpcode() == ISD::ANY_EXTEND ||
8713 N0.getOpcode() == ISD::ZERO_EXTEND ||
8714 N0.getOpcode() == ISD::SIGN_EXTEND)
8715 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0));
8716
8717 // fold (aext (truncate (load x))) -> (aext (smaller load x))
8718 // fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n)))
8719 if (N0.getOpcode() == ISD::TRUNCATE) {
8720 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
8721 SDNode *oye = N0.getOperand(0).getNode();
8722 if (NarrowLoad.getNode() != N0.getNode()) {
8723 CombineTo(N0.getNode(), NarrowLoad);
8724 // CombineTo deleted the truncate, if needed, but not what's under it.
8725 AddToWorklist(oye);
8726 }
8727 return SDValue(N, 0); // Return N so it doesn't get rechecked!
8728 }
8729 }
8730
8731 // fold (aext (truncate x))
8732 if (N0.getOpcode() == ISD::TRUNCATE)
8733 return DAG.getAnyExtOrTrunc(N0.getOperand(0), SDLoc(N), VT);
8734
8735 // Fold (aext (and (trunc x), cst)) -> (and x, cst)
8736 // if the trunc is not free.
8737 if (N0.getOpcode() == ISD::AND &&
8738 N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
8739 N0.getOperand(1).getOpcode() == ISD::Constant &&
8740 !TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
8741 N0.getValueType())) {
8742 SDLoc DL(N);
8743 SDValue X = N0.getOperand(0).getOperand(0);
8744 X = DAG.getAnyExtOrTrunc(X, DL, VT);
8745 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
8746 Mask = Mask.zext(VT.getSizeInBits());
8747 return DAG.getNode(ISD::AND, DL, VT,
8748 X, DAG.getConstant(Mask, DL, VT));
8749 }
8750
8751 // fold (aext (load x)) -> (aext (truncate (extload x)))
8752 // None of the supported targets knows how to perform load and any_ext
8753 // on vectors in one instruction. We only perform this transformation on
8754 // scalars.
8755 if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() &&
8756 ISD::isUNINDEXEDLoad(N0.getNode()) &&
8757 TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) {
8758 bool DoXform = true;
8759 SmallVector<SDNode*, 4> SetCCs;
8760 if (!N0.hasOneUse())
8761 DoXform = ExtendUsesToFormExtLoad(VT, N, N0, ISD::ANY_EXTEND, SetCCs,
8762 TLI);
8763 if (DoXform) {
8764 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
8765 SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
8766 LN0->getChain(),
8767 LN0->getBasePtr(), N0.getValueType(),
8768 LN0->getMemOperand());
8769 ExtendSetCCUses(SetCCs, N0, ExtLoad, ISD::ANY_EXTEND);
8770 // If the load value is used only by N, replace it via CombineTo N.
8771 bool NoReplaceTrunc = N0.hasOneUse();
8772 CombineTo(N, ExtLoad);
8773 if (NoReplaceTrunc) {
8774 DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
8775 } else {
8776 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
8777 N0.getValueType(), ExtLoad);
8778 CombineTo(LN0, Trunc, ExtLoad.getValue(1));
8779 }
8780 return SDValue(N, 0); // Return N so it doesn't get rechecked!
8781 }
8782 }
8783
8784 // fold (aext (zextload x)) -> (aext (truncate (zextload x)))
8785 // fold (aext (sextload x)) -> (aext (truncate (sextload x)))
8786 // fold (aext ( extload x)) -> (aext (truncate (extload x)))
8787 if (N0.getOpcode() == ISD::LOAD && !ISD::isNON_EXTLoad(N0.getNode()) &&
8788 ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
8789 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
8790 ISD::LoadExtType ExtType = LN0->getExtensionType();
8791 EVT MemVT = LN0->getMemoryVT();
8792 if (!LegalOperations || TLI.isLoadExtLegal(ExtType, VT, MemVT)) {
8793 SDValue ExtLoad = DAG.getExtLoad(ExtType, SDLoc(N),
8794 VT, LN0->getChain(), LN0->getBasePtr(),
8795 MemVT, LN0->getMemOperand());
8796 CombineTo(N, ExtLoad);
8797 DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
8798 return SDValue(N, 0); // Return N so it doesn't get rechecked!
8799 }
8800 }
8801
8802 if (N0.getOpcode() == ISD::SETCC) {
8803 // For vectors:
8804 // aext(setcc) -> vsetcc
8805 // aext(setcc) -> truncate(vsetcc)
8806 // aext(setcc) -> aext(vsetcc)
8807 // Only do this before legalize for now.
8808 if (VT.isVector() && !LegalOperations) {
8809 EVT N00VT = N0.getOperand(0).getValueType();
8810 if (getSetCCResultType(N00VT) == N0.getValueType())
8811 return SDValue();
8812
8813 // We know that the # elements of the results is the same as the
8814 // # elements of the compare (and the # elements of the compare result
8815 // for that matter). Check to see that they are the same size. If so,
8816 // we know that the element size of the sext'd result matches the
8817 // element size of the compare operands.
8818 if (VT.getSizeInBits() == N00VT.getSizeInBits())
8819 return DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0),
8820 N0.getOperand(1),
8821 cast<CondCodeSDNode>(N0.getOperand(2))->get());
8822 // If the desired elements are smaller or larger than the source
8823 // elements we can use a matching integer vector type and then
8824 // truncate/any extend
8825 else {
8826 EVT MatchingVectorType = N00VT.changeVectorElementTypeToInteger();
8827 SDValue VsetCC =
8828 DAG.getSetCC(SDLoc(N), MatchingVectorType, N0.getOperand(0),
8829 N0.getOperand(1),
8830 cast<CondCodeSDNode>(N0.getOperand(2))->get());
8831 return DAG.getAnyExtOrTrunc(VsetCC, SDLoc(N), VT);
8832 }
8833 }
8834
8835 // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
8836 SDLoc DL(N);
8837 if (SDValue SCC = SimplifySelectCC(
8838 DL, N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, DL, VT),
8839 DAG.getConstant(0, DL, VT),
8840 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true))
8841 return SCC;
8842 }
8843
8844 return SDValue();
8845}
8846
8847SDValue DAGCombiner::visitAssertExt(SDNode *N) {
8848 unsigned Opcode = N->getOpcode();
8849 SDValue N0 = N->getOperand(0);
8850 SDValue N1 = N->getOperand(1);
8851 EVT AssertVT = cast<VTSDNode>(N1)->getVT();
8852
8853 // fold (assert?ext (assert?ext x, vt), vt) -> (assert?ext x, vt)
8854 if (N0.getOpcode() == Opcode &&
8855 AssertVT == cast<VTSDNode>(N0.getOperand(1))->getVT())
8856 return N0;
8857
8858 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
8859 N0.getOperand(0).getOpcode() == Opcode) {
8860 // We have an assert, truncate, assert sandwich. Make one stronger assert
8861 // by asserting on the smallest asserted type to the larger source type.
8862 // This eliminates the later assert:
8863 // assert (trunc (assert X, i8) to iN), i1 --> trunc (assert X, i1) to iN
8864 // assert (trunc (assert X, i1) to iN), i8 --> trunc (assert X, i1) to iN
8865 SDValue BigA = N0.getOperand(0);
8866 EVT BigA_AssertVT = cast<VTSDNode>(BigA.getOperand(1))->getVT();
8867 assert(BigA_AssertVT.bitsLE(N0.getValueType()) &&
8868 "Asserting zero/sign-extended bits to a type larger than the "
8869 "truncated destination does not provide information");
8870
8871 SDLoc DL(N);
8872 EVT MinAssertVT = AssertVT.bitsLT(BigA_AssertVT) ? AssertVT : BigA_AssertVT;
8873 SDValue MinAssertVTVal = DAG.getValueType(MinAssertVT);
8874 SDValue NewAssert = DAG.getNode(Opcode, DL, BigA.getValueType(),
8875 BigA.getOperand(0), MinAssertVTVal);
8876 return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewAssert);
8877 }
8878
8879 return SDValue();
8880}
8881
8882/// If the result of a wider load is shifted right by N bits and then
8883/// truncated to a narrower type, where N is a multiple of the number of bits
8884/// of the narrower type, transform it to a narrower load from address + N /
8885/// (num bits of the new type). Also narrow the load if the result is masked
8886/// with an AND to effectively produce a smaller type. If the result is to be
8887/// extended, also fold the extension to form an extending load.
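/// For illustration (a hypothetical little-endian example, not from a test):
///   (i32 (trunc (srl (i64 (load p)), 32)))
/// demands only bits [32, 64) of the loaded value, so it can be rewritten as
///   (i32 (load p + 4))
/// i.e. a narrower load at byte offset 32 / 8 = 4.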
8888SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
8889 unsigned Opc = N->getOpcode();
8890
8891 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
8892 SDValue N0 = N->getOperand(0);
8893 EVT VT = N->getValueType(0);
8894 EVT ExtVT = VT;
8895
8896 // This transformation isn't valid for vector loads.
8897 if (VT.isVector())
8898 return SDValue();
8899
8900 // Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then
8901 // extending to VT.
8902 if (Opc == ISD::SIGN_EXTEND_INREG) {
8903 ExtType = ISD::SEXTLOAD;
8904 ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
8905 } else if (Opc == ISD::SRL) {
8906 // Another special case: SRL is basically zero-extending a narrower value,
8907 // or it may be shifting a higher subword, half or byte into the lowest
8908 // bits.
8909 ExtType = ISD::ZEXTLOAD;
8910 N0 = SDValue(N, 0);
8911
8912 auto *LN0 = dyn_cast<LoadSDNode>(N0.getOperand(0));
8913 auto *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
8914 if (!N01 || !LN0)
8915 return SDValue();
8916
8917 uint64_t ShiftAmt = N01->getZExtValue();
8918 uint64_t MemoryWidth = LN0->getMemoryVT().getSizeInBits();
8919 if (LN0->getExtensionType() != ISD::SEXTLOAD && MemoryWidth > ShiftAmt)
8920 ExtVT = EVT::getIntegerVT(*DAG.getContext(), MemoryWidth - ShiftAmt);
8921 else
8922 ExtVT = EVT::getIntegerVT(*DAG.getContext(),
8923 VT.getSizeInBits() - ShiftAmt);
8924 } else if (Opc == ISD::AND) {
8925 // An AND with a constant mask is the same as a truncate + zero-extend.
8926 auto AndC = dyn_cast<ConstantSDNode>(N->getOperand(1));
8927 if (!AndC || !AndC->getAPIntValue().isMask())
8928 return SDValue();
8929
8930 unsigned ActiveBits = AndC->getAPIntValue().countTrailingOnes();
8931 ExtType = ISD::ZEXTLOAD;
8932 ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
8933 }
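 // A few illustrative classifications for the cases above (hypothetical
 // operands):
 //   (sext_inreg (load p), i8) --> ExtType = SEXTLOAD, ExtVT = i8
 //   (and (load p), 255)       --> ExtType = ZEXTLOAD, ExtVT = i8, since
 //                                 0xFF is a mask with 8 trailing ones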
8934
8935 unsigned ShAmt = 0;
8936 if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
8937 SDValue SRL = N0;
8938 if (auto *ConstShift = dyn_cast<ConstantSDNode>(SRL.getOperand(1))) {
8939 ShAmt = ConstShift->getZExtValue();
8940 unsigned EVTBits = ExtVT.getSizeInBits();
8941 // Is the shift amount a multiple of the size of ExtVT?
8942 if ((ShAmt & (EVTBits-1)) == 0) {
8943 N0 = N0.getOperand(0);
8944 // Is the load width a multiple of the size of ExtVT?
8945 if ((N0.getValueSizeInBits() & (EVTBits-1)) != 0)
8946 return SDValue();
8947 }
8948
8949 // At this point, we must have a load or else we can't do the transform.
8950 if (!isa<LoadSDNode>(N0)) return SDValue();
8951
8952 auto *LN0 = cast<LoadSDNode>(N0);
8953
8954 // Because an SRL must be assumed to *need* to zero-extend the high bits
8955 // (as opposed to anyext the high bits), we can't combine the zextload
8956 // lowering of SRL and an sextload.
8957 if (LN0->getExtensionType() == ISD::SEXTLOAD)
8958 return SDValue();
8959
8960 // If the shift amount is larger than the input type then we're not
8961 // accessing any of the loaded bytes. If the load was a zextload/extload
8962 // then the result of the shift+trunc is zero/undef (handled elsewhere).
8963 if (ShAmt >= LN0->getMemoryVT().getSizeInBits())
8964 return SDValue();
8965
8966 // If the SRL is only used by a masking AND, we may be able to adjust
8967 // the ExtVT to make the AND redundant.
8968 SDNode *Mask = *(SRL->use_begin());
8969 if (Mask->getOpcode() == ISD::AND &&
8970 isa<ConstantSDNode>(Mask->getOperand(1))) {
8971 const APInt &ShiftMask =
8972 cast<ConstantSDNode>(Mask->getOperand(1))->getAPIntValue();
8973 if (ShiftMask.isMask()) {
8974 EVT MaskedVT = EVT::getIntegerVT(*DAG.getContext(),
8975 ShiftMask.countTrailingOnes());
8976 // If the mask is smaller, recompute the type.
8977 if ((ExtVT.getSizeInBits() > MaskedVT.getSizeInBits()) &&
8978 TLI.isLoadExtLegal(ExtType, N0.getValueType(), MaskedVT))
8979 ExtVT = MaskedVT;
8980 }
8981 }
8982 }
8983 }
8984
8985 // If the load is shifted left (and the result isn't shifted back right),
8986 // we can fold the truncate through the shift.
8987 unsigned ShLeftAmt = 0;
8988 if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() &&
8989 ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) {
8990 if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
8991 ShLeftAmt = N01->getZExtValue();
8992 N0 = N0.getOperand(0);
8993 }
8994 }
8995
8996 // If we haven't found a load, we can't narrow it.
8997 if (!isa<LoadSDNode>(N0))
8998 return SDValue();
8999
9000 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
9001 if (!isLegalNarrowLdSt(LN0, ExtType, ExtVT, ShAmt))
9002 return SDValue();
9003
9004 // For big endian targets, we need to adjust the offset to the pointer to
9005 // load the correct bytes.
9006 if (DAG.getDataLayout().isBigEndian()) {
9007 unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits();
9008 unsigned EVTStoreBits = ExtVT.getStoreSizeInBits();
9009 ShAmt = LVTStoreBits - EVTStoreBits - ShAmt;
9010 }
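 // Worked example (assumed values): narrowing an i32 load to an i8 zextload
 // with an incoming ShAmt of 0 gives LVTStoreBits = 32, EVTStoreBits = 8, so
 // on big-endian ShAmt becomes 32 - 8 - 0 = 24 and PtrOff = 24 / 8 = 3: the
 // low byte of an i32 lives at byte offset 3 on a big-endian target.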
9011
9012 EVT PtrType = N0.getOperand(1).getValueType();
9013 uint64_t PtrOff = ShAmt / 8;
9014 unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff);
9015 SDLoc DL(LN0);
9016 // The original load itself didn't wrap, so an offset within it doesn't.
9017 SDNodeFlags Flags;
9018 Flags.setNoUnsignedWrap(true);
9019 SDValue NewPtr = DAG.getNode(ISD::ADD, DL,
9020 PtrType, LN0->getBasePtr(),
9021 DAG.getConstant(PtrOff, DL, PtrType),
9022 Flags);
9023 AddToWorklist(NewPtr.getNode());
9024
9025 SDValue Load;
9026 if (ExtType == ISD::NON_EXTLOAD)
9027 Load = DAG.getLoad(VT, SDLoc(N0), LN0->getChain(), NewPtr,
9028 LN0->getPointerInfo().getWithOffset(PtrOff), NewAlign,
9029 LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
9030 else
9031 Load = DAG.getExtLoad(ExtType, SDLoc(N0), VT, LN0->getChain(), NewPtr,
9032 LN0->getPointerInfo().getWithOffset(PtrOff), ExtVT,
9033 NewAlign, LN0->getMemOperand()->getFlags(),
9034 LN0->getAAInfo());
9035
9036 // Replace the old load's chain with the new load's chain.
9037 WorklistRemover DeadNodes(*this);
9038 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
9039
9040 // Shift the result left, if we've swallowed a left shift.
9041 SDValue Result = Load;
9042 if (ShLeftAmt != 0) {
9043 EVT ShImmTy = getShiftAmountTy(Result.getValueType());
9044 if (!isUIntN(ShImmTy.getSizeInBits(), ShLeftAmt))
9045 ShImmTy = VT;
9046 // If the shift amount is as large as the result size (but, presumably,
9047 // no larger than the source) then the useful bits of the result are
9048 // zero; we can't simply return the shortened shift, because the result
9049 // of that operation is undefined.
9050 SDLoc DL(N0);
9051 if (ShLeftAmt >= VT.getSizeInBits())
9052 Result = DAG.getConstant(0, DL, VT);
9053 else
9054 Result = DAG.getNode(ISD::SHL, DL, VT,
9055 Result, DAG.getConstant(ShLeftAmt, DL, ShImmTy));
9056 }
9057
9058 // Return the new loaded value.
9059 return Result;
9060}
9061
9062SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
9063 SDValue N0 = N->getOperand(0);
9064 SDValue N1 = N->getOperand(1);
9065 EVT VT = N->getValueType(0);
9066 EVT EVT = cast<VTSDNode>(N1)->getVT();
9067 unsigned VTBits = VT.getScalarSizeInBits();
9068 unsigned EVTBits = EVT.getScalarSizeInBits();
9069
9070 if (N0.isUndef())
9071 return DAG.getUNDEF(VT);
9072
9073 // fold (sext_in_reg c1) -> c1
9074 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
9075 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, N0, N1);
9076
9077 // If the input is already sign extended, just drop the extension.
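 // For example (hypothetical numbers): with VT = i32 and EVT = i8 this
 // requires ComputeNumSignBits(N0) >= 32 - 8 + 1 = 25, i.e. bits 31..7 are
 // all copies of the sign bit, so the sext_in_reg would change nothing.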
9078 if (DAG.ComputeNumSignBits(N0) >= VTBits-EVTBits+1)
9079 return N0;
9080
9081 // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2
9082 if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
9083 EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT()))
9084 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
9085 N0.getOperand(0), N1);
9086
9087 // fold (sext_in_reg (sext x)) -> (sext x)
9088 // fold (sext_in_reg (aext x)) -> (sext x)
9089 // if x is small enough.
9090 if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) {
9091 SDValue N00 = N0.getOperand(0);
9092 if (N00.getScalarValueSizeInBits() <= EVTBits &&
9093 (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
9094 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1);
9095 }
9096
9097 // fold (sext_in_reg (*_extend_vector_inreg x)) -> (sext_vector_inreg x)
9098 if ((N0.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG ||
9099 N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG ||
9100 N0.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) &&
9101 N0.getOperand(0).getScalarValueSizeInBits() == EVTBits) {
9102 if (!LegalOperations ||
9103 TLI.isOperationLegal(ISD::SIGN_EXTEND_VECTOR_INREG, VT))
9104 return DAG.getSignExtendVectorInReg(N0.getOperand(0), SDLoc(N), VT);
9105 }
9106
9107 // fold (sext_in_reg (zext x)) -> (sext x)
9108 // iff we are extending the source sign bit.
9109 if (N0.getOpcode() == ISD::ZERO_EXTEND) {
9110 SDValue N00 = N0.getOperand(0);
9111 if (N00.getScalarValueSizeInBits() == EVTBits &&
9112 (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
9113 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1);
9114 }
9115
9116 // fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero.
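 // (The sign bit of the EVT-sized value is bit EVTBits - 1; if that bit is
 // known zero, sign- and zero-extension agree. E.g. for EVT = i8 this tests
 // bit 7.)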
9117 if (DAG.MaskedValueIsZero(N0, APInt::getOneBitSet(VTBits, EVTBits - 1)))
9118 return DAG.getZeroExtendInReg(N0, SDLoc(N), EVT.getScalarType());
9119
9120 // fold operands of sext_in_reg based on knowledge that the top bits are not
9121 // demanded.
9122 if (SimplifyDemandedBits(SDValue(N, 0)))
9123 return SDValue(N, 0);
9124
9125 // fold (sext_in_reg (load x)) -> (smaller sextload x)
9126 // fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits))
9127 if (SDValue NarrowLoad = ReduceLoadWidth(N))
9128 return NarrowLoad;
9129
9130 // fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24)
9131 // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible.
9132 // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above.
9133 if (N0.getOpcode() == ISD::SRL) {
9134 if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
9135 if (ShAmt->getZExtValue()+EVTBits <= VTBits) {
9136 // We can turn this into an SRA iff the input to the SRL is already sign
9137 // extended enough.
9138 unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0));
9139 if (VTBits-(ShAmt->getZExtValue()+EVTBits) < InSignBits)
9140 return DAG.getNode(ISD::SRA, SDLoc(N), VT,
9141 N0.getOperand(0), N0.getOperand(1));
9142 }
9143 }
9144
9145 // fold (sext_inreg (extload x)) -> (sextload x)
9146 // If sextload is not supported by target, we can only do the combine when
9147 // load has one use. Doing otherwise can block folding the extload with other
9148 // extends that the target does support.
9149 if (ISD::isEXTLoad(N0.getNode()) &&
9150 ISD::isUNINDEXEDLoad(N0.getNode()) &&
9151 EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
9152 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile() &&
9153 N0.hasOneUse()) ||
9154 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) {
9155 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
9156 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
9157 LN0->getChain(),
9158 LN0->getBasePtr(), EVT,
9159 LN0->getMemOperand());
9160 CombineTo(N, ExtLoad);
9161 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
9162 AddToWorklist(ExtLoad.getNode());
9163 return SDValue(N, 0); // Return N so it doesn't get rechecked!
9164 }
9165 // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use
9166 if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
9167 N0.hasOneUse() &&
9168 EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
9169 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
9170 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) {
9171 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
9172 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
9173 LN0->getChain(),
9174 LN0->getBasePtr(), EVT,
9175 LN0->getMemOperand());
9176 CombineTo(N, ExtLoad);
9177 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
9178 return SDValue(N, 0); // Return N so it doesn't get rechecked!
9179 }
9180
9181 // Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16))
9182 if (EVTBits <= 16 && N0.getOpcode() == ISD::OR) {
9183 if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
9184 N0.getOperand(1), false))
9185 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
9186 BSwap, N1);
9187 }
9188
9189 return SDValue();
9190}
9191
9192SDValue DAGCombiner::visitSIGN_EXTEND_VECTOR_INREG(SDNode *N) {
9193 SDValue N0 = N->getOperand(0);
9194 EVT VT = N->getValueType(0);
9195
9196 if (N0.isUndef())
9197 return DAG.getUNDEF(VT);
9198
9199 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
9200 LegalOperations))
9201 return SDValue(Res, 0);
9202
9203 return SDValue();
9204}
9205
9206SDValue DAGCombiner::visitZERO_EXTEND_VECTOR_INREG(SDNode *N) {
9207 SDValue N0 = N->getOperand(0);
9208 EVT VT = N->getValueType(0);
9209
9210 if (N0.isUndef())
9211 return DAG.getUNDEF(VT);
9212
9213 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
9214 LegalOperations))
9215 return SDValue(Res, 0);
9216
9217 return SDValue();
9218}
9219
9220SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
9221 SDValue N0 = N->getOperand(0);
9222 EVT VT = N->getValueType(0);
9223 bool isLE = DAG.getDataLayout().isLittleEndian();
9224
9225 // noop truncate
9226 if (N0.getValueType() == N->getValueType(0))
9227 return N0;
9228
9229 // fold (truncate (truncate x)) -> (truncate x)
9230 if (N0.getOpcode() == ISD::TRUNCATE)
9231 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0));
9232
9233 // fold (truncate c1) -> c1
9234 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
9235 SDValue C = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0);
9236 if (C.getNode() != N)
9237 return C;
9238 }
9239
9240 // fold (truncate (ext x)) -> (ext x) or (truncate x) or x
9241 if (N0.getOpcode() == ISD::ZERO_EXTEND ||
9242 N0.getOpcode() == ISD::SIGN_EXTEND ||
9243 N0.getOpcode() == ISD::ANY_EXTEND) {
9244 // if the source is smaller than the dest, we still need an extend.
9245 if (N0.getOperand(0).getValueType().bitsLT(VT))
9246 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0));
9247 // if the source is larger than the dest, then we just need the truncate.
9248 if (N0.getOperand(0).getValueType().bitsGT(VT))
9249 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0));
9250 // if the source and dest are the same type, we can drop both the extend
9251 // and the truncate.
9252 return N0.getOperand(0);
9253 }
9254
9255 // If this is anyext(trunc), don't fold it, allow ourselves to be folded.
9256 if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ANY_EXTEND))
9257 return SDValue();
9258
9259 // Fold extract-and-trunc into a narrow extract. For example:
9260 // i64 x = EXTRACT_VECTOR_ELT(v2i64 val, i32 1)
9261 // i32 y = TRUNCATE(i64 x)
9262 // -- becomes --
9263 // v16i8 b = BITCAST (v2i64 val)
9264 // i8 x = EXTRACT_VECTOR_ELT(v16i8 b, i32 8)
9265 //
9266 // Note: We only run this optimization after type legalization (which often
9267 // creates this pattern) and before operation legalization after which
9268 // we need to be more careful about the vector instructions that we generate.
9269 if (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
9270 LegalTypes && !LegalOperations && N0->hasOneUse() && VT != MVT::i1) {
9271 EVT VecTy = N0.getOperand(0).getValueType();
9272 EVT ExTy = N0.getValueType();
9273 EVT TrTy = N->getValueType(0);
9274
9275 unsigned NumElem = VecTy.getVectorNumElements();
9276 unsigned SizeRatio = ExTy.getSizeInBits()/TrTy.getSizeInBits();
9277
9278 EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, SizeRatio * NumElem);
9279 assert(NVT.getSizeInBits() == VecTy.getSizeInBits() && "Invalid Size");
9280
9281 SDValue EltNo = N0->getOperand(1);
9282 if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
9283 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
9284 EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout());
9285 int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));
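 // E.g. (a hypothetical case matching the comment above): extracting i64
 // elt 1 of a v2i64 and truncating to i8 gives SizeRatio = 8; the low byte
 // is v16i8 index 8 on little-endian and index 8 + 7 = 15 on big-endian.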
9286
9287 SDLoc DL(N);
9288 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TrTy,
9289 DAG.getBitcast(NVT, N0.getOperand(0)),
9290 DAG.getConstant(Index, DL, IndexTy));
9291 }
9292 }
9293
9294 // trunc (select c, a, b) -> select c, (trunc a), (trunc b)
9295 if (N0.getOpcode() == ISD::SELECT && N0.hasOneUse()) {
9296 EVT SrcVT = N0.getValueType();
9297 if ((!LegalOperations || TLI.isOperationLegal(ISD::SELECT, SrcVT)) &&
9298 TLI.isTruncateFree(SrcVT, VT)) {
9299 SDLoc SL(N0);
9300 SDValue Cond = N0.getOperand(0);
9301 SDValue TruncOp0 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1));
9302 SDValue TruncOp1 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(2));
9303 return DAG.getNode(ISD::SELECT, SDLoc(N), VT, Cond, TruncOp0, TruncOp1);
9304 }
9305 }
9306
9307 // trunc (shl x, K) -> shl (trunc x), K, iff K < VT.getScalarSizeInBits()
9308 if (N0.getOpcode() == ISD::SHL && N0.hasOneUse() &&
9309 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::SHL, VT)) &&
9310 TLI.isTypeDesirableForOp(ISD::SHL, VT)) {
9311 SDValue Amt = N0.getOperand(1);
9312 KnownBits Known;
9313 DAG.computeKnownBits(Amt, Known);
9314 unsigned Size = VT.getScalarSizeInBits();
9315 if (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size)) {
9316 SDLoc SL(N);
9317 EVT AmtVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
9318
9319 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(0));
9320 if (AmtVT != Amt.getValueType()) {
9321 Amt = DAG.getZExtOrTrunc(Amt, SL, AmtVT);
9322 AddToWorklist(Amt.getNode());
9323 }
9324 return DAG.getNode(ISD::SHL, SL, VT, Trunc, Amt);
9325 }
9326 }
9327
9328 // Fold a series of buildvector, bitcast, and truncate if possible.
9329 // For example fold
9330 // (2xi32 trunc (bitcast ((4xi32)buildvector x, x, y, y) 2xi64)) to
9331 // (2xi32 (buildvector x, y)).
9332 if (Level == AfterLegalizeVectorOps && VT.isVector() &&
9333 N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
9334 N0.getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
9335 N0.getOperand(0).hasOneUse()) {
9336 SDValue BuildVect = N0.getOperand(0);
9337 EVT BuildVectEltTy = BuildVect.getValueType().getVectorElementType();
9338 EVT TruncVecEltTy = VT.getVectorElementType();
9339
9340 // Check that the element types match.
9341 if (BuildVectEltTy == TruncVecEltTy) {
9342 // Now we only need to compute the offset of the truncated elements.
9343 unsigned BuildVecNumElts = BuildVect.getNumOperands();
9344 unsigned TruncVecNumElts = VT.getVectorNumElements();
9345 unsigned TruncEltOffset = BuildVecNumElts / TruncVecNumElts;
9346
9347 assert((BuildVecNumElts % TruncVecNumElts) == 0 &&
9348 "Invalid number of elements");
9349
9350 SmallVector<SDValue, 8> Opnds;
9351 for (unsigned i = 0, e = BuildVecNumElts; i != e; i += TruncEltOffset)
9352 Opnds.push_back(BuildVect.getOperand(i));
9353
9354 return DAG.getBuildVector(VT, SDLoc(N), Opnds);
9355 }
9356 }
9357
9358 // See if we can simplify the input to this truncate through knowledge that
9359 // only the low bits are being used.
9360 // For example "trunc (or (shl x, 8), y)" -> trunc y
9361 // Currently we only perform this optimization on scalars because vectors
9362 // may have different active low bits.
9363 if (!VT.isVector()) {
9364 APInt Mask =
9365 APInt::getLowBitsSet(N0.getValueSizeInBits(), VT.getSizeInBits());
9366 if (SDValue Shorter = DAG.GetDemandedBits(N0, Mask))
9367 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Shorter);
9368 }
9369
9370 // fold (truncate (load x)) -> (smaller load x)
9371 // fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
9372 if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) {
9373 if (SDValue Reduced = ReduceLoadWidth(N))
9374 return Reduced;
9375
9376 // Handle the case where the load remains an extending load even
9377 // after truncation.
9378 if (N0.hasOneUse() && ISD::isUNINDEXEDLoad(N0.getNode())) {
9379 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
9380 if (!LN0->isVolatile() &&
9381 LN0->getMemoryVT().getStoreSizeInBits() < VT.getSizeInBits()) {
9382 SDValue NewLoad = DAG.getExtLoad(LN0->getExtensionType(), SDLoc(LN0),
9383 VT, LN0->getChain(), LN0->getBasePtr(),
9384 LN0->getMemoryVT(),
9385 LN0->getMemOperand());
9386 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLoad.getValue(1));
9387 return NewLoad;
9388 }
9389 }
9390 }
9391
9392 // fold (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...),
9393 // where ... are all 'undef'.
9394 if (N0.getOpcode() == ISD::CONCAT_VECTORS && !LegalTypes) {
9395 SmallVector<EVT, 8> VTs;
9396 SDValue V;
9397 unsigned Idx = 0;
9398 unsigned NumDefs = 0;
9399
9400 for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
9401 SDValue X = N0.getOperand(i);
9402 if (!X.isUndef()) {
9403 V = X;
9404 Idx = i;
9405 NumDefs++;
9406 }
9407 // Stop if more than one member is non-undef.
9408 if (NumDefs > 1)
9409 break;
9410 VTs.push_back(EVT::getVectorVT(*DAG.getContext(),
9411 VT.getVectorElementType(),
9412 X.getValueType().getVectorNumElements()));
9413 }
9414
9415 if (NumDefs == 0)
9416 return DAG.getUNDEF(VT);
9417
9418 if (NumDefs == 1) {
9419 assert(V.getNode() && "The single defined operand is empty!");
9420 SmallVector<SDValue, 8> Opnds;
9421 for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
9422 if (i != Idx) {
9423 Opnds.push_back(DAG.getUNDEF(VTs[i]));
9424 continue;
9425 }
9426 SDValue NV = DAG.getNode(ISD::TRUNCATE, SDLoc(V), VTs[i], V);
9427 AddToWorklist(NV.getNode());
9428 Opnds.push_back(NV);
9429 }
9430 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Opnds);
9431 }
9432 }
9433
9434 // Fold truncate of a bitcast of a vector to an extract of the low vector
9435 // element.
9436 //
9437 // e.g. trunc (i64 (bitcast v2i32:x)) -> extract_vector_elt v2i32:x, idx
9438 if (N0.getOpcode() == ISD::BITCAST && !VT.isVector()) {
9439 SDValue VecSrc = N0.getOperand(0);
9440 EVT SrcVT = VecSrc.getValueType();
9441 if (SrcVT.isVector() && SrcVT.getScalarType() == VT &&
9442 (!LegalOperations ||
9443 TLI.isOperationLegal(ISD::EXTRACT_VECTOR_ELT, SrcVT))) {
9444 SDLoc SL(N);
9445
9446 EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
9447 unsigned Idx = isLE ? 0 : SrcVT.getVectorNumElements() - 1;
9448 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, VT,
9449 VecSrc, DAG.getConstant(Idx, SL, IdxVT));
9450 }
9451 }
9452
9453 // Simplify the operands using demanded-bits information.
9454 if (!VT.isVector() &&
9455 SimplifyDemandedBits(SDValue(N, 0)))
9456 return SDValue(N, 0);
9457
9458 // (trunc adde(X, Y, Carry)) -> (adde trunc(X), trunc(Y), Carry)
9459 // (trunc addcarry(X, Y, Carry)) -> (addcarry trunc(X), trunc(Y), Carry)
9460 // When the adde's carry is not used.
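 // (Truncation commutes with the addition when the carry-out is unused:
 // the low bits of the sum depend only on the low bits of the operands
 // plus the carry-in.)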
9461 if ((N0.getOpcode() == ISD::ADDE || N0.getOpcode() == ISD::ADDCARRY) &&
9462 N0.hasOneUse() && !N0.getNode()->hasAnyUseOfValue(1) &&
9463 (!LegalOperations || TLI.isOperationLegal(N0.getOpcode(), VT))) {
9464 SDLoc SL(N);
9465 auto X = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(0));
9466 auto Y = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1));
9467 auto VTs = DAG.getVTList(VT, N0->getValueType(1));
9468 return DAG.getNode(N0.getOpcode(), SL, VTs, X, Y, N0.getOperand(2));
9469 }
9470
9471 // fold (truncate (extract_subvector(ext x))) ->
9472 // (extract_subvector x)
9473 // TODO: This can be generalized to cover cases where the truncate and extract
9474 // do not fully cancel each other out.
9475 if (!LegalTypes && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
9476 SDValue N00 = N0.getOperand(0);
9477 if (N00.getOpcode() == ISD::SIGN_EXTEND ||
9478 N00.getOpcode() == ISD::ZERO_EXTEND ||
9479 N00.getOpcode() == ISD::ANY_EXTEND) {
9480 if (N00.getOperand(0)->getValueType(0).getVectorElementType() ==
9481 VT.getVectorElementType())
9482 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N0->getOperand(0)), VT,
9483 N00.getOperand(0), N0.getOperand(1));
9484 }
9485 }
9486
9487 if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
9488 return NewVSel;
9489
9490 return SDValue();
9491}
9492
9493static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
9494 SDValue Elt = N->getOperand(i);
9495 if (Elt.getOpcode() != ISD::MERGE_VALUES)
9496 return Elt.getNode();
9497 return Elt.getOperand(Elt.getResNo()).getNode();
9498}
9499
9500/// build_pair (load, load) -> load
9501/// if load locations are consecutive.
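/// For example (a sketch, not from a test case): on little-endian,
///   build_pair (i32 load p), (i32 load p+4)
/// can become a single i64 load of p, provided the wider type's ABI
/// alignment does not exceed the original load's alignment; on big-endian
/// the two inputs are swapped before the consecutiveness check.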
9502SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
9503 assert(N->getOpcode() == ISD::BUILD_PAIR);
9504
9505 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0));
9506 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1));
9507
9508 // A BUILD_PAIR always has the least significant part in elt 0 and the
9509 // most significant part in elt 1. So when combining into one large load, we
9510 // need to consider the endianness.
9511 if (DAG.getDataLayout().isBigEndian())
9512 std::swap(LD1, LD2);
9513
9514 if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() ||
9515 LD1->getAddressSpace() != LD2->getAddressSpace())
9516 return SDValue();
9517 EVT LD1VT = LD1->getValueType(0);
9518 unsigned LD1Bytes = LD1VT.getStoreSize();
9519 if (ISD::isNON_EXTLoad(LD2) && LD2->hasOneUse() &&
9520 DAG.areNonVolatileConsecutiveLoads(LD2, LD1, LD1Bytes, 1)) {
9521 unsigned Align = LD1->getAlignment();
9522 unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
9523 VT.getTypeForEVT(*DAG.getContext()));
9524
9525 if (NewAlign <= Align &&
9526 (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
9527 return DAG.getLoad(VT, SDLoc(N), LD1->getChain(), LD1->getBasePtr(),
9528 LD1->getPointerInfo(), Align);
9529 }
9530
9531 return SDValue();
9532}
9533
9534static unsigned getPPCf128HiElementSelector(const SelectionDAG &DAG) {
9535 // On little-endian machines, bitcasting from ppcf128 to i128 does swap the Hi
9536 // and Lo parts; on big-endian machines it doesn't.
9537 return DAG.getDataLayout().isBigEndian() ? 1 : 0;
9538}
9539
9540static SDValue foldBitcastedFPLogic(SDNode *N, SelectionDAG &DAG,
9541 const TargetLowering &TLI) {
9542 // If this is not a bitcast to an FP type or if the target doesn't have
9543 // IEEE754-compliant FP logic, we're done.
9544 EVT VT = N->getValueType(0);
9545 if (!VT.isFloatingPoint() || !TLI.hasBitPreservingFPLogic(VT))
9546 return SDValue();
9547
9548 // TODO: Use splat values for the constant-checking below and remove this
9549 // restriction.
9550 SDValue N0 = N->getOperand(0);
9551 EVT SourceVT = N0.getValueType();
9552 if (SourceVT.isVector())
9553 return SDValue();
9554
9555 unsigned FPOpcode;
9556 APInt SignMask;
9557 switch (N0.getOpcode()) {
9558 case ISD::AND:
9559 FPOpcode = ISD::FABS;
9560 SignMask = ~APInt::getSignMask(SourceVT.getSizeInBits());
9561 break;
9562 case ISD::XOR:
9563 FPOpcode = ISD::FNEG;
9564 SignMask = APInt::getSignMask(SourceVT.getSizeInBits());
9565 break;
9566 // TODO: ISD::OR --> ISD::FNABS?
9567 default:
9568 return SDValue();
9569 }
9570
9571 // Fold (bitcast int (and (bitcast fp X to int), 0x7fff...) to fp) -> fabs X
9572 // Fold (bitcast int (xor (bitcast fp X to int), 0x8000...) to fp) -> fneg X
9573 SDValue LogicOp0 = N0.getOperand(0);
9574 ConstantSDNode *LogicOp1 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
9575 if (LogicOp1 && LogicOp1->getAPIntValue() == SignMask &&
9576 LogicOp0.getOpcode() == ISD::BITCAST &&
9577 LogicOp0->getOperand(0).getValueType() == VT)
9578 return DAG.getNode(FPOpcode, SDLoc(N), VT, LogicOp0->getOperand(0));
9579
9580 return SDValue();
9581}
9582
9583SDValue DAGCombiner::visitBITCAST(SDNode *N) {
9584 SDValue N0 = N->getOperand(0);
9585 EVT VT = N->getValueType(0);
9586
9587 if (N0.isUndef())
9588 return DAG.getUNDEF(VT);
9589
9590 // If the input is a BUILD_VECTOR with all constant elements, fold this now.
9591 // Only do this before legalize, since afterward the target may be depending
9592 // on the bitconvert.
9593 // First check to see if this is all constant.
9594 if (!LegalTypes &&
9595 N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
9596 VT.isVector()) {
9597 bool isSimple = cast<BuildVectorSDNode>(N0)->isConstant();
9598
9599 EVT DestEltVT = N->getValueType(0).getVectorElementType();
9600 assert(!DestEltVT.isVector() &&
9601 "Element type of vector ValueType must not be vector!");
9602 if (isSimple)
9603 return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(), DestEltVT);
9604 }
9605
9606 // If the input is a constant, let getNode fold it.
9607 // We always need to check that this is just an fp -> int or int -> fp
9608 // conversion; otherwise we will get back N, which will confuse the caller
9609 // into thinking we used CombineTo. This can block target combines from
9610 // running. If we are only allowed legal operations, we need to ensure the
9611 // resulting operation will be legal.
9612 // TODO: Maybe we should check that the return value isn't N explicitly?
9613 if ((isa<ConstantSDNode>(N0) && VT.isFloatingPoint() && !VT.isVector() &&
9614 (!LegalOperations || TLI.isOperationLegal(ISD::ConstantFP, VT))) ||
9615 (isa<ConstantFPSDNode>(N0) && VT.isInteger() && !VT.isVector() &&
9616 (!LegalOperations || TLI.isOperationLegal(ISD::Constant, VT))))
9617 return DAG.getBitcast(VT, N0);
9618
9619 // (conv (conv x, t1), t2) -> (conv x, t2)
9620 if (N0.getOpcode() == ISD::BITCAST)
9621 return DAG.getBitcast(VT, N0.getOperand(0));
9622
9623 // fold (conv (load x)) -> (load (conv*)x)
9624 // If the resultant load doesn't need a higher alignment than the original!
9625 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
9626 // Do not change the width of a volatile load.
9627 !cast<LoadSDNode>(N0)->isVolatile() &&
9628 // Do not remove the cast if the types differ in endian layout.
9629 TLI.hasBigEndianPartOrdering(N0.getValueType(), DAG.getDataLayout()) ==
9630 TLI.hasBigEndianPartOrdering(VT, DAG.getDataLayout()) &&
9631 (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)) &&
9632 TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) {
9633 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
9634 unsigned OrigAlign = LN0->getAlignment();
9635
9636 bool Fast = false;
9637 if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
9638 LN0->getAddressSpace(), OrigAlign, &Fast) &&
9639 Fast) {
9640 SDValue Load =
9641 DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
9642 LN0->getPointerInfo(), OrigAlign,
9643 LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
9644 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
9645 return Load;
9646 }
9647 }
9648
9649 if (SDValue V = foldBitcastedFPLogic(N, DAG, TLI))
9650 return V;
9651
9652 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
9653 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
9654 //
9655 // For ppc_fp128:
9656 // fold (bitcast (fneg x)) ->
9657 // flipbit = signbit
9658 // (xor (bitcast x) (build_pair flipbit, flipbit))
9659 //
9660 // fold (bitcast (fabs x)) ->
9661 // flipbit = (and (extract_element (bitcast x), 0), signbit)
9662 // (xor (bitcast x) (build_pair flipbit, flipbit))
9663 // This often reduces constant pool loads.
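 // Concretely (an illustrative f32/i32 case): fneg becomes
 //   (xor (bitcast x), 0x80000000)
 // and fabs becomes
 //   (and (bitcast x), 0x7fffffff)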
9664 if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(N0.getValueType())) ||
9665 (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(N0.getValueType()))) &&
9666 N0.getNode()->hasOneUse() && VT.isInteger() &&
9667 !VT.isVector() && !N0.getValueType().isVector()) {
9668 SDValue NewConv = DAG.getBitcast(VT, N0.getOperand(0));
9669 AddToWorklist(NewConv.getNode());
9670
9671 SDLoc DL(N);
9672 if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) {
9673 assert(VT.getSizeInBits() == 128);
9674 SDValue SignBit = DAG.getConstant(
9675 APInt::getSignMask(VT.getSizeInBits() / 2), SDLoc(N0), MVT::i64);
9676 SDValue FlipBit;
9677 if (N0.getOpcode() == ISD::FNEG) {
9678 FlipBit = SignBit;
9679 AddToWorklist(FlipBit.getNode());
9680 } else {
9681 assert(N0.getOpcode() == ISD::FABS);
9682 SDValue Hi =
9683 DAG.getNode(ISD::EXTRACT_ELEMENT, SDLoc(NewConv), MVT::i64, NewConv,
9684 DAG.getIntPtrConstant(getPPCf128HiElementSelector(DAG),
9685 SDLoc(NewConv)));
9686 AddToWorklist(Hi.getNode());
9687 FlipBit = DAG.getNode(ISD::AND, SDLoc(N0), MVT::i64, Hi, SignBit);
9688 AddToWorklist(FlipBit.getNode());
9689 }
9690 SDValue FlipBits =
9691 DAG.getNode(ISD::BUILD_PAIR, SDLoc(N0), VT, FlipBit, FlipBit);
9692 AddToWorklist(FlipBits.getNode());
9693 return DAG.getNode(ISD::XOR, DL, VT, NewConv, FlipBits);
9694 }
9695 APInt SignBit = APInt::getSignMask(VT.getSizeInBits());
9696 if (N0.getOpcode() == ISD::FNEG)
9697 return DAG.getNode(ISD::XOR, DL, VT,
9698 NewConv, DAG.getConstant(SignBit, DL, VT));
9699 assert(N0.getOpcode() == ISD::FABS);
9700 return DAG.getNode(ISD::AND, DL, VT,
9701 NewConv, DAG.getConstant(~SignBit, DL, VT));
9702 }
9703
9704 // fold (bitconvert (fcopysign cst, x)) ->
9705 // (or (and (bitconvert x), sign), (and cst, (not sign)))
9706 // Note that we don't handle (copysign x, cst) because this can always be
9707 // folded to an fneg or fabs.
9708 //
9709 // For ppc_fp128:
9710 // fold (bitcast (fcopysign cst, x)) ->
9711 // flipbit = (and (extract_element
9712 // (xor (bitcast cst), (bitcast x)), 0),
9713 // signbit)
9714 // (xor (bitcast cst) (build_pair flipbit, flipbit))
9715 if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() &&
9716 isa<ConstantFPSDNode>(N0.getOperand(0)) &&
9717 VT.isInteger() && !VT.isVector()) {
9718 unsigned OrigXWidth = N0.getOperand(1).getValueSizeInBits();
9719 EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
9720 if (isTypeLegal(IntXVT)) {
9721 SDValue X = DAG.getBitcast(IntXVT, N0.getOperand(1));
9722 AddToWorklist(X.getNode());
9723
9724 // If X has a different width than the result/lhs, sext it or truncate it.
9725 unsigned VTWidth = VT.getSizeInBits();
9726 if (OrigXWidth < VTWidth) {
9727 X = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, X);
9728 AddToWorklist(X.getNode());
9729 } else if (OrigXWidth > VTWidth) {
9730 // To get the sign bit in the right place, we have to shift it right
9731 // before truncating.
9732 SDLoc DL(X);
9733 X = DAG.getNode(ISD::SRL, DL,
9734 X.getValueType(), X,
9735 DAG.getConstant(OrigXWidth-VTWidth, DL,
9736 X.getValueType()));
9737 AddToWorklist(X.getNode());
9738 X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X);
9739 AddToWorklist(X.getNode());
9740 }
9741
9742 if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) {
9743 APInt SignBit = APInt::getSignMask(VT.getSizeInBits() / 2);
9744 SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0));
9745 AddToWorklist(Cst.getNode());
9746 SDValue X = DAG.getBitcast(VT, N0.getOperand(1));
9747 AddToWorklist(X.getNode());
9748 SDValue XorResult = DAG.getNode(ISD::XOR, SDLoc(N0), VT, Cst, X);
9749 AddToWorklist(XorResult.getNode());
9750 SDValue XorResult64 = DAG.getNode(
9751 ISD::EXTRACT_ELEMENT, SDLoc(XorResult), MVT::i64, XorResult,
9752 DAG.getIntPtrConstant(getPPCf128HiElementSelector(DAG),
9753 SDLoc(XorResult)));
9754 AddToWorklist(XorResult64.getNode());
9755 SDValue FlipBit =
9756 DAG.getNode(ISD::AND, SDLoc(XorResult64), MVT::i64, XorResult64,
9757 DAG.getConstant(SignBit, SDLoc(XorResult64), MVT::i64));
9758 AddToWorklist(FlipBit.getNode());
9759 SDValue FlipBits =
9760 DAG.getNode(ISD::BUILD_PAIR, SDLoc(N0), VT, FlipBit, FlipBit);
9761 AddToWorklist(FlipBits.getNode());
9762 return DAG.getNode(ISD::XOR, SDLoc(N), VT, Cst, FlipBits);
9763 }
9764 APInt SignBit = APInt::getSignMask(VT.getSizeInBits());
9765 X = DAG.getNode(ISD::AND, SDLoc(X), VT,
9766 X, DAG.getConstant(SignBit, SDLoc(X), VT));
9767 AddToWorklist(X.getNode());
9768
9769 SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0));
9770 Cst = DAG.getNode(ISD::AND, SDLoc(Cst), VT,
9771 Cst, DAG.getConstant(~SignBit, SDLoc(Cst), VT));
9772 AddToWorklist(Cst.getNode());
9773
9774 return DAG.getNode(ISD::OR, SDLoc(N), VT, X, Cst);
9775 }
9776 }
9777
9778 // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive.
9779 if (N0.getOpcode() == ISD::BUILD_PAIR)
9780 if (SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT))
9781 return CombineLD;
9782
9783 // Remove double bitcasts from shuffles - this is often a legacy of
9784 // XformToShuffleWithZero being used to combine bitmaskings (of
9785 // float vectors bitcast to integer vectors) into shuffles.
9786 // bitcast(shuffle(bitcast(s0),bitcast(s1))) -> shuffle(s0,s1)
9787 if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT) && VT.isVector() &&
9788 N0->getOpcode() == ISD::VECTOR_SHUFFLE &&
9789 VT.getVectorNumElements() >= N0.getValueType().getVectorNumElements() &&
9790 !(VT.getVectorNumElements() % N0.getValueType().getVectorNumElements())) {
9791 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N0);
9792
9793 // If an operand is a bitcast, peek through it if it casts from the original VT.
9794 // If an operand is a constant, just bitcast it back to the original VT.
9795 auto PeekThroughBitcast = [&](SDValue Op) {
9796 if (Op.getOpcode() == ISD::BITCAST &&
9797 Op.getOperand(0).getValueType() == VT)
9798 return SDValue(Op.getOperand(0));
9799 if (Op.isUndef() || ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
9800 ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()))
9801 return DAG.getBitcast(VT, Op);
9802 return SDValue();
9803 };
9804
9805 // FIXME: If either input vector is bitcast, try to convert the shuffle to
9806 // the result type of this bitcast. This would eliminate at least one
9807 // bitcast. See the transform in InstCombine.
9808 SDValue SV0 = PeekThroughBitcast(N0->getOperand(0));
9809 SDValue SV1 = PeekThroughBitcast(N0->getOperand(1));
9810 if (!(SV0 && SV1))
9811 return SDValue();
9812
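 // Example of the mask scaling below (hypothetical types): bitcasting a
 // v2i64 shuffle with mask <1, 0> to v4i32 gives MaskScale = 2 and the
 // widened mask <2, 3, 0, 1>; undef entries (M < 0) stay -1.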
9813 int MaskScale =
9814 VT.getVectorNumElements() / N0.getValueType().getVectorNumElements();
9815 SmallVector<int, 8> NewMask;
9816 for (int M : SVN->getMask())
9817 for (int i = 0; i != MaskScale; ++i)
9818 NewMask.push_back(M < 0 ? -1 : M * MaskScale + i);
9819
9820 bool LegalMask = TLI.isShuffleMaskLegal(NewMask, VT);
9821 if (!LegalMask) {
9822 std::swap(SV0, SV1);
9823 ShuffleVectorSDNode::commuteMask(NewMask);
9824 LegalMask = TLI.isShuffleMaskLegal(NewMask, VT);
9825 }
9826
9827 if (LegalMask)
9828 return DAG.getVectorShuffle(VT, SDLoc(N), SV0, SV1, NewMask);
9829 }
9830
9831 return SDValue();
9832}
9833
9834SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
9835 EVT VT = N->getValueType(0);
9836 return CombineConsecutiveLoads(N, VT);
9837}
9838
9839/// We know that BV is a build_vector node with Constant, ConstantFP or Undef
9840/// operands. DstEltVT indicates the destination element value type.
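/// An illustrative constant fold (hypothetical values, little-endian):
/// bitcasting (v2i32 build_vector 0x00010002, 0x00030004) to v4i16 yields
/// (v4i16 build_vector 0x0002, 0x0001, 0x0004, 0x0003).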
9841SDValue DAGCombiner::
9842ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
9843 EVT SrcEltVT = BV->getValueType(0).getVectorElementType();
9844
9845 // If this is already the right type, we're done.
9846 if (SrcEltVT == DstEltVT) return SDValue(BV, 0);
9847
9848 unsigned SrcBitSize = SrcEltVT.getSizeInBits();
9849 unsigned DstBitSize = DstEltVT.getSizeInBits();
9850
9851 // If this is a conversion of N elements of one type to N elements of another
9852 // type, convert each element. This handles FP<->INT cases.
9853 if (SrcBitSize == DstBitSize) {
9854 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
9855 BV->getValueType(0).getVectorNumElements());
9856
9857 // Due to the FP element handling below calling this routine recursively,
9858 // we can end up with a scalar-to-vector node here.
9859 if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR)
9860 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(BV), VT,
9861 DAG.getBitcast(DstEltVT, BV->getOperand(0)));
9862
9863 SmallVector<SDValue, 8> Ops;
9864 for (SDValue Op : BV->op_values()) {
9865 // If the vector element type is not legal, the BUILD_VECTOR operands
9866 // are promoted and implicitly truncated. Make that explicit here.
9867 if (Op.getValueType() != SrcEltVT)
9868 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(BV), SrcEltVT, Op);
9869 Ops.push_back(DAG.getBitcast(DstEltVT, Op));
9870 AddToWorklist(Ops.back().getNode());
9871 }
9872 return DAG.getBuildVector(VT, SDLoc(BV), Ops);
9873 }
9874
9875 // Otherwise, we're growing or shrinking the elements. To avoid having to
9876 // handle annoying details of growing/shrinking FP values, we convert them to
9877 // int first.
9878 if (SrcEltVT.isFloatingPoint()) {
9879 // Convert the input float vector to an int vector where the elements are
9880 // the same size.
9881 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
9882 BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode();
9883 SrcEltVT = IntVT;
9884 }
9885
9886 // Now we know the input is an integer vector. If the output is a FP type,
9887 // convert to integer first, then to FP of the right size.
9888 if (DstEltVT.isFloatingPoint()) {
9889 EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits());
9890 SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode();
9891
9892 // Next, convert to FP elements of the same size.
9893 return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT);
9894 }
9895
9896 SDLoc DL(BV);
9897
9898 // Okay, we know the src/dst types are both integers of differing types.
9899 // Handling growing first.
9900 assert(SrcEltVT.isInteger() && DstEltVT.isInteger());
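 // E.g. growing (hypothetical values, little-endian): two i16 inputs
 // 0x1111, 0x2222 combine into the single i32 output 0x22221111, since the
 // later input supplies the high bits on a little-endian target.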
9901 if (SrcBitSize < DstBitSize) {
9902 unsigned NumInputsPerOutput = DstBitSize/SrcBitSize;
9903
9904 SmallVector<SDValue, 8> Ops;
9905 for (unsigned i = 0, e = BV->getNumOperands(); i != e;
9906 i += NumInputsPerOutput) {
9907 bool isLE = DAG.getDataLayout().isLittleEndian();
9908 APInt NewBits = APInt(DstBitSize, 0);
9909 bool EltIsUndef = true;
9910 for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
9911 // Shift the previously computed bits over.
9912 NewBits <<= SrcBitSize;
9913 SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j));
9914 if (Op.isUndef()) continue;
9915 EltIsUndef = false;
9916
9917 NewBits |= cast<ConstantSDNode>(Op)->getAPIntValue().
9918 zextOrTrunc(SrcBitSize).zext(DstBitSize);
9919 }
9920
9921 if (EltIsUndef)
9922 Ops.push_back(DAG.getUNDEF(DstEltVT));
9923 else
9924 Ops.push_back(DAG.getConstant(NewBits, DL, DstEltVT));
9925 }
9926
9927 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size());
9928 return DAG.getBuildVector(VT, DL, Ops);
9929 }
9930
9931 // Finally, this must be the case where we are shrinking elements: each input
9932 // turns into multiple outputs.
9933 unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
9934 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
9935 NumOutputsPerInput*BV->getNumOperands());
9936 SmallVector<SDValue, 8> Ops;
9937
9938 for (const SDValue &Op : BV->op_values()) {
9939 if (Op.isUndef()) {
9940 Ops.append(NumOutputsPerInput, DAG.getUNDEF(DstEltVT));
9941 continue;
9942 }
9943
9944 APInt OpVal = cast<ConstantSDNode>(Op)->
9945 getAPIntValue().zextOrTrunc(SrcBitSize);
9946
9947 for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
9948 APInt ThisVal = OpVal.trunc(DstBitSize);
9949 Ops.push_back(DAG.getConstant(ThisVal, DL, DstEltVT));
9950 OpVal.lshrInPlace(DstBitSize);
9951 }
9952
9953 // For big endian targets, swap the order of the pieces of each element.
9954 if (DAG.getDataLayout().isBigEndian())
9955 std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
9956 }
9957
9958 return DAG.getBuildVector(VT, DL, Ops);
9959}
9960
9961static bool isContractable(SDNode *N) {
9962 SDNodeFlags F = N->getFlags();
9963 return F.hasAllowContract() || F.hasAllowReassociation();
9964}
9965
9966/// Try to perform FMA combining on a given FADD node.
9967SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
9968 SDValue N0 = N->getOperand(0);
9969 SDValue N1 = N->getOperand(1);
9970 EVT VT = N->getValueType(0);
9971 SDLoc SL(N);
9972
9973 const TargetOptions &Options = DAG.getTarget().Options;
9974
9975 // Floating-point multiply-add with intermediate rounding.
9976 bool HasFMAD = (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT));
9977
9978 // Floating-point multiply-add without intermediate rounding.
9979 bool HasFMA =
9980 TLI.isFMAFasterThanFMulAndFAdd(VT) &&
9981 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT));
9982
9983 // No valid opcode, do not combine.
9984 if (!HasFMAD && !HasFMA)
9985 return SDValue();
9986
9987 SDNodeFlags Flags = N->getFlags();
9988 bool CanFuse = Options.UnsafeFPMath || isContractable(N);
9989 bool AllowFusionGlobally = (Options.AllowFPOpFusion == FPOpFusion::Fast ||
9990 CanFuse || HasFMAD);
9991 // If the addition is not contractable, do not combine.
9992 if (!AllowFusionGlobally && !isContractable(N))
9993 return SDValue();
9994
9995 const SelectionDAGTargetInfo *STI = DAG.getSubtarget().getSelectionDAGInfo();
9996 if (STI && STI->generateFMAsInMachineCombiner(OptLevel))
9997 return SDValue();
9998
9999 // Always prefer FMAD to FMA for precision.
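 // (FMAD keeps the intermediate rounding of the separate fmul + fadd, so
 // fusing to FMAD reproduces the unfused result bit-for-bit; FMA rounds
 // only once at the end.)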
10000 unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
10001 bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
10002
10003 // Is the node an FMUL and contractable either due to global flags or
10004 // SDNodeFlags.
10005 auto isContractableFMUL = [AllowFusionGlobally](SDValue N) {
10006 if (N.getOpcode() != ISD::FMUL)
10007 return false;
10008 return AllowFusionGlobally || isContractable(N.getNode());
10009 };
10010 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
10011 // prefer to fold the multiply with fewer uses.
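 // (If the fused multiply has other users it must still be computed
 // separately, so fusing the single-use multiply is what actually removes
 // an operation.)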
10012 if (Aggressive && isContractableFMUL(N0) && isContractableFMUL(N1)) {
10013 if (N0.getNode()->use_size() > N1.getNode()->use_size())
10014 std::swap(N0, N1);
10015 }
10016
10017 // fold (fadd (fmul x, y), z) -> (fma x, y, z)
10018 if (isContractableFMUL(N0) && (Aggressive || N0->hasOneUse())) {
10019 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10020 N0.getOperand(0), N0.getOperand(1), N1, Flags);
10021 }
10022
10023 // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
10024 // Note: Commutes FADD operands.
10025 if (isContractableFMUL(N1) && (Aggressive || N1->hasOneUse())) {
10026 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10027 N1.getOperand(0), N1.getOperand(1), N0, Flags);
10028 }
10029
10030 // Look through FP_EXTEND nodes to do more combining.
10031
10032 // fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
10033 if (N0.getOpcode() == ISD::FP_EXTEND) {
10034 SDValue N00 = N0.getOperand(0);
10035 if (isContractableFMUL(N00) &&
10036 TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N00.getValueType())) {
10037 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10038 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10039 N00.getOperand(0)),
10040 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10041 N00.getOperand(1)), N1, Flags);
10042 }
10043 }
10044
10045 // fold (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
10046 // Note: Commutes FADD operands.
10047 if (N1.getOpcode() == ISD::FP_EXTEND) {
10048 SDValue N10 = N1.getOperand(0);
10049 if (isContractableFMUL(N10) &&
10050 TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N10.getValueType())) {
10051 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10052 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10053 N10.getOperand(0)),
10054 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10055 N10.getOperand(1)), N0, Flags);
10056 }
10057 }
10058
10059 // More folding opportunities when target permits.
10060 if (Aggressive) {
10061 // fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y (fma u, v, z))
10062 if (CanFuse &&
10063 N0.getOpcode() == PreferredFusedOpcode &&
10064 N0.getOperand(2).getOpcode() == ISD::FMUL &&
10065 N0->hasOneUse() && N0.getOperand(2)->hasOneUse()) {
10066 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10067 N0.getOperand(0), N0.getOperand(1),
10068 DAG.getNode(PreferredFusedOpcode, SL, VT,
10069 N0.getOperand(2).getOperand(0),
10070 N0.getOperand(2).getOperand(1),
10071 N1, Flags), Flags);
10072 }
10073
10074 // fold (fadd x, (fma y, z, (fmul u, v)) -> (fma y, z (fma u, v, x))
10075 if (CanFuse &&
10076 N1->getOpcode() == PreferredFusedOpcode &&
10077 N1.getOperand(2).getOpcode() == ISD::FMUL &&
10078 N1->hasOneUse() && N1.getOperand(2)->hasOneUse()) {
10079 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10080 N1.getOperand(0), N1.getOperand(1),
10081 DAG.getNode(PreferredFusedOpcode, SL, VT,
10082 N1.getOperand(2).getOperand(0),
10083 N1.getOperand(2).getOperand(1),
10084 N0, Flags), Flags);
10085 }
10086
10087
10088 // fold (fadd (fma x, y, (fpext (fmul u, v))), z)
10089 // -> (fma x, y, (fma (fpext u), (fpext v), z))
10090 auto FoldFAddFMAFPExtFMul = [&] (
10091 SDValue X, SDValue Y, SDValue U, SDValue V, SDValue Z,
10092 SDNodeFlags Flags) {
10093 return DAG.getNode(PreferredFusedOpcode, SL, VT, X, Y,
10094 DAG.getNode(PreferredFusedOpcode, SL, VT,
10095 DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
10096 DAG.getNode(ISD::FP_EXTEND, SL, VT, V),
10097 Z, Flags), Flags);
10098 };
10099 if (N0.getOpcode() == PreferredFusedOpcode) {
10100 SDValue N02 = N0.getOperand(2);
10101 if (N02.getOpcode() == ISD::FP_EXTEND) {
10102 SDValue N020 = N02.getOperand(0);
10103 if (isContractableFMUL(N020) &&
10104 TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N020.getValueType())) {
10105 return FoldFAddFMAFPExtFMul(N0.getOperand(0), N0.getOperand(1),
10106 N020.getOperand(0), N020.getOperand(1),
10107 N1, Flags);
10108 }
10109 }
10110 }
10111
10112 // fold (fadd (fpext (fma x, y, (fmul u, v))), z)
10113 // -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
10114 // FIXME: This turns two single-precision and one double-precision
10115 // operation into two double-precision operations, which might not be
10116 // interesting for all targets, especially GPUs.
10117 auto FoldFAddFPExtFMAFMul = [&] (
10118 SDValue X, SDValue Y, SDValue U, SDValue V, SDValue Z,
10119 SDNodeFlags Flags) {
10120 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10121 DAG.getNode(ISD::FP_EXTEND, SL, VT, X),
10122 DAG.getNode(ISD::FP_EXTEND, SL, VT, Y),
10123 DAG.getNode(PreferredFusedOpcode, SL, VT,
10124 DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
10125 DAG.getNode(ISD::FP_EXTEND, SL, VT, V),
10126 Z, Flags), Flags);
10127 };
10128 if (N0.getOpcode() == ISD::FP_EXTEND) {
10129 SDValue N00 = N0.getOperand(0);
10130 if (N00.getOpcode() == PreferredFusedOpcode) {
10131 SDValue N002 = N00.getOperand(2);
10132 if (isContractableFMUL(N002) &&
10133 TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N00.getValueType())) {
10134 return FoldFAddFPExtFMAFMul(N00.getOperand(0), N00.getOperand(1),
10135 N002.getOperand(0), N002.getOperand(1),
10136 N1, Flags);
10137 }
10138 }
10139 }
10140
10141 // fold (fadd x, (fma y, z, (fpext (fmul u, v)))
10142 // -> (fma y, z, (fma (fpext u), (fpext v), x))
10143 if (N1.getOpcode() == PreferredFusedOpcode) {
10144 SDValue N12 = N1.getOperand(2);
10145 if (N12.getOpcode() == ISD::FP_EXTEND) {
10146 SDValue N120 = N12.getOperand(0);
10147 if (isContractableFMUL(N120) &&
10148 TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N120.getValueType())) {
10149 return FoldFAddFMAFPExtFMul(N1.getOperand(0), N1.getOperand(1),
10150 N120.getOperand(0), N120.getOperand(1),
10151 N0, Flags);
10152 }
10153 }
10154 }
10155
10156 // fold (fadd x, (fpext (fma y, z, (fmul u, v)))
10157 // -> (fma (fpext y), (fpext z), (fma (fpext u), (fpext v), x))
10158 // FIXME: This turns two single-precision and one double-precision
10159 // operation into two double-precision operations, which might not be
10160 // interesting for all targets, especially GPUs.
10161 if (N1.getOpcode() == ISD::FP_EXTEND) {
10162 SDValue N10 = N1.getOperand(0);
10163 if (N10.getOpcode() == PreferredFusedOpcode) {
10164 SDValue N102 = N10.getOperand(2);
10165 if (isContractableFMUL(N102) &&
10166 TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N10.getValueType())) {
10167 return FoldFAddFPExtFMAFMul(N10.getOperand(0), N10.getOperand(1),
10168 N102.getOperand(0), N102.getOperand(1),
10169 N0, Flags);
10170 }
10171 }
10172 }
10173 }
10174
10175 return SDValue();
10176}
10177
10178/// Try to perform FMA combining on a given FSUB node.
10179SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
10180 SDValue N0 = N->getOperand(0);
10181 SDValue N1 = N->getOperand(1);
10182 EVT VT = N->getValueType(0);
10183 SDLoc SL(N);
10184
10185 const TargetOptions &Options = DAG.getTarget().Options;
10186 // Floating-point multiply-add with intermediate rounding.
10187 bool HasFMAD = (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT));
10188
10189 // Floating-point multiply-add without intermediate rounding.
10190 bool HasFMA =
10191 TLI.isFMAFasterThanFMulAndFAdd(VT) &&
10192 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT));
10193
10194 // No valid opcode, do not combine.
10195 if (!HasFMAD && !HasFMA)
10196 return SDValue();
10197
10198 const SDNodeFlags Flags = N->getFlags();
10199 bool CanFuse = Options.UnsafeFPMath || isContractable(N);
10200 bool AllowFusionGlobally = (Options.AllowFPOpFusion == FPOpFusion::Fast ||
10201 CanFuse || HasFMAD);
10202
10203 // If the subtraction is not contractable, do not combine.
10204 if (!AllowFusionGlobally && !isContractable(N))
10205 return SDValue();
10206
10207 const SelectionDAGTargetInfo *STI = DAG.getSubtarget().getSelectionDAGInfo();
10208 if (STI && STI->generateFMAsInMachineCombiner(OptLevel))
10209 return SDValue();
10210
10211 // Always prefer FMAD to FMA for precision.
10212 unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
10213 bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
10214
10215 // Is the node an FMUL and contractable either due to global flags or
10216 // SDNodeFlags.
10217 auto isContractableFMUL = [AllowFusionGlobally](SDValue N) {
10218 if (N.getOpcode() != ISD::FMUL)
10219 return false;
10220 return AllowFusionGlobally || isContractable(N.getNode());
10221 };
10222
10223 // fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
10224 if (isContractableFMUL(N0) && (Aggressive || N0->hasOneUse())) {
10225 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10226 N0.getOperand(0), N0.getOperand(1),
10227 DAG.getNode(ISD::FNEG, SL, VT, N1), Flags);
10228 }
10229
10230 // fold (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
10231 // Note: Commutes FSUB operands.
10232 if (isContractableFMUL(N1) && (Aggressive || N1->hasOneUse())) {
10233 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10234 DAG.getNode(ISD::FNEG, SL, VT,
10235 N1.getOperand(0)),
10236 N1.getOperand(1), N0, Flags);
10237 }
10238
10239 // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
10240 if (N0.getOpcode() == ISD::FNEG && isContractableFMUL(N0.getOperand(0)) &&
10241 (Aggressive || (N0->hasOneUse() && N0.getOperand(0).hasOneUse()))) {
10242 SDValue N00 = N0.getOperand(0).getOperand(0);
10243 SDValue N01 = N0.getOperand(0).getOperand(1);
10244 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10245 DAG.getNode(ISD::FNEG, SL, VT, N00), N01,
10246 DAG.getNode(ISD::FNEG, SL, VT, N1), Flags);
10247 }
10248
10249 // Look through FP_EXTEND nodes to do more combining.
10250
10251 // fold (fsub (fpext (fmul x, y)), z)
10252 // -> (fma (fpext x), (fpext y), (fneg z))
10253 if (N0.getOpcode() == ISD::FP_EXTEND) {
10254 SDValue N00 = N0.getOperand(0);
10255 if (isContractableFMUL(N00) &&
10256 TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N00.getValueType())) {
10257 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10258 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10259 N00.getOperand(0)),
10260 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10261 N00.getOperand(1)),
10262 DAG.getNode(ISD::FNEG, SL, VT, N1), Flags);
10263 }
10264 }
10265
10266 // fold (fsub x, (fpext (fmul y, z)))
10267 // -> (fma (fneg (fpext y)), (fpext z), x)
10268 // Note: Commutes FSUB operands.
10269 if (N1.getOpcode() == ISD::FP_EXTEND) {
10270 SDValue N10 = N1.getOperand(0);
10271 if (isContractableFMUL(N10) &&
10272 TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N10.getValueType())) {
10273 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10274 DAG.getNode(ISD::FNEG, SL, VT,
10275 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10276 N10.getOperand(0))),
10277 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10278 N10.getOperand(1)),
10279 N0, Flags);
10280 }
10281 }
10282
10283 // fold (fsub (fpext (fneg (fmul x, y))), z)
10284 // -> (fneg (fma (fpext x), (fpext y), z))
10285 // Note: This could be removed with appropriate canonicalization of the
10286 // input expression into (fneg (fadd (fpext (fmul x, y)), z)). However, the
10287 // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent
10288 // us from implementing the canonicalization in visitFSUB.
10289 if (N0.getOpcode() == ISD::FP_EXTEND) {
10290 SDValue N00 = N0.getOperand(0);
10291 if (N00.getOpcode() == ISD::FNEG) {
10292 SDValue N000 = N00.getOperand(0);
10293 if (isContractableFMUL(N000) &&
10294 TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N00.getValueType())) {
10295 return DAG.getNode(ISD::FNEG, SL, VT,
10296 DAG.getNode(PreferredFusedOpcode, SL, VT,
10297 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10298 N000.getOperand(0)),
10299 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10300 N000.getOperand(1)),
10301 N1, Flags));
10302 }
10303 }
10304 }
10305
10306 // fold (fsub (fneg (fpext (fmul x, y))), z)
10307 // -> (fneg (fma (fpext x), (fpext y), z))
10308 // Note: This could be removed with appropriate canonicalization of the
10309 // input expression into (fneg (fadd (fpext (fmul x, y)), z)). However, the
10310 // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent
10311 // us from implementing the canonicalization in visitFSUB.
10312 if (N0.getOpcode() == ISD::FNEG) {
10313 SDValue N00 = N0.getOperand(0);
10314 if (N00.getOpcode() == ISD::FP_EXTEND) {
10315 SDValue N000 = N00.getOperand(0);
10316 if (isContractableFMUL(N000) &&
10317 TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N000.getValueType())) {
10318 return DAG.getNode(ISD::FNEG, SL, VT,
10319 DAG.getNode(PreferredFusedOpcode, SL, VT,
10320 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10321 N000.getOperand(0)),
10322 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10323 N000.getOperand(1)),
10324 N1, Flags));
10325 }
10326 }
10327 }
10328
10329 // More folding opportunities when the target permits.
10330 if (Aggressive) {
10331 // fold (fsub (fma x, y, (fmul u, v)), z)
10332 // -> (fma x, y, (fma u, v, (fneg z)))
10333 if (CanFuse && N0.getOpcode() == PreferredFusedOpcode &&
10334 isContractableFMUL(N0.getOperand(2)) && N0->hasOneUse() &&
10335 N0.getOperand(2)->hasOneUse()) {
10336 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10337 N0.getOperand(0), N0.getOperand(1),
10338 DAG.getNode(PreferredFusedOpcode, SL, VT,
10339 N0.getOperand(2).getOperand(0),
10340 N0.getOperand(2).getOperand(1),
10341 DAG.getNode(ISD::FNEG, SL, VT,
10342 N1), Flags), Flags);
10343 }
10344
10345 // fold (fsub x, (fma y, z, (fmul u, v)))
10346 // -> (fma (fneg y), z, (fma (fneg u), v, x))
10347 if (CanFuse && N1.getOpcode() == PreferredFusedOpcode &&
10348 isContractableFMUL(N1.getOperand(2))) {
10349 SDValue N20 = N1.getOperand(2).getOperand(0);
10350 SDValue N21 = N1.getOperand(2).getOperand(1);
10351 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10352 DAG.getNode(ISD::FNEG, SL, VT,
10353 N1.getOperand(0)),
10354 N1.getOperand(1),
10355 DAG.getNode(PreferredFusedOpcode, SL, VT,
10356 DAG.getNode(ISD::FNEG, SL, VT, N20),
10357 N21, N0, Flags), Flags);
10358 }
10359
10360
10361 // fold (fsub (fma x, y, (fpext (fmul u, v))), z)
10362 // -> (fma x, y, (fma (fpext u), (fpext v), (fneg z)))
10363 if (N0.getOpcode() == PreferredFusedOpcode) {
10364 SDValue N02 = N0.getOperand(2);
10365 if (N02.getOpcode() == ISD::FP_EXTEND) {
10366 SDValue N020 = N02.getOperand(0);
10367 if (isContractableFMUL(N020) &&
10368 TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N020.getValueType())) {
10369 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10370 N0.getOperand(0), N0.getOperand(1),
10371 DAG.getNode(PreferredFusedOpcode, SL, VT,
10372 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10373 N020.getOperand(0)),
10374 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10375 N020.getOperand(1)),
10376 DAG.getNode(ISD::FNEG, SL, VT,
10377 N1), Flags), Flags);
10378 }
10379 }
10380 }
10381
10382 // fold (fsub (fpext (fma x, y, (fmul u, v))), z)
10383 // -> (fma (fpext x), (fpext y),
10384 // (fma (fpext u), (fpext v), (fneg z)))
10385 // FIXME: This turns two single-precision and one double-precision
10386 // operation into two double-precision operations, which might not be
10387 // interesting for all targets, especially GPUs.
10388 if (N0.getOpcode() == ISD::FP_EXTEND) {
10389 SDValue N00 = N0.getOperand(0);
10390 if (N00.getOpcode() == PreferredFusedOpcode) {
10391 SDValue N002 = N00.getOperand(2);
10392 if (isContractableFMUL(N002) &&
10393 TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N00.getValueType())) {
10394 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10395 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10396 N00.getOperand(0)),
10397 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10398 N00.getOperand(1)),
10399 DAG.getNode(PreferredFusedOpcode, SL, VT,
10400 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10401 N002.getOperand(0)),
10402 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10403 N002.getOperand(1)),
10404 DAG.getNode(ISD::FNEG, SL, VT,
10405 N1), Flags), Flags);
10406 }
10407 }
10408 }
10409
10410 // fold (fsub x, (fma y, z, (fpext (fmul u, v))))
10411 // -> (fma (fneg y), z, (fma (fneg (fpext u)), (fpext v), x))
10412 if (N1.getOpcode() == PreferredFusedOpcode &&
10413 N1.getOperand(2).getOpcode() == ISD::FP_EXTEND) {
10414 SDValue N120 = N1.getOperand(2).getOperand(0);
10415 if (isContractableFMUL(N120) &&
10416 TLI.isFPExtFoldable(PreferredFusedOpcode, VT, N120.getValueType())) {
10417 SDValue N1200 = N120.getOperand(0);
10418 SDValue N1201 = N120.getOperand(1);
10419 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10420 DAG.getNode(ISD::FNEG, SL, VT, N1.getOperand(0)),
10421 N1.getOperand(1),
10422 DAG.getNode(PreferredFusedOpcode, SL, VT,
10423 DAG.getNode(ISD::FNEG, SL, VT,
10424 DAG.getNode(ISD::FP_EXTEND, SL,
10425 VT, N1200)),
10426 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10427 N1201),
10428 N0, Flags), Flags);
10429 }
10430 }
10431
10432 // fold (fsub x, (fpext (fma y, z, (fmul u, v))))
10433 // -> (fma (fneg (fpext y)), (fpext z),
10434 // (fma (fneg (fpext u)), (fpext v), x))
10435 // FIXME: This turns two single-precision and one double-precision
10436 // operation into two double-precision operations, which might not be
10437 // interesting for all targets, especially GPUs.
10438 if (N1.getOpcode() == ISD::FP_EXTEND &&
10439 N1.getOperand(0).getOpcode() == PreferredFusedOpcode) {
10440 SDValue CvtSrc = N1.getOperand(0);
10441 SDValue N100 = CvtSrc.getOperand(0);
10442 SDValue N101 = CvtSrc.getOperand(1);
10443 SDValue N102 = CvtSrc.getOperand(2);
10444 if (isContractableFMUL(N102) &&
10445 TLI.isFPExtFoldable(PreferredFusedOpcode, VT, CvtSrc.getValueType())) {
10446 SDValue N1020 = N102.getOperand(0);
10447 SDValue N1021 = N102.getOperand(1);
10448 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10449 DAG.getNode(ISD::FNEG, SL, VT,
10450 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10451 N100)),
10452 DAG.getNode(ISD::FP_EXTEND, SL, VT, N101),
10453 DAG.getNode(PreferredFusedOpcode, SL, VT,
10454 DAG.getNode(ISD::FNEG, SL, VT,
10455 DAG.getNode(ISD::FP_EXTEND, SL,
10456 VT, N1020)),
10457 DAG.getNode(ISD::FP_EXTEND, SL, VT,
10458 N1021),
10459 N0, Flags), Flags);
10460 }
10461 }
10462 }
10463
10464 return SDValue();
10465}
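#if 0 // Editor's sketch, not part of the original source file.
// The basic FSUB contractions above, with std::fma standing in for the
// fused node; exact values keep the asserts independent of fusion:
#include <cmath>
#include <cassert>
int main() {
  double x = 3.0, y = 7.0, z = 5.0;
  assert(x * y - z == std::fma(x, y, -z));  // (fsub (fmul x, y), z)
  assert(x - y * z == std::fma(-y, z, x));  // (fsub x, (fmul y, z))
  return 0;
}
#endif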
10466
10467/// Try to perform FMA combining on a given FMUL node based on the distributive
10468/// law x * (y + 1) = x * y + x and variants thereof (commuted versions,
10469/// subtraction instead of addition).
10470SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) {
10471 SDValue N0 = N->getOperand(0);
10472 SDValue N1 = N->getOperand(1);
10473 EVT VT = N->getValueType(0);
10474 SDLoc SL(N);
10475 const SDNodeFlags Flags = N->getFlags();
10476
10477 assert(N->getOpcode() == ISD::FMUL && "Expected FMUL Operation");
10478
10479 const TargetOptions &Options = DAG.getTarget().Options;
10480
10481 // The transforms below are incorrect when x == 0 and y == inf, because the
10482 // intermediate multiplication produces a NaN.
10483 if (!Options.NoInfsFPMath)
10484 return SDValue();
10485
10486 // Floating-point multiply-add without intermediate rounding.
10487 bool HasFMA =
10488 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath) &&
10489 TLI.isFMAFasterThanFMulAndFAdd(VT) &&
10490 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT));
10491
10492 // Floating-point multiply-add with intermediate rounding. This can result
10493 // in a less precise result due to the changed rounding order.
10494 bool HasFMAD = Options.UnsafeFPMath &&
10495 (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT));
10496
10497 // No valid opcode, do not combine.
10498 if (!HasFMAD && !HasFMA)
10499 return SDValue();
10500
10501 // Always prefer FMAD to FMA for precision.
10502 unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
10503 bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
10504
10505 // fold (fmul (fadd x, +1.0), y) -> (fma x, y, y)
10506 // fold (fmul (fadd x, -1.0), y) -> (fma x, y, (fneg y))
10507 auto FuseFADD = [&](SDValue X, SDValue Y, const SDNodeFlags Flags) {
10508 if (X.getOpcode() == ISD::FADD && (Aggressive || X->hasOneUse())) {
10509 auto XC1 = isConstOrConstSplatFP(X.getOperand(1));
10510 if (XC1 && XC1->isExactlyValue(+1.0))
10511 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
10512 Y, Flags);
10513 if (XC1 && XC1->isExactlyValue(-1.0))
10514 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
10515 DAG.getNode(ISD::FNEG, SL, VT, Y), Flags);
10516 }
10517 return SDValue();
10518 };
10519
10520 if (SDValue FMA = FuseFADD(N0, N1, Flags))
10521 return FMA;
10522 if (SDValue FMA = FuseFADD(N1, N0, Flags))
10523 return FMA;
10524
10525 // fold (fmul (fsub +1.0, x), y) -> (fma (fneg x), y, y)
10526 // fold (fmul (fsub -1.0, x), y) -> (fma (fneg x), y, (fneg y))
10527 // fold (fmul (fsub x, +1.0), y) -> (fma x, y, (fneg y))
10528 // fold (fmul (fsub x, -1.0), y) -> (fma x, y, y)
10529 auto FuseFSUB = [&](SDValue X, SDValue Y, const SDNodeFlags Flags) {
10530 if (X.getOpcode() == ISD::FSUB && (Aggressive || X->hasOneUse())) {
10531 auto XC0 = isConstOrConstSplatFP(X.getOperand(0));
10532 if (XC0 && XC0->isExactlyValue(+1.0))
10533 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10534 DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y,
10535 Y, Flags);
10536 if (XC0 && XC0->isExactlyValue(-1.0))
10537 return DAG.getNode(PreferredFusedOpcode, SL, VT,
10538 DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y,
10539 DAG.getNode(ISD::FNEG, SL, VT, Y), Flags);
10540
10541 auto XC1 = isConstOrConstSplatFP(X.getOperand(1));
10542 if (XC1 && XC1->isExactlyValue(+1.0))
10543 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
10544 DAG.getNode(ISD::FNEG, SL, VT, Y), Flags);
10545 if (XC1 && XC1->isExactlyValue(-1.0))
10546 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
10547 Y, Flags);
10548 }
10549 return SDValue();
10550 };
10551
10552 if (SDValue FMA = FuseFSUB(N0, N1, Flags))
10553 return FMA;
10554 if (SDValue FMA = FuseFSUB(N1, N0, Flags))
10555 return FMA;
10556
10557 return SDValue();
10558}
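#if 0 // Editor's sketch, not part of the original source file.
// The distributive folds above in scalar form, with std::fma standing in
// for the fused node; exact values make the identities bit-for-bit:
#include <cmath>
#include <cassert>
int main() {
  double x = 3.0, y = 7.0;
  assert((x + 1.0) * y == std::fma(x, y, y));   // (fmul (fadd x, +1.0), y)
  assert((x - 1.0) * y == std::fma(x, y, -y));  // (fmul (fsub x, +1.0), y)
  assert((1.0 - x) * y == std::fma(-x, y, y));  // (fmul (fsub +1.0, x), y)
  return 0;
}
#endif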
10559
10560static bool isFMulNegTwo(SDValue &N) {
10561 if (N.getOpcode() != ISD::FMUL)
10562 return false;
10563 if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N.getOperand(1)))
10564 return CFP->isExactlyValue(-2.0);
10565 return false;
10566}
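#if 0 // Editor's sketch, not part of the original source file.
// The helper above feeds the fold (fadd A, (fmul B, -2.0)) ->
// (fsub A, (fadd B, B)) in visitFADD below; in scalar C++ terms:
double foldNegTwo(double a, double b) {
  // return a + b * -2.0;   // before: FADD + FMUL
  return a - (b + b);       // after:  FSUB + FADD; bit-identical, since
}                           // scaling by 2.0 is exact (barring overflow)
#endif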
10567
10568SDValue DAGCombiner::visitFADD(SDNode *N) {
10569 SDValue N0 = N->getOperand(0);
10570 SDValue N1 = N->getOperand(1);
10571 bool N0CFP = isConstantFPBuildVectorOrConstantFP(N0);
10572 bool N1CFP = isConstantFPBuildVectorOrConstantFP(N1);
10573 EVT VT = N->getValueType(0);
10574 SDLoc DL(N);
10575 const TargetOptions &Options = DAG.getTarget().Options;
10576 const SDNodeFlags Flags = N->getFlags();
10577
10578 // fold vector ops
10579 if (VT.isVector())
10580 if (SDValue FoldedVOp = SimplifyVBinOp(N))
10581 return FoldedVOp;
10582
10583 // fold (fadd c1, c2) -> c1 + c2
10584 if (N0CFP && N1CFP)
10585 return DAG.getNode(ISD::FADD, DL, VT, N0, N1, Flags);
10586
10587 // canonicalize constant to RHS
10588 if (N0CFP && !N1CFP)
10589 return DAG.getNode(ISD::FADD, DL, VT, N1, N0, Flags);
10590
10591 if (SDValue NewSel = foldBinOpIntoSelect(N))
10592 return NewSel;
10593
10594 // fold (fadd A, (fneg B)) -> (fsub A, B)
10595 if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
10596 isNegatibleForFree(N1, LegalOperations, TLI, &Options) == 2)
10597 return DAG.getNode(ISD::FSUB, DL, VT, N0,
10598 GetNegatedExpression(N1, DAG, LegalOperations), Flags);
10599
10600 // fold (fadd (fneg A), B) -> (fsub B, A)
10601 if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
10602 isNegatibleForFree(N0, LegalOperations, TLI, &Options) == 2)
10603 return DAG.getNode(ISD::FSUB, DL, VT, N1,
10604 GetNegatedExpression(N0, DAG, LegalOperations), Flags);
10605
10606 // fold (fadd A, (fmul B, -2.0)) -> (fsub A, (fadd B, B))
10607 // fold (fadd (fmul B, -2.0), A) -> (fsub A, (fadd B, B))
10608 if ((isFMulNegTwo(N0) && N0.hasOneUse()) ||
10609 (isFMulNegTwo(N1) && N1.hasOneUse())) {
10610 bool N1IsFMul = isFMulNegTwo(N1);
10611 SDValue AddOp = N1IsFMul ? N1.getOperand(0) : N0.getOperand(0);
10612 SDValue Add = DAG.getNode(ISD::FADD, DL, VT, AddOp, AddOp, Flags);
10613 return DAG.getNode(ISD::FSUB, DL, VT, N1IsFMul ? N0 : N1, Add, Flags);
10614 }
10615
10616 ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1);
10617 if (N1C && N1C->isZero()) {
10618 if (N1C->isNegative() || Options.UnsafeFPMath ||
10619 Flags.hasNoSignedZeros()) {
10620 // fold (fadd A, 0) -> A
10621 return N0;
10622 }
10623 }
10624
10625 // No FP constant should be created after legalization as the Instruction
10626 // Selection pass has a hard time dealing with FP constants.
10627 bool AllowNewConst = (Level < AfterLegalizeDAG);
10628
10629 // If 'unsafe math' or nnan is enabled, fold lots of things.
10630 if ((Options.UnsafeFPMath || Flags.hasNoNaNs()) && AllowNewConst) {
10631 // If allowed, fold (fadd (fneg x), x) -> 0.0
10632 if (N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1)
10633 return DAG.getConstantFP(0.0, DL, VT);
10634
10635 // If allowed, fold (fadd x, (fneg x)) -> 0.0
10636 if (N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0)
10637 return DAG.getConstantFP(0.0, DL, VT);
10638 }
10639
10640 // If 'unsafe math' or (reassoc and nsz) is enabled, fold lots of things.
10641 // TODO: break out portions of the transformations below for which Unsafe is
10642 // considered and which do not require both nsz and reassoc
10643 if ((Options.UnsafeFPMath ||
10644 (Flags.hasAllowReassociation() && Flags.hasNoSignedZeros())) &&
10645 AllowNewConst) {
10646 // fadd (fadd x, c1), c2 -> fadd x, c1 + c2
10647 if (N1CFP && N0.getOpcode() == ISD::FADD &&
10648 isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) {
10649 SDValue NewC = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), N1, Flags);
10650 return DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(0), NewC, Flags);
10651 }
10652
10653 // We can fold chains of FADDs of the same value into multiplications.
10654 // This transform is not safe in general because we are reducing the number
10655 // of rounding steps.
10656 if (TLI.isOperationLegalOrCustom(ISD::FMUL, VT) && !N0CFP && !N1CFP) {
10657 if (N0.getOpcode() == ISD::FMUL) {
10658 bool CFP00 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
10659 bool CFP01 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(1));
10660
10661 // (fadd (fmul x, c), x) -> (fmul x, c+1)
10662 if (CFP01 && !CFP00 && N0.getOperand(0) == N1) {
10663 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1),
10664 DAG.getConstantFP(1.0, DL, VT), Flags);
10665 return DAG.getNode(ISD::FMUL, DL, VT, N1, NewCFP, Flags);
10666 }
10667
10668 // (fadd (fmul x, c), (fadd x, x)) -> (fmul x, c+2)
10669 if (CFP01 && !CFP00 && N1.getOpcode() == ISD::FADD &&
10670 N1.getOperand(0) == N1.getOperand(1) &&
10671 N0.getOperand(0) == N1.getOperand(0)) {
10672 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1),
10673 DAG.getConstantFP(2.0, DL, VT), Flags);
10674 return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), NewCFP, Flags);
10675 }
10676 }
10677
10678 if (N1.getOpcode() == ISD::FMUL) {
10679 bool CFP10 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
10680 bool CFP11 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(1));
10681
10682 // (fadd x, (fmul x, c)) -> (fmul x, c+1)
10683 if (CFP11 && !CFP10 && N1.getOperand(0) == N0) {
10684 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1),
10685 DAG.getConstantFP(1.0, DL, VT), Flags);
10686 return DAG.getNode(ISD::FMUL, DL, VT, N0, NewCFP, Flags);
10687 }
10688
10689 // (fadd (fadd x, x), (fmul x, c)) -> (fmul x, c+2)
10690 if (CFP11 && !CFP10 && N0.getOpcode() == ISD::FADD &&
10691 N0.getOperand(0) == N0.getOperand(1) &&
10692 N1.getOperand(0) == N0.getOperand(0)) {
10693 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1),
10694 DAG.getConstantFP(2.0, DL, VT), Flags);
10695 return DAG.getNode(ISD::FMUL, DL, VT, N1.getOperand(0), NewCFP, Flags);
10696 }
10697 }
10698
10699 if (N0.getOpcode() == ISD::FADD) {
10700 bool CFP00 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
10701 // (fadd (fadd x, x), x) -> (fmul x, 3.0)
10702 if (!CFP00 && N0.getOperand(0) == N0.getOperand(1) &&
10703 (N0.getOperand(0) == N1)) {
10704 return DAG.getNode(ISD::FMUL, DL, VT,
10705 N1, DAG.getConstantFP(3.0, DL, VT), Flags);
10706 }
10707 }
10708
10709 if (N1.getOpcode() == ISD::FADD) {
10710 bool CFP10 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
10711 // (fadd x, (fadd x, x)) -> (fmul x, 3.0)
10712 if (!CFP10 && N1.getOperand(0) == N1.getOperand(1) &&
10713 N1.getOperand(0) == N0) {
10714 return DAG.getNode(ISD::FMUL, DL, VT,
10715 N0, DAG.getConstantFP(3.0, DL, VT), Flags);
10716 }
10717 }
10718
10719 // (fadd (fadd x, x), (fadd x, x)) -> (fmul x, 4.0)
10720 if (N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD &&
10721 N0.getOperand(0) == N0.getOperand(1) &&
10722 N1.getOperand(0) == N1.getOperand(1) &&
10723 N0.getOperand(0) == N1.getOperand(0)) {
10724 return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0),
10725 DAG.getConstantFP(4.0, DL, VT), Flags);
10726 }
10727 }
10728 } // enable-unsafe-fp-math
10729
10730 // FADD -> FMA combines:
10731 if (SDValue Fused = visitFADDForFMACombine(N)) {
10732 AddToWorklist(Fused.getNode());
10733 return Fused;
10734 }
10735 return SDValue();
10736}
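#if 0 // Editor's sketch, not part of the original source file.
// Under unsafe-math or reassoc+nsz, the FADD chains above collapse into
// multiplies by a fresh constant:
double three(double x) { return (x + x) + x; }       // -> fmul x, 3.0
double four(double x)  { return (x + x) + (x + x); } // -> fmul x, 4.0
// (x + x is exact, so each form performs at most one rounding here.)
#endif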
10737
10738SDValue DAGCombiner::visitFSUB(SDNode *N) {
10739 SDValue N0 = N->getOperand(0);
10740 SDValue N1 = N->getOperand(1);
10741 ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0);
10742 ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1);
10743 EVT VT = N->getValueType(0);
10744 SDLoc DL(N);
10745 const TargetOptions &Options = DAG.getTarget().Options;
10746 const SDNodeFlags Flags = N->getFlags();
10747
10748 // fold vector ops
10749 if (VT.isVector())
10750 if (SDValue FoldedVOp = SimplifyVBinOp(N))
10751 return FoldedVOp;
10752
10753 // fold (fsub c1, c2) -> c1-c2
10754 if (N0CFP && N1CFP)
10755 return DAG.getNode(ISD::FSUB, DL, VT, N0, N1, Flags);
10756
10757 if (SDValue NewSel = foldBinOpIntoSelect(N))
10758 return NewSel;
10759
10760 // (fsub A, 0) -> A
10761 if (N1CFP && N1CFP->isZero()) {
10762 if (!N1CFP->isNegative() || Options.UnsafeFPMath ||
10763 Flags.hasNoSignedZeros()) {
10764 return N0;
10765 }
10766 }
10767
10768 if (N0 == N1) {
10769 // (fsub x, x) -> 0.0
10770 if (Options.UnsafeFPMath || Flags.hasNoNaNs())
10771 return DAG.getConstantFP(0.0f, DL, VT);
10772 }
10773
10774 // (fsub 0, B) -> -B
10775 if (N0CFP && N0CFP->isZero()) {
10776 if (Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros()) {
10777 if (isNegatibleForFree(N1, LegalOperations, TLI, &Options))
10778 return GetNegatedExpression(N1, DAG, LegalOperations);
10779 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
10780 return DAG.getNode(ISD::FNEG, DL, VT, N1, Flags);
10781 }
10782 }
10783
10784 // fold (fsub A, (fneg B)) -> (fadd A, B)
10785 if (isNegatibleForFree(N1, LegalOperations, TLI, &Options))
10786 return DAG.getNode(ISD::FADD, DL, VT, N0,
10787 GetNegatedExpression(N1, DAG, LegalOperations), Flags);
10788
10789 // If 'unsafe math' is enabled, fold lots of things.
10790 if (Options.UnsafeFPMath) {
10791 // (fsub x, (fadd x, y)) -> (fneg y)
10792 // (fsub x, (fadd y, x)) -> (fneg y)
10793 if (N1.getOpcode() == ISD::FADD) {
10794 SDValue N10 = N1->getOperand(0);
10795 SDValue N11 = N1->getOperand(1);
10796
10797 if (N10 == N0 && isNegatibleForFree(N11, LegalOperations, TLI, &Options))
10798 return GetNegatedExpression(N11, DAG, LegalOperations);
10799
10800 if (N11 == N0 && isNegatibleForFree(N10, LegalOperations, TLI, &Options))
10801 return GetNegatedExpression(N10, DAG, LegalOperations);
10802 }
10803 }
10804
10805 // FSUB -> FMA combines:
10806 if (SDValue Fused = visitFSUBForFMACombine(N)) {
10807 AddToWorklist(Fused.getNode());
10808 return Fused;
10809 }
10810
10811 return SDValue();
10812}
10813
10814SDValue DAGCombiner::visitFMUL(SDNode *N) {
10815 SDValue N0 = N->getOperand(0);
10816 SDValue N1 = N->getOperand(1);
10817 ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0);
10818 ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1);
10819 EVT VT = N->getValueType(0);
10820 SDLoc DL(N);
10821 const TargetOptions &Options = DAG.getTarget().Options;
10822 const SDNodeFlags Flags = N->getFlags();
10823
10824 // fold vector ops
10825 if (VT.isVector()) {
10826 // This just handles C1 * C2 for vectors. Other vector folds are below.
10827 if (SDValue FoldedVOp = SimplifyVBinOp(N))
10828 return FoldedVOp;
10829 }
10830
10831 // fold (fmul c1, c2) -> c1*c2
10832 if (N0CFP && N1CFP)
10833 return DAG.getNode(ISD::FMUL, DL, VT, N0, N1, Flags);
10834
10835 // canonicalize constant to RHS
10836 if (isConstantFPBuildVectorOrConstantFP(N0) &&
10837 !isConstantFPBuildVectorOrConstantFP(N1))
10838 return DAG.getNode(ISD::FMUL, DL, VT, N1, N0, Flags);
10839
10840 // fold (fmul A, 1.0) -> A
10841 if (N1CFP && N1CFP->isExactlyValue(1.0))
10842 return N0;
10843
10844 if (SDValue NewSel = foldBinOpIntoSelect(N))
10845 return NewSel;
10846
10847 if (Options.UnsafeFPMath ||
10848 (Flags.hasNoNaNs() && Flags.hasNoSignedZeros())) {
10849 // fold (fmul A, 0) -> 0
10850 if (N1CFP && N1CFP->isZero())
10851 return N1;
10852 }
10853
10854 if (Options.UnsafeFPMath || Flags.hasAllowReassociation()) {
10855 // fmul (fmul X, C1), C2 -> fmul X, C1 * C2
10856 if (N0.getOpcode() == ISD::FMUL) {
10857 // Fold scalars or any vector constants (not just splats).
10858 // This fold is done in general by InstCombine, but extra fmul insts
10859 // may have been generated during lowering.
10860 SDValue N00 = N0.getOperand(0);
10861 SDValue N01 = N0.getOperand(1);
10862 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
10863 auto *BV00 = dyn_cast<BuildVectorSDNode>(N00);
10864 auto *BV01 = dyn_cast<BuildVectorSDNode>(N01);
10865
10866 // Check 1: Make sure that the first operand of the inner multiply is NOT
10867 // a constant. Otherwise, we may induce infinite looping.
10868 if (!(isConstOrConstSplatFP(N00) || (BV00 && BV00->isConstant()))) {
10869 // Check 2: Make sure that the second operand of the inner multiply and
10870 // the second operand of the outer multiply are constants.
10871 if ((N1CFP && isConstOrConstSplatFP(N01)) ||
10872 (BV1 && BV01 && BV1->isConstant() && BV01->isConstant())) {
10873 SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, N01, N1, Flags);
10874 return DAG.getNode(ISD::FMUL, DL, VT, N00, MulConsts, Flags);
10875 }
10876 }
10877 }
10878
10879 // Match a special case: we convert X * 2.0 into fadd.
10880 // fmul (fadd X, X), C -> fmul X, 2.0 * C
10881 if (N0.getOpcode() == ISD::FADD && N0.hasOneUse() &&
10882 N0.getOperand(0) == N0.getOperand(1)) {
10883 const SDValue Two = DAG.getConstantFP(2.0, DL, VT);
10884 SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, Two, N1, Flags);
10885 return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), MulConsts, Flags);
10886 }
10887 }
10888
10889 // fold (fmul X, 2.0) -> (fadd X, X)
10890 if (N1CFP && N1CFP->isExactlyValue(+2.0))
10891 return DAG.getNode(ISD::FADD, DL, VT, N0, N0, Flags);
10892
10893 // fold (fmul X, -1.0) -> (fneg X)
10894 if (N1CFP && N1CFP->isExactlyValue(-1.0))
10895 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
10896 return DAG.getNode(ISD::FNEG, DL, VT, N0);
10897
10898 // fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y)
10899 if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, &Options)) {
10900 if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, &Options)) {
10901 // Both can be negated for free, check to see if at least one is cheaper
10902 // negated.
10903 if (LHSNeg == 2 || RHSNeg == 2)
10904 return DAG.getNode(ISD::FMUL, DL, VT,
10905 GetNegatedExpression(N0, DAG, LegalOperations),
10906 GetNegatedExpression(N1, DAG, LegalOperations),
10907 Flags);
10908 }
10909 }
10910
10911 // fold (fmul X, (select (fcmp X > 0.0), -1.0, 1.0)) -> (fneg (fabs X))
10912 // fold (fmul X, (select (fcmp X > 0.0), 1.0, -1.0)) -> (fabs X)
10913 if (Flags.hasNoNaNs() && Flags.hasNoSignedZeros() &&
10914 (N0.getOpcode() == ISD::SELECT || N1.getOpcode() == ISD::SELECT) &&
10915 TLI.isOperationLegal(ISD::FABS, VT)) {
10916 SDValue Select = N0, X = N1;
10917 if (Select.getOpcode() != ISD::SELECT)
10918 std::swap(Select, X);
10919
10920 SDValue Cond = Select.getOperand(0);
10921 auto TrueOpnd = dyn_cast<ConstantFPSDNode>(Select.getOperand(1));
10922 auto FalseOpnd = dyn_cast<ConstantFPSDNode>(Select.getOperand(2));
10923
10924 if (TrueOpnd && FalseOpnd &&
10925 Cond.getOpcode() == ISD::SETCC && Cond.getOperand(0) == X &&
10926 isa<ConstantFPSDNode>(Cond.getOperand(1)) &&
10927 cast<ConstantFPSDNode>(Cond.getOperand(1))->isExactlyValue(0.0)) {
10928 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
10929 switch (CC) {
10930 default: break;
10931 case ISD::SETOLT:
10932 case ISD::SETULT:
10933 case ISD::SETOLE:
10934 case ISD::SETULE:
10935 case ISD::SETLT:
10936 case ISD::SETLE:
10937 std::swap(TrueOpnd, FalseOpnd);
10938 LLVM_FALLTHROUGH;
10939 case ISD::SETOGT:
10940 case ISD::SETUGT:
10941 case ISD::SETOGE:
10942 case ISD::SETUGE:
10943 case ISD::SETGT:
10944 case ISD::SETGE:
10945 if (TrueOpnd->isExactlyValue(-1.0) && FalseOpnd->isExactlyValue(1.0) &&
10946 TLI.isOperationLegal(ISD::FNEG, VT))
10947 return DAG.getNode(ISD::FNEG, DL, VT,
10948 DAG.getNode(ISD::FABS, DL, VT, X));
10949 if (TrueOpnd->isExactlyValue(1.0) && FalseOpnd->isExactlyValue(-1.0))
10950 return DAG.getNode(ISD::FABS, DL, VT, X);
10951
10952 break;
10953 }
10954 }
10955 }
10956
10957 // FMUL -> FMA combines:
10958 if (SDValue Fused = visitFMULForFMADistributiveCombine(N)) {
10959 AddToWorklist(Fused.getNode());
10960 return Fused;
10961 }
10962
10963 return SDValue();
10964}
10965
10966SDValue DAGCombiner::visitFMA(SDNode *N) {
10967 SDValue N0 = N->getOperand(0);
10968 SDValue N1 = N->getOperand(1);
10969 SDValue N2 = N->getOperand(2);
10970 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
10971 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
10972 EVT VT = N->getValueType(0);
10973 SDLoc DL(N);
10974 const TargetOptions &Options = DAG.getTarget().Options;
10975
10976 // FMA nodes have flags that propagate to the created nodes.
10977 const SDNodeFlags Flags = N->getFlags();
10978 bool UnsafeFPMath = Options.UnsafeFPMath || isContractable(N);
10979
10980 // Constant fold FMA.
10981 if (isa<ConstantFPSDNode>(N0) &&
10982 isa<ConstantFPSDNode>(N1) &&
10983 isa<ConstantFPSDNode>(N2)) {
10984 return DAG.getNode(ISD::FMA, DL, VT, N0, N1, N2);
10985 }
10986
10987 if (UnsafeFPMath) {
10988 if (N0CFP && N0CFP->isZero())
10989 return N2;
10990 if (N1CFP && N1CFP->isZero())
10991 return N2;
10992 }
10993 // TODO: The FMA node should have flags that propagate to these nodes.
10994 if (N0CFP && N0CFP->isExactlyValue(1.0))
10995 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N1, N2);
10996 if (N1CFP && N1CFP->isExactlyValue(1.0))
10997 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0, N2);
10998
10999 // Canonicalize (fma c, x, y) -> (fma x, c, y)
11000 if (isConstantFPBuildVectorOrConstantFP(N0) &&
11001 !isConstantFPBuildVectorOrConstantFP(N1))
11002 return DAG.getNode(ISD::FMA, SDLoc(N), VT, N1, N0, N2);
11003
11004 if (UnsafeFPMath) {
11005 // (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2)
11006 if (N2.getOpcode() == ISD::FMUL && N0 == N2.getOperand(0) &&
11007 isConstantFPBuildVectorOrConstantFP(N1) &&
11008 isConstantFPBuildVectorOrConstantFP(N2.getOperand(1))) {
11009 return DAG.getNode(ISD::FMUL, DL, VT, N0,
11010 DAG.getNode(ISD::FADD, DL, VT, N1, N2.getOperand(1),
11011 Flags), Flags);
11012 }
11013
11014 // (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y)
11015 if (N0.getOpcode() == ISD::FMUL &&
11016 isConstantFPBuildVectorOrConstantFP(N1) &&
11017 isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) {
11018 return DAG.getNode(ISD::FMA, DL, VT,
11019 N0.getOperand(0),
11020 DAG.getNode(ISD::FMUL, DL, VT, N1, N0.getOperand(1),
11021 Flags),
11022 N2);
11023 }
11024 }
11025
11026 // (fma x, 1, y) -> (fadd x, y)
11027 // (fma x, -1, y) -> (fadd (fneg x), y)
11028 if (N1CFP) {
11029 if (N1CFP->isExactlyValue(1.0))
11030 // TODO: The FMA node should have flags that propagate to this node.
11031 return DAG.getNode(ISD::FADD, DL, VT, N0, N2);
11032
11033 if (N1CFP->isExactlyValue(-1.0) &&
11034 (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))) {
11035 SDValue RHSNeg = DAG.getNode(ISD::FNEG, DL, VT, N0);
11036 AddToWorklist(RHSNeg.getNode());
11037 // TODO: The FMA node should have flags that propagate to this node.
11038 return DAG.getNode(ISD::FADD, DL, VT, N2, RHSNeg);
11039 }
11040
11041 // fma (fneg x), K, y -> fma x, -K, y
11042 if (N0.getOpcode() == ISD::FNEG &&
11043 (TLI.isOperationLegal(ISD::ConstantFP, VT) ||
11044 (N1.hasOneUse() && !TLI.isFPImmLegal(N1CFP->getValueAPF(), VT)))) {
11045 return DAG.getNode(ISD::FMA, DL, VT, N0.getOperand(0),
11046 DAG.getNode(ISD::FNEG, DL, VT, N1, Flags), N2);
11047 }
11048 }
11049
11050 if (UnsafeFPMath) {
11051 // (fma x, c, x) -> (fmul x, (c+1))
11052 if (N1CFP && N0 == N2) {
11053 return DAG.getNode(ISD::FMUL, DL, VT, N0,
11054 DAG.getNode(ISD::FADD, DL, VT, N1,
11055 DAG.getConstantFP(1.0, DL, VT), Flags),
11056 Flags);
11057 }
11058
11059 // (fma x, c, (fneg x)) -> (fmul x, (c-1))
11060 if (N1CFP && N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0) {
11061 return DAG.getNode(ISD::FMUL, DL, VT, N0,
11062 DAG.getNode(ISD::FADD, DL, VT, N1,
11063 DAG.getConstantFP(-1.0, DL, VT), Flags),
11064 Flags);
11065 }
11066 }
11067
11068 return SDValue();
11069}
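#if 0 // Editor's sketch, not part of the original source file.
// The FMA simplifications above in scalar form (std::fma as a stand-in):
//   fma(x, 1.0, y)  -> x + y          (the multiply is an identity)
//   fma(x, -1.0, y) -> y - x          (rewritten as fadd (fneg x), y)
#include <cmath>
#include <cassert>
int main() {
  double x = 2.5, y = 0.75;
  assert(std::fma(x,  1.0, y) == x + y);
  assert(std::fma(x, -1.0, y) == y - x);
  return 0;
}
#endif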
11070
11071// Combine multiple FDIVs with the same divisor into multiple FMULs by the
11072// reciprocal.
11073// E.g., (a / D; b / D;) -> (recip = 1.0 / D; a * recip; b * recip)
11074// Notice that this is not always beneficial. One reason is different targets
11075// may have different costs for FDIV and FMUL, so sometimes the cost of two
11076// FDIVs may be lower than the cost of one FDIV and two FMULs. Another reason
11077// is the critical path is increased from "one FDIV" to "one FDIV + one FMUL".
11078SDValue DAGCombiner::combineRepeatedFPDivisors(SDNode *N) {
11079 bool UnsafeMath = DAG.getTarget().Options.UnsafeFPMath;
11080 const SDNodeFlags Flags = N->getFlags();
11081 if (!UnsafeMath && !Flags.hasAllowReciprocal())
11082 return SDValue();
11083
11084 // Skip if current node is a reciprocal.
11085 SDValue N0 = N->getOperand(0);
11086 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
11087 if (N0CFP && N0CFP->isExactlyValue(1.0))
11088 return SDValue();
11089
11090 // Exit early if the target does not want this transform or if there can't
11091 // possibly be enough uses of the divisor to make the transform worthwhile.
11092 SDValue N1 = N->getOperand(1);
11093 unsigned MinUses = TLI.combineRepeatedFPDivisors();
11094 if (!MinUses || N1->use_size() < MinUses)
11095 return SDValue();
11096
11097 // Find all FDIV users of the same divisor.
11098 // Use a set because duplicates may be present in the user list.
11099 SetVector<SDNode *> Users;
11100 for (auto *U : N1->uses()) {
11101 if (U->getOpcode() == ISD::FDIV && U->getOperand(1) == N1) {
11102 // This division is eligible for optimization only if global unsafe math
11103 // is enabled or if this division allows reciprocal formation.
11104 if (UnsafeMath || U->getFlags().hasAllowReciprocal())
11105 Users.insert(U);
11106 }
11107 }
11108
11109 // Now that we have the actual number of divisor uses, make sure it meets
11110 // the minimum threshold specified by the target.
11111 if (Users.size() < MinUses)
11112 return SDValue();
11113
11114 EVT VT = N->getValueType(0);
11115 SDLoc DL(N);
11116 SDValue FPOne = DAG.getConstantFP(1.0, DL, VT);
11117 SDValue Reciprocal = DAG.getNode(ISD::FDIV, DL, VT, FPOne, N1, Flags);
11118
11119 // Dividend / Divisor -> Dividend * Reciprocal
11120 for (auto *U : Users) {
11121 SDValue Dividend = U->getOperand(0);
11122 if (Dividend != FPOne) {
11123 SDValue NewNode = DAG.getNode(ISD::FMUL, SDLoc(U), VT, Dividend,
11124 Reciprocal, Flags);
11125 CombineTo(U, NewNode);
11126 } else if (U != Reciprocal.getNode()) {
11127 // In the absence of fast-math-flags, this user node is always the
11128 // same node as Reciprocal, but with FMF they may be different nodes.
11129 CombineTo(U, Reciprocal);
11130 }
11131 }
11132 return SDValue(N, 0); // N was replaced.
11133}
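#if 0 // Editor's sketch, not part of the original source file.
// The repeated-divisor rewrite above in plain C++: two FDIVs become one
// FDIV plus two FMULs. Results may differ in the last ulp because 1.0/d
// is rounded once before the multiplies.
#include <utility>
std::pair<double, double> divideBoth(double a, double b, double d) {
  // Before: return {a / d, b / d};
  double recip = 1.0 / d;
  return {a * recip, b * recip};
}
#endif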
11134
11135SDValue DAGCombiner::visitFDIV(SDNode *N) {
11136 SDValue N0 = N->getOperand(0);
11137 SDValue N1 = N->getOperand(1);
11138 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
11139 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
11140 EVT VT = N->getValueType(0);
11141 SDLoc DL(N);
11142 const TargetOptions &Options = DAG.getTarget().Options;
11143 SDNodeFlags Flags = N->getFlags();
11144
11145 // fold vector ops
11146 if (VT.isVector())
11147 if (SDValue FoldedVOp = SimplifyVBinOp(N))
11148 return FoldedVOp;
11149
11150 // fold (fdiv c1, c2) -> c1/c2
11151 if (N0CFP && N1CFP)
11152 return DAG.getNode(ISD::FDIV, SDLoc(N), VT, N0, N1, Flags);
11153
11154 if (SDValue NewSel = foldBinOpIntoSelect(N))
11155 return NewSel;
11156
11157 if (Options.UnsafeFPMath || Flags.hasAllowReciprocal()) {
11158 // fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable.
11159 if (N1CFP) {
11160 // Compute the reciprocal 1.0 / c2.
11161 const APFloat &N1APF = N1CFP->getValueAPF();
11162 APFloat Recip(N1APF.getSemantics(), 1); // 1.0
11163 APFloat::opStatus st = Recip.divide(N1APF, APFloat::rmNearestTiesToEven);
11164 // Only do the transform if the reciprocal is a legal fp immediate that
11165 // isn't too nasty (e.g. NaN, denormal, ...).
11166 if ((st == APFloat::opOK || st == APFloat::opInexact) && // Not too nasty
11167 (!LegalOperations ||
11168 // FIXME: custom lowering of ConstantFP might fail (see e.g. ARM
11169 // backend)... we should handle this gracefully after Legalize.
11170 // TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT) ||
11171 TLI.isOperationLegal(ISD::ConstantFP, VT) ||
11172 TLI.isFPImmLegal(Recip, VT)))
11173 return DAG.getNode(ISD::FMUL, DL, VT, N0,
11174 DAG.getConstantFP(Recip, DL, VT), Flags);
11175 }
11176
11177 // If this FDIV is part of a reciprocal square root, it may be folded
11178 // into a target-specific square root estimate instruction.
11179 if (N1.getOpcode() == ISD::FSQRT) {
11180 if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0), Flags)) {
11181 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags);
11182 }
11183 } else if (N1.getOpcode() == ISD::FP_EXTEND &&
11184 N1.getOperand(0).getOpcode() == ISD::FSQRT) {
11185 if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0),
11186 Flags)) {
11187 RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N1), VT, RV);
11188 AddToWorklist(RV.getNode());
11189 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags);
11190 }
11191 } else if (N1.getOpcode() == ISD::FP_ROUND &&
11192 N1.getOperand(0).getOpcode() == ISD::FSQRT) {
11193 if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0),
11194 Flags)) {
11195 RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N1), VT, RV, N1.getOperand(1));
11196 AddToWorklist(RV.getNode());
11197 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags);
11198 }
11199 } else if (N1.getOpcode() == ISD::FMUL) {
11200 // Look through an FMUL. Even though this won't remove the FDIV directly,
11201 // it's still worthwhile to get rid of the FSQRT if possible.
11202 SDValue SqrtOp;
11203 SDValue OtherOp;
11204 if (N1.getOperand(0).getOpcode() == ISD::FSQRT) {
11205 SqrtOp = N1.getOperand(0);
11206 OtherOp = N1.getOperand(1);
11207 } else if (N1.getOperand(1).getOpcode() == ISD::FSQRT) {
11208 SqrtOp = N1.getOperand(1);
11209 OtherOp = N1.getOperand(0);
11210 }
11211 if (SqrtOp.getNode()) {
11212 // We found an FSQRT, so try to make this fold:
11213 // x / (y * sqrt(z)) -> x * (rsqrt(z) / y)
11214 if (SDValue RV = buildRsqrtEstimate(SqrtOp.getOperand(0), Flags)) {
11215 RV = DAG.getNode(ISD::FDIV, SDLoc(N1), VT, RV, OtherOp, Flags);
11216 AddToWorklist(RV.getNode());
11217 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags);
11218 }
11219 }
11220 }
11221
11222 // Fold into a reciprocal estimate and multiply instead of a real divide.
11223 if (SDValue RV = BuildReciprocalEstimate(N1, Flags)) {
11224 AddToWorklist(RV.getNode());
11225 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags);
11226 }
11227 }
11228
11229 // (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y)
11230 if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, &Options)) {
11231 if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, &Options)) {
11232 // Both can be negated for free, check to see if at least one is cheaper
11233 // negated.
11234 if (LHSNeg == 2 || RHSNeg == 2)
11235 return DAG.getNode(ISD::FDIV, SDLoc(N), VT,
11236 GetNegatedExpression(N0, DAG, LegalOperations),
11237 GetNegatedExpression(N1, DAG, LegalOperations),
11238 Flags);
11239 }
11240 }
11241
11242 if (SDValue CombineRepeatedDivisors = combineRepeatedFPDivisors(N))
11243 return CombineRepeatedDivisors;
11244
11245 return SDValue();
11246}
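#if 0 // Editor's sketch, not part of the original source file.
// The constant-reciprocal fold above: division by a constant becomes a
// multiply when 1/c rounds cleanly (opOK or opInexact):
double byEight(double x) { return x / 8.0; }  // -> x * 0.125 (exact)
double byTen(double x)   { return x / 10.0; } // -> x * 0.1 (inexact; only
                                              // under unsafe/arcp flags)
#endif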
11247
11248SDValue DAGCombiner::visitFREM(SDNode *N) {
11249 SDValue N0 = N->getOperand(0);
11250 SDValue N1 = N->getOperand(1);
11251 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
11252 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
11253 EVT VT = N->getValueType(0);
11254
11255 // fold (frem c1, c2) -> fmod(c1,c2)
11256 if (N0CFP && N1CFP)
11257 return DAG.getNode(ISD::FREM, SDLoc(N), VT, N0, N1, N->getFlags());
11258
11259 if (SDValue NewSel = foldBinOpIntoSelect(N))
11260 return NewSel;
11261
11262 return SDValue();
11263}
11264
11265SDValue DAGCombiner::visitFSQRT(SDNode *N) {
11266 SDNodeFlags Flags = N->getFlags();
11267 if (!DAG.getTarget().Options.UnsafeFPMath &&
11268 !Flags.hasApproximateFuncs())
11269 return SDValue();
11270
11271 SDValue N0 = N->getOperand(0);
11272 if (TLI.isFsqrtCheap(N0, DAG))
11273 return SDValue();
11274
11275 // FSQRT nodes have flags that propagate to the created nodes.
11276 return buildSqrtEstimate(N0, Flags);
11277}
11278
11279/// copysign(x, fp_extend(y)) -> copysign(x, y)
11280/// copysign(x, fp_round(y)) -> copysign(x, y)
11281static inline bool CanCombineFCOPYSIGN_EXTEND_ROUND(SDNode *N) {
11282 SDValue N1 = N->getOperand(1);
11283 if ((N1.getOpcode() == ISD::FP_EXTEND ||
11284 N1.getOpcode() == ISD::FP_ROUND)) {
11285 // Do not optimize out type conversion of f128 type yet.
11286 // For some targets like x86_64, configuration is changed to keep one f128
11287 // value in one SSE register, but instruction selection cannot handle
11288 // FCOPYSIGN on SSE registers yet.
11289 EVT N1VT = N1->getValueType(0);
11290 EVT N1Op0VT = N1->getOperand(0).getValueType();
11291 return (N1VT == N1Op0VT || N1Op0VT != MVT::f128);
11292 }
11293 return false;
11294}
11295
11296SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
11297 SDValue N0 = N->getOperand(0);
11298 SDValue N1 = N->getOperand(1);
11299 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
11300 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
11301 EVT VT = N->getValueType(0);
11302
11303 if (N0CFP && N1CFP) // Constant fold
11304 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1);
11305
11306 if (N1CFP) {
11307 const APFloat &V = N1CFP->getValueAPF();
11308 // copysign(x, c1) -> fabs(x) iff ispos(c1)
11309 // copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
11310 if (!V.isNegative()) {
11311 if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT))
11312 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
11313 } else {
11314 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
11315 return DAG.getNode(ISD::FNEG, SDLoc(N), VT,
11316 DAG.getNode(ISD::FABS, SDLoc(N0), VT, N0));
11317 }
11318 }
11319
11320 // copysign(fabs(x), y) -> copysign(x, y)
11321 // copysign(fneg(x), y) -> copysign(x, y)
11322 // copysign(copysign(x,z), y) -> copysign(x, y)
11323 if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG ||
11324 N0.getOpcode() == ISD::FCOPYSIGN)
11325 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0.getOperand(0), N1);
11326
11327 // copysign(x, abs(y)) -> abs(x)
11328 if (N1.getOpcode() == ISD::FABS)
11329 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
11330
11331 // copysign(x, copysign(y,z)) -> copysign(x, z)
11332 if (N1.getOpcode() == ISD::FCOPYSIGN)
11333 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1.getOperand(1));
11334
11335 // copysign(x, fp_extend(y)) -> copysign(x, y)
11336 // copysign(x, fp_round(y)) -> copysign(x, y)
11337 if (CanCombineFCOPYSIGN_EXTEND_ROUND(N))
11338 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1.getOperand(0));
11339
11340 return SDValue();
11341}
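#if 0 // Editor's sketch, not part of the original source file.
// The identities above mirror <cmath> semantics: only the sign of the
// second operand matters, so sign-insensitive wrappers peel off.
#include <cmath>
#include <cassert>
int main() {
  double x = -3.0, y = -5.0, z = 7.0;
  assert(std::copysign(std::fabs(x), y) == std::copysign(x, y));
  assert(std::copysign(x, std::fabs(y)) == std::fabs(x));
  assert(std::copysign(x, std::copysign(y, z)) == std::copysign(x, z));
  return 0;
}
#endif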
11342
11343static SDValue foldFPToIntToFP(SDNode *N, SelectionDAG &DAG,
11344 const TargetLowering &TLI) {
11345 // This optimization is guarded by a function attribute because it may produce
11346 // unexpected results. That is, programs may be relying on the
11346 // platform-specific undefined behavior when the float-to-int conversion
11347 // overflows.
11347 // undefined behavior when the float-to-int conversion overflows.
11348 const Function &F = DAG.getMachineFunction().getFunction();
11349 Attribute StrictOverflow = F.getFnAttribute("strict-float-cast-overflow");
11350 if (StrictOverflow.getValueAsString().equals("false"))
11351 return SDValue();
11352
11353 // We only do this if the target has legal ftrunc. Otherwise, we'd likely be
11354 // replacing casts with a libcall. We also must be allowed to ignore -0.0
11355 // because FTRUNC will return -0.0 for inputs in (-1.0, -0.0), but using
11356 // integer conversions would return +0.0.
11357 // FIXME: We should be able to use node-level FMF here.
11358 // TODO: If strict math, should we use FABS (+ range check for signed cast)?
11359 EVT VT = N->getValueType(0);
11360 if (!TLI.isOperationLegal(ISD::FTRUNC, VT) ||
11361 !DAG.getTarget().Options.NoSignedZerosFPMath)
11362 return SDValue();
11363
11364 // fptosi/fptoui round towards zero, so converting from FP to integer and
11365 // back is the same as an 'ftrunc': [us]itofp (fpto[us]i X) --> ftrunc X
11366 SDValue N0 = N->getOperand(0);
11367 if (N->getOpcode() == ISD::SINT_TO_FP && N0.getOpcode() == ISD::FP_TO_SINT &&
11368 N0.getOperand(0).getValueType() == VT)
11369 return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0.getOperand(0));
11370
11371 if (N->getOpcode() == ISD::UINT_TO_FP && N0.getOpcode() == ISD::FP_TO_UINT &&
11372 N0.getOperand(0).getValueType() == VT)
11373 return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0.getOperand(0));
11374
11375 return SDValue();
11376}
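#if 0 // Editor's sketch, not part of the original source file.
// The round-trip fold above relies on fptosi/fptoui rounding toward zero,
// so for values that fit the integer type the cast pair equals trunc():
#include <cassert>
#include <cmath>
int main() {
  double x = -2.75;
  assert(static_cast<double>(static_cast<long long>(x)) == std::trunc(x));
  // Note: std::trunc(-0.5) is -0.0 while the cast pair yields +0.0, which
  // is why the combine requires NoSignedZerosFPMath.
  return 0;
}
#endif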
11377
11378SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
11379 SDValue N0 = N->getOperand(0);
11380 EVT VT = N->getValueType(0);
11381 EVT OpVT = N0.getValueType();
11382
11383 // fold (sint_to_fp c1) -> c1fp
11384 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
11385 // ...but only if the target supports immediate floating-point values
11386 (!LegalOperations ||
11387 TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT)))
11388 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
11389
11390 // If the input is a legal type, and SINT_TO_FP is not legal on this target,
11391 // but UINT_TO_FP is legal on this target, try to convert.
11392 if (!TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT) &&
11393 TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT)) {
11394 // If the sign bit is known to be zero, we can change this to UINT_TO_FP.
11395 if (DAG.SignBitIsZero(N0))
11396 return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
11397 }
11398
11399 // The next optimizations are desirable only if SELECT_CC can be lowered.
11400 if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
11401 // fold (sint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0, cc)
11402 if (N0.getOpcode() == ISD::SETCC && N0.getValueType() == MVT::i1 &&
11403 !VT.isVector() &&
11404 (!LegalOperations ||
11405 TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT))) {
11406 SDLoc DL(N);
11407 SDValue Ops[] =
11408 { N0.getOperand(0), N0.getOperand(1),
11409 DAG.getConstantFP(-1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
11410 N0.getOperand(2) };
11411 return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
11412 }
11413
11414 // fold (sint_to_fp (zext (setcc x, y, cc))) ->
11415 // (select_cc x, y, 1.0, 0.0, cc)
11416 if (N0.getOpcode() == ISD::ZERO_EXTEND &&
11417 N0.getOperand(0).getOpcode() == ISD::SETCC && !VT.isVector() &&
11418 (!LegalOperations ||
11419 TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT))) {
11420 SDLoc DL(N);
11421 SDValue Ops[] =
11422 { N0.getOperand(0).getOperand(0), N0.getOperand(0).getOperand(1),
11423 DAG.getConstantFP(1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
11424 N0.getOperand(0).getOperand(2) };
11425 return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
11426 }
11427 }
11428
11429 if (SDValue FTrunc = foldFPToIntToFP(N, DAG, TLI))
11430 return FTrunc;
11431
11432 return SDValue();
11433}
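#if 0 // Editor's sketch, not part of the original source file.
// The signed/unsigned swap above is sound because the two conversions
// agree whenever the sign bit is known to be zero:
#include <cstdint>
#include <cassert>
int main() {
  uint32_t v = 0x7FFFFFFFu;                  // sign bit clear
  assert((double)(int32_t)v == (double)v);   // sint_to_fp == uint_to_fp
  return 0;
}
#endif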
11434
11435SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
11436 SDValue N0 = N->getOperand(0);
11437 EVT VT = N->getValueType(0);
11438 EVT OpVT = N0.getValueType();
11439
11440 // fold (uint_to_fp c1) -> c1fp
11441 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
11442 // ...but only if the target supports immediate floating-point values
11443 (!LegalOperations ||
11444 TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT)))
11445 return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
11446
11447 // If the input is a legal type, and UINT_TO_FP is not legal on this target,
11448 // but SINT_TO_FP is legal on this target, try to convert.
11449 if (!TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT) &&
11450 TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT)) {
11451 // If the sign bit is known to be zero, we can change this to SINT_TO_FP.
11452 if (DAG.SignBitIsZero(N0))
11453 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
11454 }
11455
11456 // The next optimizations are desirable only if SELECT_CC can be lowered.
11457 if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
11458 // fold (uint_to_fp (setcc x, y, cc)) -> (select_cc x, y, 1.0, 0.0, cc)
11459 if (N0.getOpcode() == ISD::SETCC && !VT.isVector() &&
11460 (!LegalOperations ||
11461 TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT))) {
11462 SDLoc DL(N);
11463 SDValue Ops[] =
11464 { N0.getOperand(0), N0.getOperand(1),
11465 DAG.getConstantFP(1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
11466 N0.getOperand(2) };
11467 return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
11468 }
11469 }
11470
11471 if (SDValue FTrunc = foldFPToIntToFP(N, DAG, TLI))
11472 return FTrunc;
11473
11474 return SDValue();
11475}
11476
11477 // Fold (fp_to_{s/u}int ({s/u}int_to_fp x)) -> zext x, sext x, trunc x, or x
11478static SDValue FoldIntToFPToInt(SDNode *N, SelectionDAG &DAG) {
11479 SDValue N0 = N->getOperand(0);
11480 EVT VT = N->getValueType(0);
11481
11482 if (N0.getOpcode() != ISD::UINT_TO_FP && N0.getOpcode() != ISD::SINT_TO_FP)
11483 return SDValue();
11484
11485 SDValue Src = N0.getOperand(0);
11486 EVT SrcVT = Src.getValueType();
11487 bool IsInputSigned = N0.getOpcode() == ISD::SINT_TO_FP;
11488 bool IsOutputSigned = N->getOpcode() == ISD::FP_TO_SINT;
11489
11490 // We can safely assume the conversion won't overflow the output range,
11491 // because (for example) (uint8_t)18293.f is undefined behavior.
11492
11493 // Since we can assume the conversion won't overflow, our decision as to
11494 // whether the input will fit in the float should depend on the minimum
11495 // of the input range and output range.
11496
11497 // This means this is also safe for a signed input and unsigned output, since
11498 // a negative input would lead to undefined behavior.
11499 unsigned InputSize = (int)SrcVT.getScalarSizeInBits() - IsInputSigned;
11500 unsigned OutputSize = (int)VT.getScalarSizeInBits() - IsOutputSigned;
11501 unsigned ActualSize = std::min(InputSize, OutputSize);
11502 const fltSemantics &sem = DAG.EVTToAPFloatSemantics(N0.getValueType());
11503
11504 // We can only fold away the float conversion if the input range can be
11505 // represented exactly in the float range.
11506 if (APFloat::semanticsPrecision(sem) >= ActualSize) {
11507 if (VT.getScalarSizeInBits() > SrcVT.getScalarSizeInBits()) {
11508 unsigned ExtOp = IsInputSigned && IsOutputSigned ? ISD::SIGN_EXTEND
11509 : ISD::ZERO_EXTEND;
11510 return DAG.getNode(ExtOp, SDLoc(N), VT, Src);
11511 }
11512 if (VT.getScalarSizeInBits() < SrcVT.getScalarSizeInBits())
11513 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Src);
11514 return DAG.getBitcast(VT, Src);
11515 }
11516 return SDValue();
11517}
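#if 0 // Editor's sketch, not part of the original source file.
// The precision test above asks whether min(input bits, output bits) fits
// in the float type's significand. For IEEE float (24-bit precision):
//   (int16_t)(float)x is exact for every int16_t (16 <= 24) -> foldable
//   (int32_t)(float)y can round               (31 >  24) -> not foldable
#include <cstdint>
int32_t roundTrip(int32_t y) {
  return static_cast<int32_t>(static_cast<float>(y)); // e.g. y = 16777217
}                                                     // comes back 16777216
#endif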
11518
11519SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
11520 SDValue N0 = N->getOperand(0);
11521 EVT VT = N->getValueType(0);
11522
11523 // fold (fp_to_sint c1fp) -> c1
11524 if (isConstantFPBuildVectorOrConstantFP(N0))
11525 return DAG.getNode(ISD::FP_TO_SINT, SDLoc(N), VT, N0);
11526
11527 return FoldIntToFPToInt(N, DAG);
11528}
11529
11530SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
11531 SDValue N0 = N->getOperand(0);
11532 EVT VT = N->getValueType(0);
11533
11534 // fold (fp_to_uint c1fp) -> c1
11535 if (isConstantFPBuildVectorOrConstantFP(N0))
11536 return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), VT, N0);
11537
11538 return FoldIntToFPToInt(N, DAG);
11539}
11540
11541SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
11542 SDValue N0 = N->getOperand(0);
11543 SDValue N1 = N->getOperand(1);
11544 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
11545 EVT VT = N->getValueType(0);
11546
11547 // fold (fp_round c1fp) -> c1fp
11548 if (N0CFP)
11549 return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT, N0, N1);
11550
11551 // fold (fp_round (fp_extend x)) -> x
11552 if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType())
11553 return N0.getOperand(0);
11554
11555 // fold (fp_round (fp_round x)) -> (fp_round x)
11556 if (N0.getOpcode() == ISD::FP_ROUND) {
11557 const bool NIsTrunc = N->getConstantOperandVal(1) == 1;
11558 const bool N0IsTrunc = N0.getConstantOperandVal(1) == 1;
11559
11560 // Skip this folding if it results in an fp_round from f80 to f16.
11561 //
11562 // f80 to f16 always generates an expensive (and as yet, unimplemented)
11563 // libcall to __truncxfhf2 instead of selecting native f16 conversion
11564 // instructions from f32 or f64. Moreover, the first (value-preserving)
11565 // fp_round from f80 to either f32 or f64 may become a NOP on platforms
11566 // like x86.
11567 if (N0.getOperand(0).getValueType() == MVT::f80 && VT == MVT::f16)
11568 return SDValue();
11569
11570 // If the first fp_round isn't a value preserving truncation, it might
11571 // introduce a tie in the second fp_round, that wouldn't occur in the
11572 // single-step fp_round we want to fold to.
11573 // In other words, double rounding isn't the same as rounding.
11574 // Also, this is a value preserving truncation iff both fp_round's are.
11575 if (DAG.getTarget().Options.UnsafeFPMath || N0IsTrunc) {
11576 SDLoc DL(N);
11577 return DAG.getNode(ISD::FP_ROUND, DL, VT, N0.getOperand(0),
11578 DAG.getIntPtrConstant(NIsTrunc && N0IsTrunc, DL));
11579 }
11580 }
11581
11582 // fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y)
11583 if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) {
11584 SDValue Tmp = DAG.getNode(ISD::FP_ROUND, SDLoc(N0), VT,
11585 N0.getOperand(0), N1);
11586 AddToWorklist(Tmp.getNode());
11587 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
11588 Tmp, N0.getOperand(1));
11589 }
11590
11591 if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
11592 return NewVSel;
11593
11594 return SDValue();
11595}
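#if 0 // Editor's sketch, not part of the original source file.
// Why the fold above requires a value-preserving first fp_round: double
// rounding differs from single rounding. Assuming Clang's _Float16
// extension type (f16 <- f32 <- f64):
int main() {
  double d = 1.0 + 0x1p-11 + 0x1p-40;     // just above the f16 midpoint
  _Float16 direct  = (_Float16)d;         // rounds up to 1 + 2^-10
  _Float16 twoStep = (_Float16)(float)d;  // (float)d == 1 + 2^-11 exactly;
                                          // the tie then rounds DOWN to 1.0
  return direct == twoStep;               // 0: the two results differ
}
#endif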
11596
11597SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) {
11598 SDValue N0 = N->getOperand(0);
11599 EVT VT = N->getValueType(0);
11600 EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
11601 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
11602
11603 // fold (fp_round_inreg c1fp) -> c1fp
11604 if (N0CFP && isTypeLegal(EVT)) {
11605 SDLoc DL(N);
11606 SDValue Round = DAG.getConstantFP(*N0CFP->getConstantFPValue(), DL, EVT);
11607 return DAG.getNode(ISD::FP_EXTEND, DL, VT, Round);
11608 }
11609
11610 return SDValue();
11611}
11612
11613SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
11614 SDValue N0 = N->getOperand(0);
11615 EVT VT = N->getValueType(0);
11616
11617 // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
11618 if (N->hasOneUse() &&
11619 N->use_begin()->getOpcode() == ISD::FP_ROUND)
11620 return SDValue();
11621
11622 // fold (fp_extend c1fp) -> c1fp
11623 if (isConstantFPBuildVectorOrConstantFP(N0))
11624 return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, N0);
11625
11626 // fold (fp_extend (fp16_to_fp op)) -> (fp16_to_fp op)
11627 if (N0.getOpcode() == ISD::FP16_TO_FP &&
11628 TLI.getOperationAction(ISD::FP16_TO_FP, VT) == TargetLowering::Legal)
11629 return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), VT, N0.getOperand(0));
11630
11631 // Turn fp_extend(fp_round(X, 1)) -> X since the fp_round doesn't affect the
11632 // value of X.
11633 if (N0.getOpcode() == ISD::FP_ROUND
11634 && N0.getConstantOperandVal(1) == 1) {
11635 SDValue In = N0.getOperand(0);
11636 if (In.getValueType() == VT) return In;
11637 if (VT.bitsLT(In.getValueType()))
11638 return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT,
11639 In, N0.getOperand(1));
11640 return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, In);
11641 }
11642
11643 // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
11644 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
11645 TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) {
11646 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
11647 SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
11648 LN0->getChain(),
11649 LN0->getBasePtr(), N0.getValueType(),
11650 LN0->getMemOperand());
11651 CombineTo(N, ExtLoad);
11652 CombineTo(N0.getNode(),
11653 DAG.getNode(ISD::FP_ROUND, SDLoc(N0),
11654 N0.getValueType(), ExtLoad,
11655 DAG.getIntPtrConstant(1, SDLoc(N0))),
11656 ExtLoad.getValue(1));
11657 return SDValue(N, 0); // Return N so it doesn't get rechecked!
11658 }
11659
11660 if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
11661 return NewVSel;
11662
11663 return SDValue();
11664}
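// Example for the fp_extend(fp_round(X, 1)) fold above: a trunc flag of 1
// asserts the inner fp_round loses no information, so with X : f64,
//   (fp_extend (fp_round X to f32, 1) to f64) -> X
// and when the extended type is narrower than X's type, the pair collapses
// to a single fp_round instead.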
11665
11666SDValue DAGCombiner::visitFCEIL(SDNode *N) {
11667 SDValue N0 = N->getOperand(0);
11668 EVT VT = N->getValueType(0);
11669
11670 // fold (fceil c1) -> fceil(c1)
11671 if (isConstantFPBuildVectorOrConstantFP(N0))
11672 return DAG.getNode(ISD::FCEIL, SDLoc(N), VT, N0);
11673
11674 return SDValue();
11675}
11676
11677SDValue DAGCombiner::visitFTRUNC(SDNode *N) {
11678 SDValue N0 = N->getOperand(0);
11679 EVT VT = N->getValueType(0);
11680
11681 // fold (ftrunc c1) -> ftrunc(c1)
11682 if (isConstantFPBuildVectorOrConstantFP(N0))
11683 return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0);
11684
11685 // fold ftrunc (known rounded int x) -> x
11686 // ftrunc is a part of the fptosi/fptoui expansion on some targets, so it is
11687 // likely generated to extract the integer part of a rounded floating-point value.
11688 switch (N0.getOpcode()) {
11689 default: break;
11690 case ISD::FRINT:
11691 case ISD::FTRUNC:
11692 case ISD::FNEARBYINT:
11693 case ISD::FFLOOR:
11694 case ISD::FCEIL:
11695 return N0;
11696 }
11697
11698 return SDValue();
11699}
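// Example for the "known rounded int" fold above: ffloor already yields an
// integral value, so
//   (ftrunc (ffloor X)) -> (ffloor X)
// and likewise for frint, fnearbyint, fceil, and a nested ftrunc.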
11700
11701SDValue DAGCombiner::visitFFLOOR(SDNode *N) {
11702 SDValue N0 = N->getOperand(0);
11703 EVT VT = N->getValueType(0);
11704
11705 // fold (ffloor c1) -> ffloor(c1)
11706 if (isConstantFPBuildVectorOrConstantFP(N0))
11707 return DAG.getNode(ISD::FFLOOR, SDLoc(N), VT, N0);
11708
11709 return SDValue();
11710}
11711
11712// FIXME: FNEG and FABS have a lot in common; refactor.
11713SDValue DAGCombiner::visitFNEG(SDNode *N) {
11714 SDValue N0 = N->getOperand(0);
11715 EVT VT = N->getValueType(0);
11716
11717 // Constant fold FNEG.
11718 if (isConstantFPBuildVectorOrConstantFP(N0))
11719 return DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0);
11720
11721 if (isNegatibleForFree(N0, LegalOperations, DAG.getTargetLoweringInfo(),
11722 &DAG.getTarget().Options))
11723 return GetNegatedExpression(N0, DAG, LegalOperations);
11724
11725 // Transform fneg(bitconvert(x)) -> bitconvert(x ^ sign) to avoid loading
11726 // constant pool values.
11727 if (!TLI.isFNegFree(VT) &&
11728 N0.getOpcode() == ISD::BITCAST &&
11729 N0.getNode()->hasOneUse()) {
11730 SDValue Int = N0.getOperand(0);
11731 EVT IntVT = Int.getValueType();
11732 if (IntVT.isInteger() && !IntVT.isVector()) {
11733 APInt SignMask;
11734 if (N0.getValueType().isVector()) {
11735 // For a vector, get a mask such as 0x80... per scalar element
11736 // and splat it.
11737 SignMask = APInt::getSignMask(N0.getScalarValueSizeInBits());
11738 SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
11739 } else {
11740 // For a scalar, just generate 0x80...
11741 SignMask = APInt::getSignMask(IntVT.getSizeInBits());
11742 }
11743 SDLoc DL0(N0);
11744 Int = DAG.getNode(ISD::XOR, DL0, IntVT, Int,
11745 DAG.getConstant(SignMask, DL0, IntVT));
11746 AddToWorklist(Int.getNode());
11747 return DAG.getBitcast(VT, Int);
11748 }
11749 }
11750
11751 // (fneg (fmul c, x)) -> (fmul -c, x)
11752 if (N0.getOpcode() == ISD::FMUL &&
11753 (N0.getNode()->hasOneUse() || !TLI.isFNegFree(VT))) {
11754 ConstantFPSDNode *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
11755 if (CFP1) {
11756 APFloat CVal = CFP1->getValueAPF();
11757 CVal.changeSign();
11758 if (Level >= AfterLegalizeDAG &&
11759 (TLI.isFPImmLegal(CVal, VT) ||
11760 TLI.isOperationLegal(ISD::ConstantFP, VT)))
11761 return DAG.getNode(
11762 ISD::FMUL, SDLoc(N), VT, N0.getOperand(0),
11763 DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0.getOperand(1)),
11764 N0->getFlags());
11765 }
11766 }
11767
11768 return SDValue();
11769}
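// Example for the fneg(bitconvert(x)) transform above: for f32 the sign
// bit is bit 31, so
//   (fneg (bitcast i32 %x to f32)) -> (bitcast (xor %x, 0x80000000) to f32)
// flipping the sign with an integer XOR instead of loading an FP constant
// from the constant pool.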
11770
11771SDValue DAGCombiner::visitFMINNUM(SDNode *N) {
11772 SDValue N0 = N->getOperand(0);
11773 SDValue N1 = N->getOperand(1);
11774 EVT VT = N->getValueType(0);
11775 const ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0);
11776 const ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1);
11777
11778 if (N0CFP && N1CFP) {
11779 const APFloat &C0 = N0CFP->getValueAPF();
11780 const APFloat &C1 = N1CFP->getValueAPF();
11781 return DAG.getConstantFP(minnum(C0, C1), SDLoc(N), VT);
11782 }
11783
11784 // Canonicalize to constant on RHS.
11785 if (isConstantFPBuildVectorOrConstantFP(N0) &&
11786 !isConstantFPBuildVectorOrConstantFP(N1))
11787 return DAG.getNode(ISD::FMINNUM, SDLoc(N), VT, N1, N0);
11788
11789 return SDValue();
11790}
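// Example for the folds above (FMAXNUM below is analogous):
//   (fminnum 2.0, 1.0) -> 1.0
// and a lone constant operand is canonicalized to the RHS, e.g.
//   (fminnum 2.0, %x) -> (fminnum %x, 2.0)
// so later combines only have to match constants on one side.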
11791
11792SDValue DAGCombiner::visitFMAXNUM(SDNode *N) {
11793 SDValue N0 = N->getOperand(0);
11794 SDValue N1 = N->getOperand(1);
11795 EVT VT = N->getValueType(0);
11796 const ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0);
11797 const ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1);
11798
11799 if (N0CFP && N1CFP) {
11800 const APFloat &C0 = N0CFP->getValueAPF();
11801 const APFloat &C1 = N1CFP->getValueAPF();
11802 return DAG.getConstantFP(maxnum(C0, C1), SDLoc(N), VT);
11803 }
11804
11805 // Canonicalize to constant on RHS.
11806 if (isConstantFPBuildVectorOrConstantFP(N0) &&
11807 !isConstantFPBuildVectorOrConstantFP(N1))
11808 return DAG.getNode(ISD::FMAXNUM, SDLoc(N), VT, N1, N0);
11809
11810 return SDValue();
11811}
11812
11813SDValue DAGCombiner::visitFABS(SDNode *N) {
11814 SDValue N0 = N->getOperand(0);
11815 EVT VT = N->getValueType(0);
11816
11817 // fold (fabs c1) -> fabs(c1)
11818 if (isConstantFPBuildVectorOrConstantFP(N0))
11819 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
11820
11821 // fold (fabs (fabs x)) -> (fabs x)
11822 if (N0.getOpcode() == ISD::FABS)
11823 return N->getOperand(0);
11824
11825 // fold (fabs (fneg x)) -> (fabs x)
11826 // fold (fabs (fcopysign x, y)) -> (fabs x)
11827 if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
11828 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0.getOperand(0));
11829
11830 // Transform fabs(bitconvert(x)) -> bitconvert(x & ~sign) to avoid loading
11831 // constant pool values.
11832 if (!TLI.isFAbsFree(VT) &&
11833 N0.getOpcode() == ISD::BITCAST &&
11834 N0.getNode()->hasOneUse()) {
11835 SDValue Int = N0.getOperand(0);
11836 EVT IntVT = Int.getValueType();
11837 if (IntVT.isInteger() && !IntVT.isVector()) {
11838 APInt SignMask;
11839 if (N0.getValueType().isVector()) {
11840 // For a vector, get a mask such as 0x7f... per scalar element
11841 // and splat it.
11842 SignMask = ~APInt::getSignMask(N0.getScalarValueSizeInBits());
11843 SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
11844 } else {
11845 // For a scalar, just generate 0x7f...
11846 SignMask = ~APInt::getSignMask(IntVT.getSizeInBits());
11847 }
11848 SDLoc DL(N0);
11849 Int = DAG.getNode(ISD::AND, DL, IntVT, Int,
11850 DAG.getConstant(SignMask, DL, IntVT));
11851 AddToWorklist(Int.getNode());
11852 return DAG.getBitcast(N->getValueType(0), Int);
11853 }
11854 }
11855
11856 return SDValue();
11857}
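// Example for the fabs(bitconvert(x)) transform above: for f32 the mask is
// ~0x80000000 == 0x7FFFFFFF, so
//   (fabs (bitcast i32 %x to f32)) -> (bitcast (and %x, 0x7FFFFFFF) to f32)
// clearing the sign bit with an integer AND instead of loading a constant
// pool value.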
11858
11859SDValue DAGCombiner::visitBRCOND(SDNode *N) {
11860 SDValue Chain = N->getOperand(0);
11861 SDValue N1 = N->getOperand(1);
11862 SDValue N2 = N->getOperand(2);
11863
11864 // If N is a constant we could fold this into a fallthrough or unconditional
11865 // branch. However that doesn't happen very often in normal code, because
11866 // Instcombine/SimplifyCFG should have handled the available opportunities.
11867 // If we did this folding here, it would be necessary to update the
11868 // MachineBasicBlock CFG, which is awkward.
11869
11870 // fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
11871 // on the target.
11872 if (N1.getOpcode() == ISD::SETCC &&
11873 TLI.isOperationLegalOrCustom(ISD::BR_CC,
11874 N1.getOperand(0).getValueType())) {
11875 return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
11876 Chain, N1.getOperand(2),
11877 N1.getOperand(0), N1.getOperand(1), N2);
11878 }
11879
11880 if (N1.hasOneUse()) {
11881 if (SDValue NewN1 = rebuildSetCC(N1))
11882 return DAG.getNode(ISD::BRCOND, SDLoc(N), MVT::Other, Chain, NewN1, N2);
11883 }
11884
11885 return SDValue();
11886}
11887
11888SDValue DAGCombiner::rebuildSetCC(SDValue N) {
11889 if (N.getOpcode() == ISD::SRL ||
11890 (N.getOpcode() == ISD::TRUNCATE &&
11891 (N.getOperand(0).hasOneUse() &&
11892 N.getOperand(0).getOpcode() == ISD::SRL))) {
11893 // Look past the truncate.
11894 if (N.getOpcode() == ISD::TRUNCATE)
11895 N = N.getOperand(0);
11896
11897 // Match this pattern so that we can generate simpler code:
11898 //
11899 // %a = ...
11900 // %b = and i32 %a, 2
11901 // %c = srl i32 %b, 1
11902 // brcond i32 %c ...
11903 //
11904 // into
11905 //
11906 // %a = ...
11907 // %b = and i32 %a, 2
11908 // %c = setcc eq %b, 0
11909 // brcond %c ...
11910 //
11911 // This applies only when the AND constant value has one bit set and the
11912 // SRL constant is equal to the log2 of the AND constant. The back-end is
11913 // smart enough to convert the result into a TEST/JMP sequence.
11914 SDValue Op0 = N.getOperand(0);
11915 SDValue Op1 = N.getOperand(1);
11916
11917 if (Op0.getOpcode() == ISD::AND && Op1.getOpcode() == ISD::Constant) {
11918 SDValue AndOp1 = Op0.getOperand(1);
11919
11920 if (AndOp1.getOpcode() == ISD::Constant) {
11921 const APInt &AndConst = cast<ConstantSDNode>(AndOp1)->getAPIntValue();
11922
11923 if (AndConst.isPowerOf2() &&
11924 cast<ConstantSDNode>(Op1)->getAPIntValue() == AndConst.logBase2()) {
11925 SDLoc DL(N);
11926 return DAG.getSetCC(DL, getSetCCResultType(Op0.getValueType()),
11927 Op0, DAG.getConstant(0, DL, Op0.getValueType()),
11928 ISD::SETNE);
11929 }
11930 }
11931 }
11932 }
11933
11934 // Transform br(xor(x, y)) -> br(x != y)
11935 // Transform br(xor(xor(x,y), 1)) -> br (x == y)
11936 if (N.getOpcode() == ISD::XOR) {
11937 // Because we may call this on a speculatively constructed
11938 // SimplifiedSetCC Node, we need to simplify this node first.
11939 // Ideally this should be folded into SimplifySetCC and not
11940 // here. For now, grab a handle to N so we don't lose it from
11941 // replacements internal to the visit.
11942 HandleSDNode XORHandle(N);
11943 while (N.getOpcode() == ISD::XOR) {
11944 SDValue Tmp = visitXOR(N.getNode());
11945 // No simplification done.
11946 if (!Tmp.getNode())
11947 break;
11948 // Returning N is a form of in-visit replacement that may invalidate
11949 // N. Grab the value from the handle.
11950 if (Tmp.getNode() == N.getNode())
11951 N = XORHandle.getValue();
11952 else // Node simplified. Try simplifying again.
11953 N = Tmp;
11954 }
11955
11956 if (N.getOpcode() != ISD::XOR)
11957 return N;
11958
11959 SDNode *TheXor = N.getNode();
11960
11961 SDValue Op0 = TheXor->getOperand(0);
11962 SDValue Op1 = TheXor->getOperand(1);
11963
11964 if (Op0.getOpcode() != ISD::SETCC && Op1.getOpcode() != ISD::SETCC) {
11965 bool Equal = false;
11966 if (isOneConstant(Op0) && Op0.hasOneUse() &&
11967 Op0.getOpcode() == ISD::XOR) {
11968 TheXor = Op0.getNode();
11969 Equal = true;
11970 }
11971
11972 EVT SetCCVT = N.getValueType();
11973 if (LegalTypes)
11974 SetCCVT = getSetCCResultType(SetCCVT);
11975 // Replace the uses of XOR with SETCC
11976 return DAG.getSetCC(SDLoc(TheXor), SetCCVT, Op0, Op1,
11977 Equal ? ISD::SETEQ : ISD::SETNE);
11978 }
11979 }
11980
11981 return SDValue();
11982}
11983
11984// Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB.
11985//
11986SDValue DAGCombiner::visitBR_CC(SDNode *N) {
11987 CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1));
11988 SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3);
11989
11990 // If N is a constant we could fold this into a fallthrough or unconditional
11991 // branch. However that doesn't happen very often in normal code, because
11992 // Instcombine/SimplifyCFG should have handled the available opportunities.
11993 // If we did this folding here, it would be necessary to update the
11994 // MachineBasicBlock CFG, which is awkward.
11995
11996 // Use SimplifySetCC to simplify SETCC's.
11997 SDValue Simp = SimplifySetCC(getSetCCResultType(CondLHS.getValueType()),
11998 CondLHS, CondRHS, CC->get(), SDLoc(N),
11999 false);
12000 if (Simp.getNode()) AddToWorklist(Simp.getNode());
12001
12002 // fold to a simpler setcc
12003 if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC)
12004 return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
12005 N->getOperand(0), Simp.getOperand(2),
12006 Simp.getOperand(0), Simp.getOperand(1),
12007 N->getOperand(4));
12008
12009 return SDValue();
12010}
12011
12012/// Return true if 'Use' is a load or a store that uses N as its base pointer
12013/// and that N may be folded in the load / store addressing mode.
12014static bool canFoldInAddressingMode(SDNode *N, SDNode *Use,
12015 SelectionDAG &DAG,
12016 const TargetLowering &TLI) {
12017 EVT VT;
12018 unsigned AS;
12019
12020 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) {
12021 if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
12022 return false;
12023 VT = LD->getMemoryVT();
12024 AS = LD->getAddressSpace();
12025 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) {
12026 if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
12027 return false;
12028 VT = ST->getMemoryVT();
12029 AS = ST->getAddressSpace();
12030 } else
12031 return false;
12032
12033 TargetLowering::AddrMode AM;
12034 if (N->getOpcode() == ISD::ADD) {
12035 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
12036 if (Offset)
12037 // [reg +/- imm]
12038 AM.BaseOffs = Offset->getSExtValue();
12039 else
12040 // [reg +/- reg]
12041 AM.Scale = 1;
12042 } else if (N->getOpcode() == ISD::SUB) {
12043 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
12044 if (Offset)
12045 // [reg +/- imm]
12046 AM.BaseOffs = -Offset->getSExtValue();
12047 else
12048 // [reg +/- reg]
12049 AM.Scale = 1;
12050 } else
12051 return false;
12052
12053 return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM,
12054 VT.getTypeForEVT(*DAG.getContext()), AS);
12055}
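// For example, if N is (add %base, 16) and Use is a load based on N, the
// query above asks the target whether [reg + 16] is a legal addressing mode
// for the loaded type; with a non-constant RHS it asks about the
// [reg + reg] form (AM.Scale == 1) instead.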
12056
12057/// Try turning a load/store into a pre-indexed load/store when the base
12058/// pointer is an add or subtract and it has other uses besides the load/store.
12059/// After the transformation, the new indexed load/store has effectively folded
12060/// the add/subtract in and all of its other uses are redirected to the
12061/// new load/store.
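/// For example, on a target with pre-indexed loads, something like
///   %p2 = add %p, 4
///   %v = load %p2   ; %p2 has further uses
/// can become a single PRE_INC load that yields both %v and the updated
/// pointer, with the remaining uses of %p2 redirected to that pointer.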
12062bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
12063 if (Level < AfterLegalizeDAG)
12064 return false;
12065
12066 bool isLoad = true;
12067 SDValue Ptr;
12068 EVT VT;
12069 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
12070 if (LD->isIndexed())
12071 return false;
12072 VT = LD->getMemoryVT();
12073 if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) &&
12074 !TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT))
12075 return false;
12076 Ptr = LD->getBasePtr();
12077 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
12078 if (ST->isIndexed())
12079 return false;
12080 VT = ST->getMemoryVT();
12081 if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) &&
12082 !TLI.isIndexedStoreLegal(ISD::PRE_DEC, VT))
12083 return false;
12084 Ptr = ST->getBasePtr();
12085 isLoad = false;
12086 } else {
12087 return false;
12088 }
12089
12090 // If the pointer is not an add/sub, or if it doesn't have multiple uses, bail
12091 // out. There is no reason to make this a preinc/predec.
12092 if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) ||
12093 Ptr.getNode()->hasOneUse())
12094 return false;
12095
12096 // Ask the target to do addressing mode selection.
12097 SDValue BasePtr;
12098 SDValue Offset;
12099 ISD::MemIndexedMode AM = ISD::UNINDEXED;
12100 if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG))
12101 return false;
12102
12103 // Backends without true r+i pre-indexed forms may need to pass a
12104 // constant base with a variable offset so that constant coercion
12105 // will work with the patterns in canonical form.
12106 bool Swapped = false;
12107 if (isa<ConstantSDNode>(BasePtr)) {
12108 std::swap(BasePtr, Offset);
12109 Swapped = true;
12110 }
12111
12112 // Don't create an indexed load / store with zero offset.
12113 if (isNullConstant(Offset))
12114 return false;
12115
12116 // Try turning it into a pre-indexed load / store except when:
12117 // 1) The new base ptr is a frame index.
12118 // 2) If N is a store and the new base ptr is either the same as or is a
12119 // predecessor of the value being stored.
12120 // 3) Another use of old base ptr is a predecessor of N. If ptr is folded
12121 // that would create a cycle.
12122 // 4) All uses are load / store ops that use it as old base ptr.
12123
12124 // Check #1. Preinc'ing a frame index would require copying the stack pointer
12125 // (plus the implicit offset) to a register to preinc anyway.
12126 if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
12127 return false;
12128
12129 // Check #2.
12130 if (!isLoad) {
12131 SDValue Val = cast<StoreSDNode>(N)->getValue();
12132 if (Val == BasePtr || BasePtr.getNode()->isPredecessorOf(Val.getNode()))
12133 return false;
12134 }
12135
12136 // Caches for hasPredecessorHelper.
12137 SmallPtrSet<const SDNode *, 32> Visited;
12138 SmallVector<const SDNode *, 16> Worklist;
12139 Worklist.push_back(N);
12140
12141 // If the offset is a constant, there may be other adds of constants that
12142 // can be folded with this one. We should do this to avoid having to keep
12143 // a copy of the original base pointer.
12144 SmallVector<SDNode *, 16> OtherUses;
12145 if (isa<ConstantSDNode>(Offset))
12146 for (SDNode::use_iterator UI = BasePtr.getNode()->use_begin(),
12147 UE = BasePtr.getNode()->use_end();
12148 UI != UE; ++UI) {
12149 SDUse &Use = UI.getUse();
12150 // Skip the use that is Ptr and uses of other results from BasePtr's
12151 // node (important for nodes that return multiple results).
12152 if (Use.getUser() == Ptr.getNode() || Use != BasePtr)
12153 continue;
12154
12155 if (SDNode::hasPredecessorHelper(Use.getUser(), Visited, Worklist))
12156 continue;
12157
12158 if (Use.getUser()->getOpcode() != ISD::ADD &&
12159 Use.getUser()->getOpcode() != ISD::SUB) {
12160 OtherUses.clear();
12161 break;
12162 }
12163
12164 SDValue Op1 = Use.getUser()->getOperand((UI.getOperandNo() + 1) & 1);
12165 if (!isa<ConstantSDNode>(Op1)) {
12166 OtherUses.clear();
12167 break;
12168 }
12169
12170 // FIXME: In some cases, we can be smarter about this.
12171 if (Op1.getValueType() != Offset.getValueType()) {
12172 OtherUses.clear();
12173 break;
12174 }
12175
12176 OtherUses.push_back(Use.getUser());
12177 }
12178
12179 if (Swapped)
12180 std::swap(BasePtr, Offset);
12181
12182 // Now check for #3 and #4.
12183 bool RealUse = false;
12184
12185 for (SDNode *Use : Ptr.getNode()->uses()) {
12186 if (Use == N)
12187 continue;
12188 if (SDNode::hasPredecessorHelper(Use, Visited, Worklist))
12189 return false;
12190
12191 // If Ptr may be folded in the addressing mode of another use, then it's
12192 // not profitable to do this transformation.
12193 if (!canFoldInAddressingMode(Ptr.getNode(), Use, DAG, TLI))
12194 RealUse = true;
12195 }
12196
12197 if (!RealUse)
12198 return false;
12199
12200 SDValue Result;
12201 if (isLoad)
12202 Result = DAG.getIndexedLoad(SDValue(N,0), SDLoc(N),
12203 BasePtr, Offset, AM);
12204 else
12205 Result = DAG.getIndexedStore(SDValue(N,0), SDLoc(N),
12206 BasePtr, Offset, AM);
12207 ++PreIndexedNodes;
12208 ++NodesCombined;
12209 LLVM_DEBUG(dbgs() << "\nReplacing.4 "; N->dump(&DAG); dbgs() << "\nWith: ";
12210 Result.getNode()->dump(&DAG); dbgs() << '\n');
12211 WorklistRemover DeadNodes(*this);
12212 if (isLoad) {
12213 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
12214 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
12215 } else {
12216 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
12217 }
12218
12219 // Finally, since the node is now dead, remove it from the graph.
12220 deleteAndRecombine(N);
12221
12222 if (Swapped)
12223 std::swap(BasePtr, Offset);
12224
12225 // Replace other uses of BasePtr that can be updated to use Ptr
12226 for (unsigned i = 0, e = OtherUses.size(); i != e; ++i) {
12227 unsigned OffsetIdx = 1;
12228 if (OtherUses[i]->getOperand(OffsetIdx).getNode() == BasePtr.getNode())
12229 OffsetIdx = 0;
12230 assert(OtherUses[i]->getOperand(!OffsetIdx).getNode() ==
12231 BasePtr.getNode() && "Expected BasePtr operand");
12232
12233 // We need to replace ptr0 in the following expression:
12234 // x0 * offset0 + y0 * ptr0 = t0
12235 // knowing that
12236 // x1 * offset1 + y1 * ptr0 = t1 (the indexed load/store)
12237 //
12238 // where x0, x1, y0 and y1 in {-1, 1} are given by the types of the
12239 // indexed load/store and the expression that needs to be re-written.
12240 //
12241 // Therefore, we have:
12242 // t0 = (x0 * offset0 - x1 * y0 * y1 * offset1) + (y0 * y1) * t1
12243
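    // Worked example: for a PRE_INC load t1 = ptr0 + offset1 (x1 = y1 = 1)
    // and another use t0 = ptr0 + offset0 (x0 = y0 = 1), the formula gives
    // t0 = (offset0 - offset1) + t1, so the old base pointer is rewritten
    // in terms of the indexed result t1.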
12244 ConstantSDNode *CN =
12245 cast<ConstantSDNode>(OtherUses[i]->getOperand(OffsetIdx));
12246 int X0, X1, Y0, Y1;
12247 const APInt &Offset0 = CN->getAPIntValue();
12248 APInt Offset1 = cast<ConstantSDNode>(Offset)->getAPIntValue();
12249
12250 X0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 1) ? -1 : 1;
12251 Y0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 0) ? -1 : 1;
12252 X1 = (AM == ISD::PRE_DEC && !Swapped) ? -1 : 1;
12253 Y1 = (AM == ISD::PRE_DEC && Swapped) ? -1 : 1;
12254
12255 unsigned Opcode = (Y0 * Y1 < 0) ? ISD::SUB : ISD::ADD;
12256
12257 APInt CNV = Offset0;
12258 if (X0 < 0) CNV = -CNV;
12259 if (X1 * Y0 * Y1 < 0) CNV = CNV + Offset1;
12260 else CNV = CNV - Offset1;
12261
12262 SDLoc DL(OtherUses[i]);
12263
12264 // We can now generate the new expression.
12265 SDValue NewOp1 = DAG.getConstant(CNV, DL, CN->getValueType(0));
12266 SDValue NewOp2 = Result.getValue(isLoad ? 1 : 0);
12267
12268 SDValue NewUse = DAG.getNode(Opcode,
12269 DL,
12270 OtherUses[i]->getValueType(0), NewOp1, NewOp2);
12271 DAG.ReplaceAllUsesOfValueWith(SDValue(OtherUses[i], 0), NewUse);
12272 deleteAndRecombine(OtherUses[i]);
12273 }
12274
12275 // Replace the uses of Ptr with uses of the updated base value.
12276 DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0));
12277 deleteAndRecombine(Ptr.getNode());
12278 AddToWorklist(Result.getNode());
12279
12280 return true;
12281}
12282
12283/// Try to combine a load/store with an add/sub of the base pointer node into a
12284/// post-indexed load/store. The transformation effectively folds the add/subtract
12285/// into the new indexed load/store, and all of its uses are redirected to the
12286/// new load/store.
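/// For example, on a target with post-indexed loads, the pair
///   %v = load %p
///   %p2 = add %p, 4
/// can become a single POST_INC load producing both %v and %p2.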
12287bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
12288 if (Level < AfterLegalizeDAG)
12289 return false;
12290
12291 bool isLoad = true;
12292 SDValue Ptr;
12293 EVT VT;
12294 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
12295 if (LD->isIndexed())
12296 return false;
12297 VT = LD->getMemoryVT();
12298 if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
12299 !TLI.isIndexedLoadLegal(ISD::POST_DEC, VT))
12300 return false;
12301 Ptr = LD->getBasePtr();
12302 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
12303 if (ST->isIndexed())
12304 return false;
12305 VT = ST->getMemoryVT();
12306 if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
12307 !TLI.isIndexedStoreLegal(ISD::POST_DEC, VT))
12308 return false;
12309 Ptr = ST->getBasePtr();
12310 isLoad = false;
12311 } else {
12312 return false;
12313 }
12314
12315 if (Ptr.getNode()->hasOneUse())
12316 return false;
12317
12318 for (SDNode *Op : Ptr.getNode()->uses()) {
12319 if (Op == N ||
12320 (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
12321 continue;
12322
12323 SDValue BasePtr;
12324 SDValue Offset;
12325 ISD::MemIndexedMode AM = ISD::UNINDEXED;
12326 if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
12327 // Don't create an indexed load / store with zero offset.
12328 if (isNullConstant(Offset))
12329 continue;
12330
12331 // Try turning it into a post-indexed load / store except when
12332 // 1) All uses are load / store ops that use it as base ptr (and
12333 // it may be folded into the addressing mode).
12334 // 2) Op must be independent of N, i.e. Op is neither a predecessor
12335 // nor a successor of N. Otherwise, if Op is folded that would
12336 // create a cycle.
12337
12338 if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
12339 continue;
12340
12341 // Check for #1.
12342 bool TryNext = false;
12343 for (SDNode *Use : BasePtr.getNode()->uses()) {
12344 if (Use == Ptr.getNode())
12345 continue;
12346
12347 // If all the uses are load / store addresses, then don't do the
12348 // transformation.
12349 if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB){
12350 bool RealUse = false;
12351 for (SDNode *UseUse : Use->uses()) {
12352 if (!canFoldInAddressingMode(Use, UseUse, DAG, TLI))
12353 RealUse = true;
12354 }
12355
12356 if (!RealUse) {
12357 TryNext = true;
12358 break;
12359 }
12360 }
12361 }
12362
12363 if (TryNext)
12364 continue;
12365
12366 // Check for #2
12367 if (!Op->isPredecessorOf(N) && !N->isPredecessorOf(Op)) {
12368 SDValue Result = isLoad
12369 ? DAG.getIndexedLoad(SDValue(N,0), SDLoc(N),
12370 BasePtr, Offset, AM)
12371 : DAG.getIndexedStore(SDValue(N,0), SDLoc(N),
12372 BasePtr, Offset, AM);
12373 ++PostIndexedNodes;
12374 ++NodesCombined;
12375 LLVM_DEBUG(dbgs() << "\nReplacing.5 "; N->dump(&DAG);
12376 dbgs() << "\nWith: "; Result.getNode()->dump(&DAG);
12377 dbgs() << '\n');
12378 WorklistRemover DeadNodes(*this);
12379 if (isLoad) {
12380 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
12381 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
12382 } else {
12383 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
12384 }
12385
12386 // Finally, since the node is now dead, remove it from the graph.
12387 deleteAndRecombine(N);
12388
12389 // Replace the uses of Use with uses of the updated base value.
12390 DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0),
12391 Result.getValue(isLoad ? 1 : 0));
12392 deleteAndRecombine(Op);
12393 return true;
12394 }
12395 }
12396 }
12397
12398 return false;
12399}
12400
12401/// Return the base-pointer arithmetic from an indexed \p LD.
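/// For example, a PRE_INC or POST_INC load with base %p and increment 4
/// yields (add %p, 4); the *_DEC modes yield the matching sub.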
12402SDValue DAGCombiner::SplitIndexingFromLoad(LoadSDNode *LD) {
12403 ISD::MemIndexedMode AM = LD->getAddressingMode();
12404 assert(AM != ISD::UNINDEXED);
12405 SDValue BP = LD->getOperand(1);
12406 SDValue Inc = LD->getOperand(2);
12407
12408 // Some backends use TargetConstants for load offsets, but don't expect
12409 // TargetConstants in general ADD nodes. We can convert these constants into
12410 // regular Constants (if the constant is not opaque).
12411 assert((Inc.getOpcode() != ISD::TargetConstant ||
12412 !cast<ConstantSDNode>(Inc)->isOpaque()) &&
12413 "Cannot split out indexing using opaque target constants");
12414 if (Inc.getOpcode() == ISD::TargetConstant) {
12415 ConstantSDNode *ConstInc = cast<ConstantSDNode>(Inc);
12416 Inc = DAG.getConstant(*ConstInc->getConstantIntValue(), SDLoc(Inc),
12417 ConstInc->getValueType(0));
12418 }
12419
12420 unsigned Opc =
12421 (AM == ISD::PRE_INC || AM == ISD::POST_INC ? ISD::ADD : ISD::SUB);
12422 return DAG.getNode(Opc, SDLoc(LD), BP.getSimpleValueType(), BP, Inc);
12423}
12424
12425SDValue DAGCombiner::visitLOAD(SDNode *N) {
12426 LoadSDNode *LD = cast<LoadSDNode>(N);
12427 SDValue Chain = LD->getChain();
12428 SDValue Ptr = LD->getBasePtr();
12429
12430 // If load is not volatile and there are no uses of the loaded value (and
12431 // the updated indexed value in case of indexed loads), change uses of the
12432 // chain value into uses of the chain input (i.e. delete the dead load).
12433 if (!LD->isVolatile()) {
12434 if (N->getValueType(1) == MVT::Other) {
12435 // Unindexed loads.
12436 if (!N->hasAnyUseOfValue(0)) {
12437 // It's not safe to use the two value CombineTo variant here. e.g.
12438 // v1, chain2 = load chain1, loc
12439 // v2, chain3 = load chain2, loc
12440 // v3 = add v2, c
12441 // Now we replace use of chain2 with chain1. This makes the second load
12442 // isomorphic to the one we are deleting, and thus makes this load live.
12443 LLVM_DEBUG(dbgs() << "\nReplacing.6 "; N->dump(&DAG);
12444 dbgs() << "\nWith chain: "; Chain.getNode()->dump(&DAG);
12445 dbgs() << "\n");
12446 WorklistRemover DeadNodes(*this);
12447 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
12448 AddUsersToWorklist(Chain.getNode());
12449 if (N->use_empty())
12450 deleteAndRecombine(N);
12451
12452 return SDValue(N, 0); // Return N so it doesn't get rechecked!
12453 }
12454 } else {
12455 // Indexed loads.
12456 assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?");
12457
12458 // If this load has an opaque TargetConstant offset, then we cannot split
12459 // the indexing into an add/sub directly (that TargetConstant may not be
12460 // valid for a different type of node, and we cannot convert an opaque
12461 // target constant into a regular constant).
12462 bool HasOTCInc = LD->getOperand(2).getOpcode() == ISD::TargetConstant &&
12463 cast<ConstantSDNode>(LD->getOperand(2))->isOpaque();
12464
12465 if (!N->hasAnyUseOfValue(0) &&
12466 ((MaySplitLoadIndex && !HasOTCInc) || !N->hasAnyUseOfValue(1))) {
12467 SDValue Undef = DAG.getUNDEF(N->getValueType(0));
12468 SDValue Index;
12469 if (N->hasAnyUseOfValue(1) && MaySplitLoadIndex && !HasOTCInc) {
12470 Index = SplitIndexingFromLoad(LD);
12471 // Try to fold the base pointer arithmetic into subsequent loads and
12472 // stores.
12473 AddUsersToWorklist(N);
12474 } else
12475 Index = DAG.getUNDEF(N->getValueType(1));
12476 LLVM_DEBUG(dbgs() << "\nReplacing.7 "; N->dump(&DAG);
12477 dbgs() << "\nWith: "; Undef.getNode()->dump(&DAG);
12478 dbgs() << " and 2 other values\n");
12479 WorklistRemover DeadNodes(*this);
12480 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef);
12481 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Index);
12482 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain);
12483 deleteAndRecombine(N);
12484 return SDValue(N, 0); // Return N so it doesn't get rechecked!
12485 }
12486 }
12487 }
12488
12489 // If this load is directly stored, replace the load value with the stored
12490 // value.
12491 // TODO: Handle store large -> read small portion.
12492 // TODO: Handle TRUNCSTORE/LOADEXT
12493 if (OptLevel != CodeGenOpt::None &&
12494 ISD::isNormalLoad(N) && !LD->isVolatile()) {
12495 if (ISD::isNON_TRUNCStore(Chain.getNode())) {
12496 StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
12497 if (PrevST->getBasePtr() == Ptr &&
12498 PrevST->getValue().getValueType() == N->getValueType(0))
12499 return CombineTo(N, PrevST->getOperand(1), Chain);
12500 }
12501 }
12502
12503 // Try to infer better alignment information than the load already has.
12504 if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
12505 if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
12506 if (Align > LD->getAlignment() && LD->getSrcValueOffset() % Align == 0) {
12507 SDValue NewLoad = DAG.getExtLoad(
12508 LD->getExtensionType(), SDLoc(N), LD->getValueType(0), Chain, Ptr,
12509 LD->getPointerInfo(), LD->getMemoryVT(), Align,
12510 LD->getMemOperand()->getFlags(), LD->getAAInfo());
12511 // NewLoad will always be N as we are only refining the alignment
12512 assert(NewLoad.getNode() == N);
12513 (void)NewLoad;
12514 }
12515 }
12516 }
12517
12518 if (LD->isUnindexed()) {
12519 // Walk up chain skipping non-aliasing memory nodes.
12520 SDValue BetterChain = FindBetterChain(N, Chain);
12521
12522 // If there is a better chain.
12523 if (Chain != BetterChain) {
12524 SDValue ReplLoad;
12525
12526 // Replace the chain to avoid the dependency.
12527 if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
12528 ReplLoad = DAG.getLoad(N->getValueType(0), SDLoc(LD),
12529 BetterChain, Ptr, LD->getMemOperand());
12530 } else {
12531 ReplLoad = DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD),
12532 LD->getValueType(0),
12533 BetterChain, Ptr, LD->getMemoryVT(),
12534 LD->getMemOperand());
12535 }
12536
12537 // Create token factor to keep old chain connected.
12538 SDValue Token = DAG.getNode(ISD::TokenFactor, SDLoc(N),
12539 MVT::Other, Chain, ReplLoad.getValue(1));
12540
12541 // Replace uses with load result and token factor
12542 return CombineTo(N, ReplLoad.getValue(0), Token);
12543 }
12544 }
12545
12546 // Try transforming N to an indexed load.
12547 if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
12548 return SDValue(N, 0);
12549
12550 // Try to slice up N to more direct loads if the slices are mapped to
12551 // different register banks or pairing can take place.
12552 if (SliceUpLoad(N))
12553 return SDValue(N, 0);
12554
12555 return SDValue();
12556}
12557
12558namespace {
12559
12560/// Helper structure used to slice a load into smaller loads.
12561/// Basically a slice is obtained from the following sequence:
12562/// Origin = load Ty1, Base
12563/// Shift = srl Ty1 Origin, CstTy Amount
12564/// Inst = trunc Shift to Ty2
12565///
12566/// Then, it will be rewritten into:
12567/// Slice = load SliceTy, Base + SliceOffset
12568/// [Inst = zext Slice to Ty2], only if SliceTy <> Ty2
12569///
12570/// SliceTy is deduced from the number of bits that are actually used to
12571/// build Inst.
12572struct LoadedSlice {
12573 /// Helper structure used to compute the cost of a slice.
12574 struct Cost {
12575 /// Are we optimizing for code size.
12576 bool ForCodeSize;
12577
12578 /// Various costs.
12579 unsigned Loads = 0;
12580 unsigned Truncates = 0;
12581 unsigned CrossRegisterBanksCopies = 0;
12582 unsigned ZExts = 0;
12583 unsigned Shift = 0;
12584
12585 Cost(bool ForCodeSize = false) : ForCodeSize(ForCodeSize) {}
12586
12587 /// Get the cost of one isolated slice.
12588 Cost(const LoadedSlice &LS, bool ForCodeSize = false)
12589 : ForCodeSize(ForCodeSize), Loads(1) {
12590 EVT TruncType = LS.Inst->getValueType(0);
12591 EVT LoadedType = LS.getLoadedType();
12592 if (TruncType != LoadedType &&
12593 !LS.DAG->getTargetLoweringInfo().isZExtFree(LoadedType, TruncType))
12594 ZExts = 1;
12595 }
12596
12597 /// Account for slicing gain in the current cost.
12598 /// Slicing provides a few gains, like removing a shift or a
12599 /// truncate. This method allows growing the cost of the original
12600 /// load with the gain from this slice.
12601 void addSliceGain(const LoadedSlice &LS) {
12602 // Each slice saves a truncate.
12603 const TargetLowering &TLI = LS.DAG->getTargetLoweringInfo();
12604 if (!TLI.isTruncateFree(LS.Inst->getOperand(0).getValueType(),
12605 LS.Inst->getValueType(0)))
12606 ++Truncates;
12607 // If there is a shift amount, this slice gets rid of it.
12608 if (LS.Shift)
12609 ++Shift;
12610 // If this slice can merge a cross register bank copy, account for it.
12611 if (LS.canMergeExpensiveCrossRegisterBankCopy())
12612 ++CrossRegisterBanksCopies;
12613 }
12614
12615 Cost &operator+=(const Cost &RHS) {
12616 Loads += RHS.Loads;
12617 Truncates += RHS.Truncates;
12618 CrossRegisterBanksCopies += RHS.CrossRegisterBanksCopies;
12619 ZExts += RHS.ZExts;
12620 Shift += RHS.Shift;
12621 return *this;
12622 }
12623
12624 bool operator==(const Cost &RHS) const {
12625 return Loads == RHS.Loads && Truncates == RHS.Truncates &&
12626 CrossRegisterBanksCopies == RHS.CrossRegisterBanksCopies &&
12627 ZExts == RHS.ZExts && Shift == RHS.Shift;
12628 }
12629
12630 bool operator!=(const Cost &RHS) const { return !(*this == RHS); }
12631
12632 bool operator<(const Cost &RHS) const {
12633 // Assume cross register banks copies are as expensive as loads.
12634 // FIXME: Do we want some more target hooks?
12635 unsigned ExpensiveOpsLHS = Loads + CrossRegisterBanksCopies;
12636 unsigned ExpensiveOpsRHS = RHS.Loads + RHS.CrossRegisterBanksCopies;
12637 // Unless we are optimizing for code size, consider the
12638 // expensive operation first.
12639 if (!ForCodeSize && ExpensiveOpsLHS != ExpensiveOpsRHS)
12640 return ExpensiveOpsLHS < ExpensiveOpsRHS;
12641 return (Truncates + ZExts + Shift + ExpensiveOpsLHS) <
12642 (RHS.Truncates + RHS.ZExts + RHS.Shift + ExpensiveOpsRHS);
12643 }
12644
12645 bool operator>(const Cost &RHS) const { return RHS < *this; }
12646
12647 bool operator<=(const Cost &RHS) const { return !(RHS < *this); }
12648
12649 bool operator>=(const Cost &RHS) const { return !(*this < RHS); }
12650 };
12651
12652 // The last instruction that represents the slice. This should be a
12653 // truncate instruction.
12654 SDNode *Inst;
12655
12656 // The original load instruction.
12657 LoadSDNode *Origin;
12658
12659 // The right shift amount in bits from the original load.
12660 unsigned Shift;
12661
12662 // The DAG from which Origin came.
12663 // This is used to get some contextual information about legal types, etc.
12664 SelectionDAG *DAG;
12665
12666 LoadedSlice(SDNode *Inst = nullptr, LoadSDNode *Origin = nullptr,
12667 unsigned Shift = 0, SelectionDAG *DAG = nullptr)
12668 : Inst(Inst), Origin(Origin), Shift(Shift), DAG(DAG) {}
12669
12670 /// Get the bits used in a chunk of bits \p BitWidth large.
12671 /// \return Result is \p BitWidth bits wide, with used bits set to 1 and
12672 /// unused bits set to 0.
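  /// For example, a slice that truncates to i16 an i64 load shifted right
  /// by 16 yields 0x00000000FFFF0000: sixteen ones, zero-extended to 64
  /// bits, then shifted left by 16.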
12673 APInt getUsedBits() const {
12674 // Reproduce the trunc(lshr) sequence:
12675 // - Start from the truncated value.
12676 // - Zero extend to the desired bit width.
12677 // - Shift left.
12678 assert(Origin && "No original load to compare against.");
12679 unsigned BitWidth = Origin->getValueSizeInBits(0);
12680 assert(Inst && "This slice is not bound to an instruction");
12681 assert(Inst->getValueSizeInBits(0) <= BitWidth &&
12682 "Extracted slice is bigger than the whole type!");
12683 APInt UsedBits(Inst->getValueSizeInBits(0), 0);
12684 UsedBits.setAllBits();
12685 UsedBits = UsedBits.zext(BitWidth);
12686 UsedBits <<= Shift;
12687 return UsedBits;
12688 }
12689
12690 /// Get the size of the slice to be loaded in bytes.
12691 unsigned getLoadedSize() const {
12692 unsigned SliceSize = getUsedBits().countPopulation();
12693 assert(!(SliceSize & 0x7) && "Size is not a multiple of a byte.");
12694 return SliceSize / 8;
12695 }
12696
12697 /// Get the type that will be loaded for this slice.
12698 /// Note: This may not be the final type for the slice.
12699 EVT getLoadedType() const {
12700 assert(DAG && "Missing context");
12701 LLVMContext &Ctxt = *DAG->getContext();
12702 return EVT::getIntegerVT(Ctxt, getLoadedSize() * 8);
12703 }
12704
12705 /// Get the alignment of the load used for this slice.
12706 unsigned getAlignment() const {
12707 unsigned Alignment = Origin->getAlignment();
12708 unsigned Offset = getOffsetFromBase();
12709 if (Offset != 0)
12710 Alignment = MinAlign(Alignment, Alignment + Offset);
12711 return Alignment;
12712 }
12713
12714 /// Check if this slice can be rewritten with legal operations.
12715 bool isLegal() const {
12716 // An invalid slice is not legal.
12717 if (!Origin || !Inst || !DAG)
12718 return false;
12719
12720 // Offsets are for indexed loads only; we do not handle that.
12721 if (!Origin->getOffset().isUndef())
12722 return false;
12723
12724 const TargetLowering &TLI = DAG->getTargetLoweringInfo();
12725
12726 // Check that the type is legal.
12727 EVT SliceType = getLoadedType();
12728 if (!TLI.isTypeLegal(SliceType))
12729 return false;
12730
12731 // Check that the load is legal for this type.
12732 if (!TLI.isOperationLegal(ISD::LOAD, SliceType))
12733 return false;
12734
12735 // Check that the offset can be computed.
12736 // 1. Check its type.
12737 EVT PtrType = Origin->getBasePtr().getValueType();
12738 if (PtrType == MVT::Untyped || PtrType.isExtended())
12739 return false;
12740
12741 // 2. Check that it fits in the immediate.
12742 if (!TLI.isLegalAddImmediate(getOffsetFromBase()))
12743 return false;
12744
12745 // 3. Check that the computation is legal.
12746 if (!TLI.isOperationLegal(ISD::ADD, PtrType))
12747 return false;
12748
12749 // Check that the zext is legal if it needs one.
12750 EVT TruncateType = Inst->getValueType(0);
12751 if (TruncateType != SliceType &&
12752 !TLI.isOperationLegal(ISD::ZERO_EXTEND, TruncateType))
12753 return false;
12754
12755 return true;
12756 }
12757
12758 /// Get the offset in bytes of this slice in the original chunk of
12759 /// bits.
12760 /// \pre DAG != nullptr.
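  /// For example, a 2-byte slice of an 8-byte load with Shift == 16 starts
  /// at byte offset 2 on a little-endian target, and at 8 - 2 - 2 == 4 on a
  /// big-endian one.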
12761 uint64_t getOffsetFromBase() const {
12762 assert(DAG && "Missing context.");
12763 bool IsBigEndian = DAG->getDataLayout().isBigEndian();
12764 assert(!(Shift & 0x7) && "Shifts not aligned on Bytes are not supported.");
12765 uint64_t Offset = Shift / 8;
12766 unsigned TySizeInBytes = Origin->getValueSizeInBits(0) / 8;
12767 assert(!(Origin->getValueSizeInBits(0) & 0x7) &&
12768 "The size of the original loaded type is not a multiple of a"
12769 " byte.");
12770 // If Offset is bigger than TySizeInBytes, it means we are loading all
12771 // zeros. This should have been optimized before in the process.
12772 assert(TySizeInBytes > Offset &&
12773 "Invalid shift amount for given loaded size");
12774 if (IsBigEndian)
12775 Offset = TySizeInBytes - Offset - getLoadedSize();
12776 return Offset;
12777 }
12778
12779 /// Generate the sequence of instructions to load the slice
12780 /// represented by this object and redirect the uses of this slice to
12781 /// this new sequence of instructions.
12782 /// \pre this->Inst && this->Origin are valid Instructions and this
12783 /// object passed the legal check: LoadedSlice::isLegal returned true.
12784 /// \return The last instruction of the sequence used to load the slice.
12785 SDValue loadSlice() const {
12786 assert(Inst && Origin && "Unable to replace a non-existing slice.");
12787 const SDValue &OldBaseAddr = Origin->getBasePtr();
12788 SDValue BaseAddr = OldBaseAddr;
12789 // Get the offset in that chunk of bytes w.r.t. the endianness.
12790 int64_t Offset = static_cast<int64_t>(getOffsetFromBase());
12791 assert(Offset >= 0 && "Offset too big to fit in int64_t!");
12792 if (Offset) {
12793 // BaseAddr = BaseAddr + Offset.
12794 EVT ArithType = BaseAddr.getValueType();
12795 SDLoc DL(Origin);
12796 BaseAddr = DAG->getNode(ISD::ADD, DL, ArithType, BaseAddr,
12797 DAG->getConstant(Offset, DL, ArithType));
12798 }
12799
12800 // Create the type of the loaded slice according to its size.
12801 EVT SliceType = getLoadedType();
12802
12803 // Create the load for the slice.
12804 SDValue LastInst =
12805 DAG->getLoad(SliceType, SDLoc(Origin), Origin->getChain(), BaseAddr,
12806 Origin->getPointerInfo().getWithOffset(Offset),
12807 getAlignment(), Origin->getMemOperand()->getFlags());
12808 // If the final type is not the same as the loaded type, this means that
12809 // we have to pad with zero. Create a zero extend for that.
12810 EVT FinalType = Inst->getValueType(0);
12811 if (SliceType != FinalType)
12812 LastInst =
12813 DAG->getNode(ISD::ZERO_EXTEND, SDLoc(LastInst), FinalType, LastInst);
12814 return LastInst;
12815 }
12816
12817 /// Check if this slice can be merged with an expensive cross register
12818 /// bank copy. E.g.,
12819 /// i = load i32
12820 /// f = bitcast i32 i to float
12821 bool canMergeExpensiveCrossRegisterBankCopy() const {
12822 if (!Inst || !Inst->hasOneUse())
12823 return false;
12824 SDNode *Use = *Inst->use_begin();
12825 if (Use->getOpcode() != ISD::BITCAST)
12826 return false;
12827 assert(DAG && "Missing context");
12828 const TargetLowering &TLI = DAG->getTargetLoweringInfo();
12829 EVT ResVT = Use->getValueType(0);
12830 const TargetRegisterClass *ResRC = TLI.getRegClassFor(ResVT.getSimpleVT());
12831 const TargetRegisterClass *ArgRC =
12832 TLI.getRegClassFor(Use->getOperand(0).getValueType().getSimpleVT());
12833 if (ArgRC == ResRC || !TLI.isOperationLegal(ISD::LOAD, ResVT))
12834 return false;
12835
12836 // At this point, we know that we perform a cross-register-bank copy.
12837 // Check if it is expensive.
12838 const TargetRegisterInfo *TRI = DAG->getSubtarget().getRegisterInfo();
12839 // Assume bitcasts are cheap, unless both register classes do not
12840 // explicitly share a common sub class.
12841 if (!TRI || TRI->getCommonSubClass(ArgRC, ResRC))
12842 return false;
12843
12844 // Check if it will be merged with the load.
12845 // 1. Check the alignment constraint.
12846 unsigned RequiredAlignment = DAG->getDataLayout().getABITypeAlignment(
12847 ResVT.getTypeForEVT(*DAG->getContext()));
12848
12849 if (RequiredAlignment > getAlignment())
12850 return false;
12851
12852 // 2. Check that the load is a legal operation for that type.
12853 if (!TLI.isOperationLegal(ISD::LOAD, ResVT))
12854 return false;
12855
12856 // 3. Check that we do not have a zext in the way.
12857 if (Inst->getValueType(0) != getLoadedType())
12858 return false;
12859
12860 return true;
12861 }
12862};
12863
12864} // end anonymous namespace
12865
12866/// Check that all bits set in \p UsedBits form a dense region, i.e.,
12867/// \p UsedBits looks like 0..0 1..1 0..0.
12868static bool areUsedBitsDense(const APInt &UsedBits) {
12869 // If all the bits are one, this is dense!
12870 if (UsedBits.isAllOnesValue())
12871 return true;
12872
12873 // Get rid of the unused bits on the right.
12874 APInt NarrowedUsedBits = UsedBits.lshr(UsedBits.countTrailingZeros());
12875 // Get rid of the unused bits on the left.
12876 if (NarrowedUsedBits.countLeadingZeros())
12877 NarrowedUsedBits = NarrowedUsedBits.trunc(NarrowedUsedBits.getActiveBits());
12878 // Check that the chunk of bits is completely used.
12879 return NarrowedUsedBits.isAllOnesValue();
12880}
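The same dense-run test, sketched on a plain uint64_t (illustrative only; the helper name and the GCC/Clang builtins are assumptions, not part of the pass):

  #include <cstdint>

  // True iff the set bits of Bits form one contiguous run, i.e. Bits
  // looks like 0..0 1..1 0..0 (Bits == 0 is treated as not dense here).
  static bool isDenseRun(uint64_t Bits) {
    if (Bits == 0)
      return false;
    Bits >>= __builtin_ctzll(Bits);  // drop the unused bits on the right
    return (Bits & (Bits + 1)) == 0; // what remains must be 0..0 1..1
  }
  // isDenseRun(0x00FF0000) == true; isDenseRun(0x00FF00FF) == false.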
12881
12882/// Check whether or not \p First and \p Second are next to each other
12883/// in memory. This means that there is no hole between the bits loaded
12884/// by \p First and the bits loaded by \p Second.
12885static bool areSlicesNextToEachOther(const LoadedSlice &First,
12886 const LoadedSlice &Second) {
12887 assert(First.Origin == Second.Origin && First.Origin &&
12888 "Unable to match different memory origins.");
12889 APInt UsedBits = First.getUsedBits();
12890 assert((UsedBits & Second.getUsedBits()) == 0 &&
12891 "Slices are not supposed to overlap.");
12892 UsedBits |= Second.getUsedBits();
12893 return areUsedBitsDense(UsedBits);
12894}
12895
12896/// Adjust the \p GlobalLSCost according to the target
12897/// pairing capabilities and the layout of the slices.
12898/// \pre \p GlobalLSCost should account for at least as many loads as
12899/// there are slices in \p LoadedSlices.
12900static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
12901 LoadedSlice::Cost &GlobalLSCost) {
12902 unsigned NumberOfSlices = LoadedSlices.size();
12903 // If there are fewer than 2 elements, no pairing is possible.
12904 if (NumberOfSlices < 2)
12905 return;
12906
12907 // Sort the slices so that elements that are likely to be next to each
12908 // other in memory are next to each other in the list.
12909 llvm::sort(LoadedSlices.begin(), LoadedSlices.end(),
12910 [](const LoadedSlice &LHS, const LoadedSlice &RHS) {
12911 assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
12912 return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
12913 });
12914 const TargetLowering &TLI = LoadedSlices[0].DAG->getTargetLoweringInfo();
12915 // First (resp. Second) is the first (resp. second) potential candidate
12916 // to be placed in a paired load.
12917 const LoadedSlice *First = nullptr;
12918 const LoadedSlice *Second = nullptr;
12919 for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice,
12920 // Set the beginning of the pair.
12921 First = Second) {
12922 Second = &LoadedSlices[CurrSlice];
12923
12924 // If First is NULL, it means we start a new pair.
12925 // Get to the next slice.
12926 if (!First)
12927 continue;
12928
12929 EVT LoadedType = First->getLoadedType();
12930
12931 // If the types of the slices are different, we cannot pair them.
12932 if (LoadedType != Second->getLoadedType())
12933 continue;
12934
12935 // Check if the target supplies paired loads for this type.
12936 unsigned RequiredAlignment = 0;
12937 if (!TLI.hasPairedLoad(LoadedType, RequiredAlignment)) {
12938 // Move to the next pair; this type is hopeless.
12939 Second = nullptr;
12940 continue;
12941 }
12942 // Check if we meet the alignment requirement.
12943 if (RequiredAlignment > First->getAlignment())
12944 continue;
12945
12946 // Check that both loads are next to each other in memory.
12947 if (!areSlicesNextToEachOther(*First, *Second))
12948 continue;
12949
12950 assert(GlobalLSCost.Loads > 0 && "We save more loads than we created!");
12951 --GlobalLSCost.Loads;
12952 // Move to the next pair.
12953 Second = nullptr;
12954 }
12955}
12956
12957/// Check the profitability of all involved LoadedSlice.
12958/// Currently, it is considered profitable if there are exactly two
12959/// involved slices (1) which are (2) next to each other in memory, and
12960/// whose cost (\see LoadedSlice::Cost) is smaller than the original load's (3).
12961///
12962/// Note: The order of the elements in \p LoadedSlices may be modified, but not
12963/// the elements themselves.
12964///
12965/// FIXME: When the cost model is mature enough, we can relax
12966/// constraints (1) and (2).
12967static bool isSlicingProfitable(SmallVectorImpl<LoadedSlice> &LoadedSlices,
12968 const APInt &UsedBits, bool ForCodeSize) {
12969 unsigned NumberOfSlices = LoadedSlices.size();
12970 if (StressLoadSlicing)
12971 return NumberOfSlices > 1;
12972
12973 // Check (1).
12974 if (NumberOfSlices != 2)
12975 return false;
12976
12977 // Check (2).
12978 if (!areUsedBitsDense(UsedBits))
12979 return false;
12980
12981 // Check (3).
12982 LoadedSlice::Cost OrigCost(ForCodeSize), GlobalSlicingCost(ForCodeSize);
12983 // The original code has one big load.
12984 OrigCost.Loads = 1;
12985 for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice) {
12986 const LoadedSlice &LS = LoadedSlices[CurrSlice];
12987 // Accumulate the cost of all the slices.
12988 LoadedSlice::Cost SliceCost(LS, ForCodeSize);
12989 GlobalSlicingCost += SliceCost;
12990
12991 // Account as cost in the original configuration the gain obtained
12992 // with the current slices.
12993 OrigCost.addSliceGain(LS);
12994 }
12995
12996 // If the target supports paired load, adjust the cost accordingly.
12997 adjustCostForPairing(LoadedSlices, GlobalSlicingCost);
12998 return OrigCost > GlobalSlicingCost;
12999}
13000
13001/// If the given load, \p LI, is used only by trunc or trunc(lshr)
13002/// operations, split it into the various pieces being extracted.
13003///
13004/// This sort of thing is introduced by SROA.
13005/// This slicing takes care not to insert overlapping loads.
13006/// \pre LI is a simple load (i.e., not an atomic or volatile load).
13007bool DAGCombiner::SliceUpLoad(SDNode *N) {
13008 if (Level < AfterLegalizeDAG)
13009 return false;
13010
13011 LoadSDNode *LD = cast<LoadSDNode>(N);
13012 if (LD->isVolatile() || !ISD::isNormalLoad(LD) ||
13013 !LD->getValueType(0).isInteger())
13014 return false;
13015
13016 // Keep track of already used bits to detect overlapping values.
13017 // In that case, we will just abort the transformation.
13018 APInt UsedBits(LD->getValueSizeInBits(0), 0);
13019
13020 SmallVector<LoadedSlice, 4> LoadedSlices;
13021
13022 // Check if this load is used as several smaller chunks of bits.
13023 // Basically, look for uses in trunc or trunc(lshr) and record a new chain
13024 // of computation for each trunc.
13025 for (SDNode::use_iterator UI = LD->use_begin(), UIEnd = LD->use_end();
13026 UI != UIEnd; ++UI) {
13027 // Skip the uses of the chain.
13028 if (UI.getUse().getResNo() != 0)
13029 continue;
13030
13031 SDNode *User = *UI;
13032 unsigned Shift = 0;
13033
13034 // Check if this is a trunc(lshr).
13035 if (User->getOpcode() == ISD::SRL && User->hasOneUse() &&
13036 isa<ConstantSDNode>(User->getOperand(1))) {
13037 Shift = User->getConstantOperandVal(1);
13038 User = *User->use_begin();
13039 }
13040
13041 // At this point, User is a TRUNCATE iff we encountered trunc or
13042 // trunc(lshr).
13043 if (User->getOpcode() != ISD::TRUNCATE)
13044 return false;
13045
13046 // The width of the type must be a power of 2 and at least 8 bits.
13047 // Otherwise the load cannot be represented in LLVM IR.
13048 // Moreover, if the shift amount is not a multiple of 8 bits, the
13049 // slice would straddle byte boundaries. We do not support that.
13050 unsigned Width = User->getValueSizeInBits(0);
13051 if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7))
13052 return false;
13053
13054 // Build the slice for this chain of computations.
13055 LoadedSlice LS(User, LD, Shift, &DAG);
13056 APInt CurrentUsedBits = LS.getUsedBits();
13057
13058 // Check if this slice overlaps with another.
13059 if ((CurrentUsedBits & UsedBits) != 0)
13060 return false;
13061 // Update the bits used globally.
13062 UsedBits |= CurrentUsedBits;
13063
13064 // Check if the new slice would be legal.
13065 if (!LS.isLegal())
13066 return false;
13067
13068 // Record the slice.
13069 LoadedSlices.push_back(LS);
13070 }
13071
13072 // Abort slicing if it does not seem to be profitable.
13073 if (!isSlicingProfitable(LoadedSlices, UsedBits, ForCodeSize))
13074 return false;
13075
13076 ++SlicedLoads;
13077
13078 // Rewrite each chain to use an independent load.
13079 // By construction, each chain can be represented by a unique load.
13080
13081 // Prepare the argument for the new token factor for all the slices.
13082 SmallVector<SDValue, 8> ArgChains;
13083 for (SmallVectorImpl<LoadedSlice>::const_iterator
13084 LSIt = LoadedSlices.begin(),
13085 LSItEnd = LoadedSlices.end();
13086 LSIt != LSItEnd; ++LSIt) {
13087 SDValue SliceInst = LSIt->loadSlice();
13088 CombineTo(LSIt->Inst, SliceInst, true);
13089 if (SliceInst.getOpcode() != ISD::LOAD)
13090 SliceInst = SliceInst.getOperand(0);
13091 assert(SliceInst->getOpcode() == ISD::LOAD &&
13092 "It takes more than a zext to get to the loaded slice!!");
13093 ArgChains.push_back(SliceInst.getValue(1));
13094 }
13095
13096 SDValue Chain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other,
13097 ArgChains);
13098 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
13099 AddToWorklist(Chain.getNode());
13100 return true;
13101}
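A source-level sketch of the rewrite this performs, assuming a little-endian target and two legal i32 slices (the function names are illustrative):

  #include <cstdint>
  #include <cstring>

  // Before: one wide load feeding a trunc and a trunc(lshr 32).
  void before(const uint64_t *P, uint32_t &Lo, uint32_t &Hi) {
    uint64_t Whole = *P;                      // i64 load
    Lo = static_cast<uint32_t>(Whole);        // trunc
    Hi = static_cast<uint32_t>(Whole >> 32);  // trunc(lshr 32)
  }

  // After slicing: two independent narrow loads, no shift or trunc.
  void after(const uint64_t *P, uint32_t &Lo, uint32_t &Hi) {
    std::memcpy(&Lo, reinterpret_cast<const char *>(P) + 0, 4); // i32 @ +0
    std::memcpy(&Hi, reinterpret_cast<const char *>(P) + 4, 4); // i32 @ +4
  }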
13102
13103/// Check to see if V is (and load (ptr), imm), where the load has
13104/// specific bytes cleared out. If so, return the byte size being masked out
13105/// and the shift amount.
13106static std::pair<unsigned, unsigned>
13107CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
13108 std::pair<unsigned, unsigned> Result(0, 0);
13109
13110 // Check for the structure we're looking for.
13111 if (V->getOpcode() != ISD::AND ||
13112 !isa<ConstantSDNode>(V->getOperand(1)) ||
13113 !ISD::isNormalLoad(V->getOperand(0).getNode()))
13114 return Result;
13115
13116 // Check the chain and pointer.
13117 LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
13118 if (LD->getBasePtr() != Ptr) return Result; // Not from same pointer.
13119
13120 // This only handles simple types.
13121 if (V.getValueType() != MVT::i16 &&
13122 V.getValueType() != MVT::i32 &&
13123 V.getValueType() != MVT::i64)
13124 return Result;
13125
13126 // Check the constant mask. Invert it so that the bits being masked out
13127 // are 1 and the bits being kept are 0. Use getSExtValue so that leading
13128 // bits follow the sign bit for uniformity.
13129 uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue();
13130 unsigned NotMaskLZ = countLeadingZeros(NotMask);
13131 if (NotMaskLZ & 7) return Result; // Must be multiple of a byte.
13132 unsigned NotMaskTZ = countTrailingZeros(NotMask);
13133 if (NotMaskTZ & 7) return Result; // Must be multiple of a byte.
13134 if (NotMaskLZ == 64) return Result; // All zero mask.
13135
13136 // See if we have a continuous run of bits. If so, we have 0*1+0*
13137 if (countTrailingOnes(NotMask >> NotMaskTZ) + NotMaskTZ + NotMaskLZ != 64)
13138 return Result;
13139
13140 // Adjust NotMaskLZ down to be from the actual size of the int instead of i64.
13141 if (V.getValueType() != MVT::i64 && NotMaskLZ)
13142 NotMaskLZ -= 64-V.getValueSizeInBits();
13143
13144 unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8;
13145 switch (MaskedBytes) {
13146 case 1:
13147 case 2:
13148 case 4: break;
13149 default: return Result; // All one mask, or 5-byte mask.
13150 }
13151
13152 // Verify that the masked run starts at a multiple of its byte width so
13153 // that the access is aligned the same as the access width.
13154 if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result;
13155
13156 // For narrowing to be valid, it must be the case that the load is the
13157 // memory operation immediately preceding the store.
13158 if (LD == Chain.getNode())
13159 ; // ok.
13160 else if (Chain->getOpcode() == ISD::TokenFactor &&
13161 SDValue(LD, 1).hasOneUse()) {
13162 // LD has only 1 chain use, so there are no indirect dependencies.
13163 bool isOk = false;
13164 for (const SDValue &ChainOp : Chain->op_values())
13165 if (ChainOp.getNode() == LD) {
13166 isOk = true;
13167 break;
13168 }
13169 if (!isOk)
13170 return Result;
13171 } else
13172 return Result; // Fail.
13173
13174 Result.first = MaskedBytes;
13175 Result.second = NotMaskTZ/8;
13176 return Result;
13177}
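A worked instance of the arithmetic above on plain integers (a sketch; the mask constant is made up and the count builtins are GCC/Clang extensions):

  #include <cstdint>
  #include <utility>

  // V = (and (load i32 p), 0xFFFF00FF): byte 1 of the load is cleared.
  std::pair<unsigned, unsigned> demoMaskedLoad() {
    int64_t SExt = static_cast<int32_t>(0xFFFF00FFu); // getSExtValue()
    uint64_t NotMask = ~(uint64_t)SExt;               // 0x000000000000FF00
    unsigned LZ = __builtin_clzll(NotMask);           // 48 -- multiple of 8
    unsigned TZ = __builtin_ctzll(NotMask);           // 8  -- multiple of 8
    LZ -= 64 - 32;                                    // rebase i64 -> i32: 16
    unsigned MaskedBytes = (32 - LZ - TZ) / 8;        // (32 - 16 - 8) / 8 == 1
    unsigned ByteShift = TZ / 8;                      // 1
    return {MaskedBytes, ByteShift};                  // {1, 1}: one byte, offset 1
  }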
13178
13179/// Check to see if IVal is something that provides a value as specified by
13180/// MaskInfo. If so, replace the specified store with a narrower store of
13181/// truncated IVal.
13182static SDNode *
13183ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
13184 SDValue IVal, StoreSDNode *St,
13185 DAGCombiner *DC) {
13186 unsigned NumBytes = MaskInfo.first;
13187 unsigned ByteShift = MaskInfo.second;
13188 SelectionDAG &DAG = DC->getDAG();
13189
13190 // Check to see if IVal is all zeros in the part being masked in by the 'or'
13191 // that uses this. If not, this is not a replacement.
13192 APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
13193 ByteShift*8, (ByteShift+NumBytes)*8);
13194 if (!DAG.MaskedValueIsZero(IVal, Mask)) return nullptr;
13195
13196 // Check that it is legal on the target to do this. It is legal if the new
13197 // VT we're shrinking to (i8/i16/i32) is legal or we're still before type
13198 // legalization.
13199 MVT VT = MVT::getIntegerVT(NumBytes*8);
13200 if (!DC->isTypeLegal(VT))
13201 return nullptr;
13202
13203 // Okay, we can do this! Replace the 'St' store with a store of IVal that is
13204 // shifted by ByteShift and truncated down to NumBytes.
13205 if (ByteShift) {
13206 SDLoc DL(IVal);
13207 IVal = DAG.getNode(ISD::SRL, DL, IVal.getValueType(), IVal,
13208 DAG.getConstant(ByteShift*8, DL,
13209 DC->getShiftAmountTy(IVal.getValueType())));
13210 }
13211
13212 // Figure out the offset for the store and the alignment of the access.
13213 unsigned StOffset;
13214 unsigned NewAlign = St->getAlignment();
13215
13216 if (DAG.getDataLayout().isLittleEndian())
13217 StOffset = ByteShift;
13218 else
13219 StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;
13220
13221 SDValue Ptr = St->getBasePtr();
13222 if (StOffset) {
13223 SDLoc DL(IVal);
13224 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(),
13225 Ptr, DAG.getConstant(StOffset, DL, Ptr.getValueType()));
13226 NewAlign = MinAlign(NewAlign, StOffset);
13227 }
13228
13229 // Truncate down to the new size.
13230 IVal = DAG.getNode(ISD::TRUNCATE, SDLoc(IVal), VT, IVal);
13231
13232 ++OpsNarrowed;
13233 return DAG
13234 .getStore(St->getChain(), SDLoc(St), IVal, Ptr,
13235 St->getPointerInfo().getWithOffset(StOffset), NewAlign)
13236 .getNode();
13237}
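Continuing the {1, 1} example for an i32 store, the StOffset computation above reduces to the following (a sketch; the helper is hypothetical):

  unsigned storeOffset(bool LittleEndian, unsigned StoreSize,
                       unsigned ByteShift, unsigned NumBytes) {
    return LittleEndian ? ByteShift : StoreSize - ByteShift - NumBytes;
  }
  // storeOffset(true, 4, 1, 1) == 1; storeOffset(false, 4, 1, 1) == 2.
  // Either way the one replacement byte lands on the byte the mask cleared.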
13238
13239/// Look for sequence of load / op / store where op is one of 'or', 'xor', and
13240/// 'and' of immediates. If 'op' is only touching some of the loaded bits, try
13241/// narrowing the load and store if it would end up being a win for performance
13242/// or code size.
13243SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
13244 StoreSDNode *ST = cast<StoreSDNode>(N);
13245 if (ST->isVolatile())
13246 return SDValue();
13247
13248 SDValue Chain = ST->getChain();
13249 SDValue Value = ST->getValue();
13250 SDValue Ptr = ST->getBasePtr();
13251 EVT VT = Value.getValueType();
13252
13253 if (ST->isTruncatingStore() || VT.isVector() || !Value.hasOneUse())
13254 return SDValue();
13255
13256 unsigned Opc = Value.getOpcode();
13257
13258 // If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
13259 // is a byte mask indicating a consecutive number of bytes, check to see if
13260 // Y is known to provide just those bytes. If so, we try to replace the
13261 // load + replace + store sequence with a single (narrower) store, which makes
13262 // the load dead.
13263 if (Opc == ISD::OR) {
13264 std::pair<unsigned, unsigned> MaskedLoad;
13265 MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain);
13266 if (MaskedLoad.first)
13267 if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
13268 Value.getOperand(1), ST,this))
13269 return SDValue(NewST, 0);
13270
13271 // Or is commutative, so try swapping X and Y.
13272 MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
13273 if (MaskedLoad.first)
13274 if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
13275 Value.getOperand(0), ST,this))
13276 return SDValue(NewST, 0);
13277 }
13278
13279 if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
13280 Value.getOperand(1).getOpcode() != ISD::Constant)
13281 return SDValue();
13282
13283 SDValue N0 = Value.getOperand(0);
13284 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
13285 Chain == SDValue(N0.getNode(), 1)) {
13286 LoadSDNode *LD = cast<LoadSDNode>(N0);
13287 if (LD->getBasePtr() != Ptr ||
13288 LD->getPointerInfo().getAddrSpace() !=
13289 ST->getPointerInfo().getAddrSpace())
13290 return SDValue();
13291
13292 // Find the type to narrow the load / op / store to.
13293 SDValue N1 = Value.getOperand(1);
13294 unsigned BitWidth = N1.getValueSizeInBits();
13295 APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue();
13296 if (Opc == ISD::AND)
13297 Imm ^= APInt::getAllOnesValue(BitWidth);
13298 if (Imm == 0 || Imm.isAllOnesValue())
13299 return SDValue();
13300 unsigned ShAmt = Imm.countTrailingZeros();
13301 unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1;
13302 unsigned NewBW = NextPowerOf2(MSB - ShAmt);
13303 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
13304 // The narrowing should be profitable, the load/store operation should be
13305 // legal (or custom) and the store size should be equal to the NewVT width.
13306 while (NewBW < BitWidth &&
13307 (NewVT.getStoreSizeInBits() != NewBW ||
13308 !TLI.isOperationLegalOrCustom(Opc, NewVT) ||
13309 !TLI.isNarrowingProfitable(VT, NewVT))) {
13310 NewBW = NextPowerOf2(NewBW);
13311 NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
13312 }
13313 if (NewBW >= BitWidth)
13314 return SDValue();
13315
13316 // If the lowest changed bit does not start at a NewBW boundary,
13317 // start at the previous one.
13318 if (ShAmt % NewBW)
13319 ShAmt = (((ShAmt + NewBW - 1) / NewBW) * NewBW) - NewBW;
13320 APInt Mask = APInt::getBitsSet(BitWidth, ShAmt,
13321 std::min(BitWidth, ShAmt + NewBW));
13322 if ((Imm & Mask) == Imm) {
13323 APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW);
13324 if (Opc == ISD::AND)
13325 NewImm ^= APInt::getAllOnesValue(NewBW);
13326 uint64_t PtrOff = ShAmt / 8;
13327 // For big endian targets, we need to adjust the offset to the pointer to
13328 // load the correct bytes.
13329 if (DAG.getDataLayout().isBigEndian())
13330 PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;
13331
13332 unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
13333 Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
13334 if (NewAlign < DAG.getDataLayout().getABITypeAlignment(NewVTTy))
13335 return SDValue();
13336
13337 SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LD),
13338 Ptr.getValueType(), Ptr,
13339 DAG.getConstant(PtrOff, SDLoc(LD),
13340 Ptr.getValueType()));
13341 SDValue NewLD =
13342 DAG.getLoad(NewVT, SDLoc(N0), LD->getChain(), NewPtr,
13343 LD->getPointerInfo().getWithOffset(PtrOff), NewAlign,
13344 LD->getMemOperand()->getFlags(), LD->getAAInfo());
13345 SDValue NewVal = DAG.getNode(Opc, SDLoc(Value), NewVT, NewLD,
13346 DAG.getConstant(NewImm, SDLoc(Value),
13347 NewVT));
13348 SDValue NewST =
13349 DAG.getStore(Chain, SDLoc(N), NewVal, NewPtr,
13350 ST->getPointerInfo().getWithOffset(PtrOff), NewAlign);
13351
13352 AddToWorklist(NewPtr.getNode());
13353 AddToWorklist(NewLD.getNode());
13354 AddToWorklist(NewVal.getNode());
13355 WorklistRemover DeadNodes(*this);
13356 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1));
13357 ++OpsNarrowed;
13358 return NewST;
13359 }
13360 }
13361
13362 return SDValue();
13363}
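Worked numbers for one shape this narrows, store (or (load i32 @p), 0x00FF0000) back to @p (illustrative constants; NextPowerOf2 rounds 7 up to 8):

  #include <cstdint>

  enum : uint32_t { Imm = 0x00FF0000 };        // BitWidth = 32
  enum : unsigned {
    ShAmt = 16,                                // Imm.countTrailingZeros()
    MSB = 23,                                  // 32 - countLeadingZeros - 1
    NewBW = 8,                                 // NextPowerOf2(MSB - ShAmt) -> i8
    PtrOffLE = ShAmt / 8,                      // 2 on little-endian
    PtrOffBE = (32 + 7 - NewBW) / 8 - PtrOffLE // 1 on big-endian
  };
  // The i32 load/or/store becomes an i8 sequence: p8[2] = p8[2] | 0xFF.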
13364
13365/// For a given floating point load / store pair, if the load value isn't used
13366/// by any other operations, then consider transforming the pair to integer
13367/// load / store operations if the target deems the transformation profitable.
13368SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
13369 StoreSDNode *ST = cast<StoreSDNode>(N);
13370 SDValue Chain = ST->getChain();
13371 SDValue Value = ST->getValue();
13372 if (ISD::isNormalStore(ST) && ISD::isNormalLoad(Value.getNode()) &&
13373 Value.hasOneUse() &&
13374 Chain == SDValue(Value.getNode(), 1)) {
13375 LoadSDNode *LD = cast<LoadSDNode>(Value);
13376 EVT VT = LD->getMemoryVT();
13377 if (!VT.isFloatingPoint() ||
13378 VT != ST->getMemoryVT() ||
13379 LD->isNonTemporal() ||
13380 ST->isNonTemporal() ||
13381 LD->getPointerInfo().getAddrSpace() != 0 ||
13382 ST->getPointerInfo().getAddrSpace() != 0)
13383 return SDValue();
13384
13385 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
13386 if (!TLI.isOperationLegal(ISD::LOAD, IntVT) ||
13387 !TLI.isOperationLegal(ISD::STORE, IntVT) ||
13388 !TLI.isDesirableToTransformToIntegerOp(ISD::LOAD, VT) ||
13389 !TLI.isDesirableToTransformToIntegerOp(ISD::STORE, VT))
13390 return SDValue();
13391
13392 unsigned LDAlign = LD->getAlignment();
13393 unsigned STAlign = ST->getAlignment();
13394 Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
13395 unsigned ABIAlign = DAG.getDataLayout().getABITypeAlignment(IntVTTy);
13396 if (LDAlign < ABIAlign || STAlign < ABIAlign)
13397 return SDValue();
13398
13399 SDValue NewLD =
13400 DAG.getLoad(IntVT, SDLoc(Value), LD->getChain(), LD->getBasePtr(),
13401 LD->getPointerInfo(), LDAlign);
13402
13403 SDValue NewST =
13404 DAG.getStore(NewLD.getValue(1), SDLoc(N), NewLD, ST->getBasePtr(),
13405 ST->getPointerInfo(), STAlign);
13406
13407 AddToWorklist(NewLD.getNode());
13408 AddToWorklist(NewST.getNode());
13409 WorklistRemover DeadNodes(*this);
13410 DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1));
13411 ++LdStFP2Int;
13412 return NewST;
13413 }
13414
13415 return SDValue();
13416}
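The shape this targets, sketched at the C level (assuming the target reports the integer form as desirable, e.g. to avoid a round-trip through FP registers):

  // A float copied through memory, with no other use of the loaded value:
  void copyf(float *Dst, const float *Src) { *Dst = *Src; }
  // may be emitted as an i32 load plus an i32 store instead of FP ops.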
13417
13418// This is a helper function for visitMUL to check the profitability
13419// of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
13420// MulNode is the original multiply, AddNode is (add x, c1),
13421// and ConstNode is c2.
13422//
13423// If the (add x, c1) has multiple uses, we could increase
13424// the number of adds if we make this transformation.
13425// It would only be worth doing this if we can remove a
13426// multiply in the process. Check for that here.
13427// To illustrate:
13428// (A + c1) * c3
13429// (A + c2) * c3
13430// We're checking for cases where we have common "c3 * A" expressions.
13431bool DAGCombiner::isMulAddWithConstProfitable(SDNode *MulNode,
13432 SDValue &AddNode,
13433 SDValue &ConstNode) {
13434 APInt Val;
13435
13436 // If the add only has one use, this would be OK to do.
13437 if (AddNode.getNode()->hasOneUse())
13438 return true;
13439
13440 // Walk all the users of the constant with which we're multiplying.
13441 for (SDNode *Use : ConstNode->uses()) {
13442 if (Use == MulNode) // This use is the one we're on right now. Skip it.
13443 continue;
13444
13445 if (Use->getOpcode() == ISD::MUL) { // We have another multiply use.
13446 SDNode *OtherOp;
13447 SDNode *MulVar = AddNode.getOperand(0).getNode();
13448
13449 // OtherOp is what we're multiplying against the constant.
13450 if (Use->getOperand(0) == ConstNode)
13451 OtherOp = Use->getOperand(1).getNode();
13452 else
13453 OtherOp = Use->getOperand(0).getNode();
13454
13455 // Check to see if multiply is with the same operand of our "add".
13456 //
13457 // ConstNode = CONST
13458 // Use = ConstNode * A <-- visiting Use. OtherOp is A.
13459 // ...
13460 // AddNode = (A + c1) <-- MulVar is A.
13461 // = AddNode * ConstNode <-- current visiting instruction.
13462 //
13463 // If we make this transformation, we will have a common
13464 // multiply (ConstNode * A) that we can save.
13465 if (OtherOp == MulVar)
13466 return true;
13467
13468 // Now check to see if a future expansion will give us a common
13469 // multiply.
13470 //
13471 // ConstNode = CONST
13472 // AddNode = (A + c1)
13473 // ... = AddNode * ConstNode <-- current visiting instruction.
13474 // ...
13475 // OtherOp = (A + c2)
13476 // Use = OtherOp * ConstNode <-- visiting Use.
13477 //
13478 // If we make this transformation, we will have a common
13479 // multiply (CONST * A) after we also do the same transformation
13480 // to the "t2" instruction.
13481 if (OtherOp->getOpcode() == ISD::ADD &&
13482 DAG.isConstantIntBuildVectorOrConstantInt(OtherOp->getOperand(1)) &&
13483 OtherOp->getOperand(0).getNode() == MulVar)
13484 return true;
13485 }
13486 }
13487
13488 // Didn't find a case where this would be profitable.
13489 return false;
13490}
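The payoff the check looks for, on concrete constants (c1 = 3, c2 = 4, c3 = 5 are made up for illustration):

  // (A + 3) * 5 -> A*5 + 15 and (A + 4) * 5 -> A*5 + 20 share one multiply:
  int demo(int A) {
    int M = A * 5;              // the common "c3 * A" expression
    return (M + 15) ^ (M + 20); // both products, a single mul total
  }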
13491
13492SDValue DAGCombiner::getMergeStoreChains(SmallVectorImpl<MemOpLink> &StoreNodes,
13493 unsigned NumStores) {
13494 SmallVector<SDValue, 8> Chains;
13495 SmallPtrSet<const SDNode *, 8> Visited;
13496 SDLoc StoreDL(StoreNodes[0].MemNode);
13497
13498 for (unsigned i = 0; i < NumStores; ++i) {
13499 Visited.insert(StoreNodes[i].MemNode);
13500 }
13501
13502 // Don't include chains that are themselves one of the stores being merged.
13503 for (unsigned i = 0; i < NumStores; ++i) {
13504 if (Visited.count(StoreNodes[i].MemNode->getChain().getNode()) == 0)
13505 Chains.push_back(StoreNodes[i].MemNode->getChain());
13506 }
13507
13508 assert(Chains.size() > 0 && "Chain should have generated a chain");
13509 return DAG.getNode(ISD::TokenFactor, StoreDL, MVT::Other, Chains);
13510}
13511
13512bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
13513 SmallVectorImpl<MemOpLink> &StoreNodes, EVT MemVT, unsigned NumStores,
13514 bool IsConstantSrc, bool UseVector, bool UseTrunc) {
13515 // Make sure we have something to merge.
13516 if (NumStores < 2)
13517 return false;
13518
13519 // The latest Node in the DAG.
13520 SDLoc DL(StoreNodes[0].MemNode);
13521
13522 int64_t ElementSizeBits = MemVT.getStoreSizeInBits();
13523 unsigned SizeInBits = NumStores * ElementSizeBits;
13524 unsigned NumMemElts = MemVT.isVector() ? MemVT.getVectorNumElements() : 1;
13525
13526 EVT StoreTy;
13527 if (UseVector) {
13528 unsigned Elts = NumStores * NumMemElts;
13529 // Get the type for the merged vector store.
13530 StoreTy = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts);
13531 } else
13532 StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
13533
13534 SDValue StoredVal;
13535 if (UseVector) {
13536 if (IsConstantSrc) {
13537 SmallVector<SDValue, 8> BuildVector;
13538 for (unsigned I = 0; I != NumStores; ++I) {
13539 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[I].MemNode);
13540 SDValue Val = St->getValue();
13541 // If constant is of the wrong type, convert it now.
13542 if (MemVT != Val.getValueType()) {
13543 Val = peekThroughBitcast(Val);
13544 // Deal with constants of wrong size.
13545 if (ElementSizeBits != Val.getValueSizeInBits()) {
13546 EVT IntMemVT =
13547 EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
13548 if (isa<ConstantFPSDNode>(Val)) {
13549 // Not clear how to truncate FP values.
13550 return false;
13551 } else if (auto *C = dyn_cast<ConstantSDNode>(Val))
13552 Val = DAG.getConstant(C->getAPIntValue()
13553 .zextOrTrunc(Val.getValueSizeInBits())
13554 .zextOrTrunc(ElementSizeBits),
13555 SDLoc(C), IntMemVT);
13556 }
13557 // Bitcast to make sure the value has the correct (memory) type.
13558 Val = DAG.getBitcast(MemVT, Val);
13559 }
13560 BuildVector.push_back(Val);
13561 }
13562 StoredVal = DAG.getNode(MemVT.isVector() ? ISD::CONCAT_VECTORS
13563 : ISD::BUILD_VECTOR,
13564 DL, StoreTy, BuildVector);
13565 } else {
13566 SmallVector<SDValue, 8> Ops;
13567 for (unsigned i = 0; i < NumStores; ++i) {
13568 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
13569 SDValue Val = peekThroughBitcast(St->getValue());
13570 // All operands of BUILD_VECTOR / CONCAT_VECTOR must be of
13571 // type MemVT. If the underlying value is not the correct
13572 // type, but it is an extraction of an appropriate vector we
13573 // can recast Val to be of the correct type. This may require
13574 // converting between EXTRACT_VECTOR_ELT and
13575 // EXTRACT_SUBVECTOR.
13576 if ((MemVT != Val.getValueType()) &&
13577 (Val.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
13578 Val.getOpcode() == ISD::EXTRACT_SUBVECTOR)) {
13579 SDValue Vec = Val.getOperand(0);
13580 EVT MemVTScalarTy = MemVT.getScalarType();
13581 // We may need to add a bitcast here to get types to line up.
13582 if (MemVTScalarTy != Vec.getValueType()) {
13583 unsigned Elts = Vec.getValueType().getSizeInBits() /
13584 MemVTScalarTy.getSizeInBits();
13585 EVT NewVecTy =
13586 EVT::getVectorVT(*DAG.getContext(), MemVTScalarTy, Elts);
13587 Vec = DAG.getBitcast(NewVecTy, Vec);
13588 }
13589 auto OpC = (MemVT.isVector()) ? ISD::EXTRACT_SUBVECTOR
13590 : ISD::EXTRACT_VECTOR_ELT;
13591 Val = DAG.getNode(OpC, SDLoc(Val), MemVT, Vec, Val.getOperand(1));
13592 }
13593 Ops.push_back(Val);
13594 }
13595
13596 // Build the extracted vector elements back into a vector.
13597 StoredVal = DAG.getNode(MemVT.isVector() ? ISD::CONCAT_VECTORS
13598 : ISD::BUILD_VECTOR,
13599 DL, StoreTy, Ops);
13600 }
13601 } else {
13602 // We should always use a vector store when merging extracted vector
13603 // elements, so this path implies a store of constants.
13604 assert(IsConstantSrc && "Merged vector elements should use vector store");
13605
13606 APInt StoreInt(SizeInBits, 0);
13607
13608 // Construct a single integer constant which is made of the smaller
13609 // constant inputs.
13610 bool IsLE = DAG.getDataLayout().isLittleEndian();
13611 for (unsigned i = 0; i < NumStores; ++i) {
13612 unsigned Idx = IsLE ? (NumStores - 1 - i) : i;
13613 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
13614
13615 SDValue Val = St->getValue();
13616 Val = peekThroughBitcast(Val);
13617 StoreInt <<= ElementSizeBits;
13618 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) {
13619 StoreInt |= C->getAPIntValue()
13620 .zextOrTrunc(ElementSizeBits)
13621 .zextOrTrunc(SizeInBits);
13622 } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) {
13623 StoreInt |= C->getValueAPF()
13624 .bitcastToAPInt()
13625 .zextOrTrunc(ElementSizeBits)
13626 .zextOrTrunc(SizeInBits);
13627 // If fp truncation is necessary, give up for now.
13628 if (MemVT.getSizeInBits() != ElementSizeBits)
13629 return false;
13630 } else {
13631 llvm_unreachable("Invalid constant element type")::llvm::llvm_unreachable_internal("Invalid constant element type"
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/CodeGen/SelectionDAG/DAGCombiner.cpp"
, 13631)
;
13632 }
13633 }
13634
13635 // Create the new Load and Store operations.
13636 StoredVal = DAG.getConstant(StoreInt, DL, StoreTy);
13637 }
13638
13639 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
13640 SDValue NewChain = getMergeStoreChains(StoreNodes, NumStores);
13641
13642 // Make sure we use a trunc store when it is necessary for legality.
13643 SDValue NewStore;
13644 if (!UseTrunc) {
13645 NewStore = DAG.getStore(NewChain, DL, StoredVal, FirstInChain->getBasePtr(),
13646 FirstInChain->getPointerInfo(),
13647 FirstInChain->getAlignment());
13648 } else { // Must be realized as a trunc store
13649 EVT LegalizedStoredValTy =
13650 TLI.getTypeToTransformTo(*DAG.getContext(), StoredVal.getValueType());
13651 unsigned LegalizedStoreSize = LegalizedStoredValTy.getSizeInBits();
13652 ConstantSDNode *C = cast<ConstantSDNode>(StoredVal);
13653 SDValue ExtendedStoreVal =
13654 DAG.getConstant(C->getAPIntValue().zextOrTrunc(LegalizedStoreSize), DL,
13655 LegalizedStoredValTy);
13656 NewStore = DAG.getTruncStore(
13657 NewChain, DL, ExtendedStoreVal, FirstInChain->getBasePtr(),
13658 FirstInChain->getPointerInfo(), StoredVal.getValueType() /*TVT*/,
13659 FirstInChain->getAlignment(),
13660 FirstInChain->getMemOperand()->getFlags());
13661 }
13662
13663 // Replace all merged stores with the new store.
13664 for (unsigned i = 0; i < NumStores; ++i)
13665 CombineTo(StoreNodes[i].MemNode, NewStore);
13666
13667 AddToWorklist(NewChain.getNode());
13668 return true;
13669}
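How the StoreInt loop packs constants, shown for two consecutive i16 stores on a little-endian target (a sketch; the values are made up):

  #include <cstdint>
  #include <cstring>

  void merged(uint16_t *P) {
    // Original stores: P[0] = 0x1234; P[1] = 0x5678;
    // IsLE visits the later store first (Idx = NumStores - 1 - i):
    uint32_t StoreInt = 0;
    StoreInt = (StoreInt << 16) | 0x5678; // Idx = 1
    StoreInt = (StoreInt << 16) | 0x1234; // Idx = 0 -> 0x56781234
    std::memcpy(P, &StoreInt, 4); // bytes 34 12 78 56, same as before
  }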
13670
13671void DAGCombiner::getStoreMergeCandidates(
13672 StoreSDNode *St, SmallVectorImpl<MemOpLink> &StoreNodes,
13673 SDNode *&RootNode) {
13674 // This holds the base pointer, index, and the offset in bytes from the base
13675 // pointer.
13676 BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
13677 EVT MemVT = St->getMemoryVT();
13678
13679 SDValue Val = peekThroughBitcast(St->getValue());
13680 // We must have a base and an offset.
13681 if (!BasePtr.getBase().getNode())
12: Assuming the condition is true
13: Taking true branch
13682 return;
14: Returning without writing to 'RootNode'
13683
13684 // Do not handle stores to undef base pointers.
13685 if (BasePtr.getBase().isUndef())
13686 return;
13687
13688 bool IsConstantSrc = isa<ConstantSDNode>(Val) || isa<ConstantFPSDNode>(Val);
13689 bool IsExtractVecSrc = (Val.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
13690 Val.getOpcode() == ISD::EXTRACT_SUBVECTOR);
13691 bool IsLoadSrc = isa<LoadSDNode>(Val);
13692 BaseIndexOffset LBasePtr;
13693 // Match on loadbaseptr if relevant.
13694 EVT LoadVT;
13695 if (IsLoadSrc) {
13696 auto *Ld = cast<LoadSDNode>(Val);
13697 LBasePtr = BaseIndexOffset::match(Ld, DAG);
13698 LoadVT = Ld->getMemoryVT();
13699 // Load and store should be the same type.
13700 if (MemVT != LoadVT)
13701 return;
13702 // Loads must only have one use.
13703 if (!Ld->hasNUsesOfValue(1, 0))
13704 return;
13705 // The memory operands must not be volatile.
13706 if (Ld->isVolatile() || Ld->isIndexed())
13707 return;
13708 }
13709 auto CandidateMatch = [&](StoreSDNode *Other, BaseIndexOffset &Ptr,
13710 int64_t &Offset) -> bool {
13711 if (Other->isVolatile() || Other->isIndexed())
13712 return false;
13713 SDValue Val = peekThroughBitcast(Other->getValue());
13714 // Allow merging constants of different types as integers.
13715 bool NoTypeMatch = (MemVT.isInteger()) ? !MemVT.bitsEq(Other->getMemoryVT())
13716 : Other->getMemoryVT() != MemVT;
13717 if (IsLoadSrc) {
13718 if (NoTypeMatch)
13719 return false;
13720 // The Load's Base Ptr must also match
13721 if (LoadSDNode *OtherLd = dyn_cast<LoadSDNode>(Val)) {
13722 auto LPtr = BaseIndexOffset::match(OtherLd, DAG);
13723 if (LoadVT != OtherLd->getMemoryVT())
13724 return false;
13725 // Loads must only have one use.
13726 if (!OtherLd->hasNUsesOfValue(1, 0))
13727 return false;
13728 // The memory operands must not be volatile.
13729 if (OtherLd->isVolatile() || OtherLd->isIndexed())
13730 return false;
13731 if (!(LBasePtr.equalBaseIndex(LPtr, DAG)))
13732 return false;
13733 } else
13734 return false;
13735 }
13736 if (IsConstantSrc) {
13737 if (NoTypeMatch)
13738 return false;
13739 if (!(isa<ConstantSDNode>(Val) || isa<ConstantFPSDNode>(Val)))
13740 return false;
13741 }
13742 if (IsExtractVecSrc) {
13743 // Do not merge truncated stores here.
13744 if (Other->isTruncatingStore())
13745 return false;
13746 if (!MemVT.bitsEq(Val.getValueType()))
13747 return false;
13748 if (Val.getOpcode() != ISD::EXTRACT_VECTOR_ELT &&
13749 Val.getOpcode() != ISD::EXTRACT_SUBVECTOR)
13750 return false;
13751 }
13752 Ptr = BaseIndexOffset::match(Other, DAG);
13753 return (BasePtr.equalBaseIndex(Ptr, DAG, Offset));
13754 };
13755
13756 // We are looking for a root node that is an ancestor to all mergeable
13757 // stores. We search up through a load, to our root, and then down
13758 // through all children. For instance, we will find Store{1,2,3} if
13759 // St is Store1, Store2, or Store3 where the root is not a load,
13760 // which is always true for nonvolatile ops. TODO: Expand
13761 // the search to find all valid candidates through multiple layers of loads.
13762 //
13763 // Root
13764 // |-------|-------|
13765 // Load Load Store3
13766 // | |
13767 // Store1 Store2
13768 //
13769 // FIXME: We should be able to climb and
13770 // descend TokenFactors to find candidates as well.
13771
13772 RootNode = St->getChain().getNode();
13773
13774 if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(RootNode)) {
13775 RootNode = Ldn->getChain().getNode();
13776 for (auto I = RootNode->use_begin(), E = RootNode->use_end(); I != E; ++I)
13777 if (I.getOperandNo() == 0 && isa<LoadSDNode>(*I)) // walk down chain
13778 for (auto I2 = (*I)->use_begin(), E2 = (*I)->use_end(); I2 != E2; ++I2)
13779 if (I2.getOperandNo() == 0)
13780 if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I2)) {
13781 BaseIndexOffset Ptr;
13782 int64_t PtrDiff;
13783 if (CandidateMatch(OtherST, Ptr, PtrDiff))
13784 StoreNodes.push_back(MemOpLink(OtherST, PtrDiff));
13785 }
13786 } else
13787 for (auto I = RootNode->use_begin(), E = RootNode->use_end(); I != E; ++I)
13788 if (I.getOperandNo() == 0)
13789 if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I)) {
13790 BaseIndexOffset Ptr;
13791 int64_t PtrDiff;
13792 if (CandidateMatch(OtherST, Ptr, PtrDiff))
13793 StoreNodes.push_back(MemOpLink(OtherST, PtrDiff));
13794 }
13795}
13796
13797// We need to check that merging these stores does not cause a loop in
13798// the DAG. Any store candidate may depend on another candidate
13799// indirectly through its operand (we already consider dependencies
13800// through the chain). Check in parallel by searching up from
13801// non-chain operands of candidates.
13802bool DAGCombiner::checkMergeStoreCandidatesForDependencies(
13803 SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumStores,
13804 SDNode *RootNode) {
13805 // FIXME: We should be able to truncate a full search of
13806 // predecessors by doing a BFS and keeping tabs on the originating
13807 // stores from which worklist nodes come, in a similar way to
13808 // TokenFactor simplification.
13809
13810 SmallPtrSet<const SDNode *, 32> Visited;
13811 SmallVector<const SDNode *, 8> Worklist;
13812
13813 // RootNode is a predecessor to all candidates so we need not search
13814 // past it. Add RootNode (peeking through TokenFactors). Do not count
13815 // these towards the size check.
13816
13817 Worklist.push_back(RootNode);
13818 while (!Worklist.empty()) {
13819 auto N = Worklist.pop_back_val();
13820 if (N->getOpcode() == ISD::TokenFactor) {
13821 for (SDValue Op : N->ops())
13822 Worklist.push_back(Op.getNode());
13823 }
13824 Visited.insert(N);
13825 }
13826
13827 // Don't count pruning nodes towards max.
13828 unsigned int Max = 1024 + Visited.size();
13829 // Search Ops of store candidates.
13830 for (unsigned i = 0; i < NumStores; ++i) {
13831 SDNode *N = StoreNodes[i].MemNode;
13832 // Of the 4 Store Operands:
13833 // * Chain (Op 0) -> We have already considered these
13834 // in candidate selection and can be
13835 // safely ignored
13836 // * Value (Op 1) -> Cycles may happen (e.g. through load chains)
13837 // * Address (Op 2) -> Merged addresses may only vary by a fixed constant
13838 // and so no cycles are possible.
13839 // * (Op 3) -> appears to always be undef. Cannot be source of cycle.
13840 //
13841 // Thus we need only check predecessors of the value operands.
13842 auto *Op = N->getOperand(1).getNode();
13843 if (Visited.insert(Op).second)
13844 Worklist.push_back(Op);
13845 }
13846 // Search through DAG. We can stop early if we find a store node.
13847 for (unsigned i = 0; i < NumStores; ++i)
13848 if (SDNode::hasPredecessorHelper(StoreNodes[i].MemNode, Visited, Worklist,
13849 Max))
13850 return false;
13851 return true;
13852}
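The cycle this guards against, with illustrative node names:

  Store1 --(chain)--> Load --(value)--> Store2

If Load's chain operand is Store1 and Store2's stored value comes from Load, merging Store1 and Store2 into one node would require that node to execute both before Load (as Store1 must) and after it (as Store2 must), i.e. the merged DAG would contain a cycle.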
13853
13854bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
13855 if (OptLevel == CodeGenOpt::None)
1: Assuming the condition is false
2: Taking false branch
13856 return false;
13857
13858 EVT MemVT = St->getMemoryVT();
13859 int64_t ElementSizeBytes = MemVT.getStoreSize();
13860 unsigned NumMemElts = MemVT.isVector() ? MemVT.getVectorNumElements() : 1;
3: '?' condition is false
13861
13862 if (MemVT.getSizeInBits() * 2 > MaximumLegalStoreInBits)
4: Assuming the condition is false
5: Taking false branch
13863 return false;
13864
13865 bool NoVectors = DAG.getMachineFunction().getFunction().hasFnAttribute(
13866 Attribute::NoImplicitFloat);
13867
13868 // This function cannot currently deal with non-byte-sized memory sizes.
13869 if (ElementSizeBytes * 8 != MemVT.getSizeInBits())
6: Assuming the condition is false
7: Taking false branch
13870 return false;
13871
13872 if (!MemVT.isSimple())
8: Taking false branch
13873 return false;
13874
13875 // Perform an early exit check. Do not bother looking at stored values that
13876 // are not constants, loads, or extracted vector elements.
13877 SDValue StoredVal = peekThroughBitcast(St->getValue());
13878 bool IsLoadSrc = isa<LoadSDNode>(StoredVal);
13879 bool IsConstantSrc = isa<ConstantSDNode>(StoredVal) ||
13880 isa<ConstantFPSDNode>(StoredVal);
13881 bool IsExtractVecSrc = (StoredVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
9: Assuming the condition is true
13882 StoredVal.getOpcode() == ISD::EXTRACT_SUBVECTOR);
13883
13884 if (!IsConstantSrc && !IsLoadSrc && !IsExtractVecSrc)
13885 return false;
13886
13887 SmallVector<MemOpLink, 8> StoreNodes;
13888 SDNode *RootNode;
10: 'RootNode' declared without an initial value
13889 // Find potential store merge candidates by searching through chain sub-DAG
13890 getStoreMergeCandidates(St, StoreNodes, RootNode);
11: Calling 'DAGCombiner::getStoreMergeCandidates'
15: Returning from 'DAGCombiner::getStoreMergeCandidates'
13891
13892 // Check if there is anything to merge.
13893 if (StoreNodes.size() < 2)
16: Assuming the condition is false
17: Taking false branch
13894 return false;
13895
13896 // Sort the memory operands according to their distance from the
13897 // base pointer.
13898 llvm::sort(StoreNodes.begin(), StoreNodes.end(),
13899 [](MemOpLink LHS, MemOpLink RHS) {
13900 return LHS.OffsetFromBase < RHS.OffsetFromBase;
13901 });
13902
13903 // Store Merge attempts to merge the lowest stores. This generally
13904 // works out well: if a merge succeeds, the remaining stores are checked
13905 // after the first collection of stores is merged. However, in the
13906 // case that a non-mergeable store is found first, e.g., {p[-2],
13907 // p[0], p[1], p[2], p[3]}, we would fail and miss the subsequent
13908 // mergeable cases. To prevent this, we prune such stores from the
13909 // front of StoreNodes here.
13910
13911 bool RV = false;
13912 while (StoreNodes.size() > 1) {
18: Assuming the condition is true
19: Loop condition is true. Entering loop body
13913 unsigned StartIdx = 0;
13914 while ((StartIdx + 1 < StoreNodes.size()) &&
21: Loop condition is false. Execution continues on line 13920
13915 StoreNodes[StartIdx].OffsetFromBase + ElementSizeBytes !=
20: Assuming the condition is false
13916 StoreNodes[StartIdx + 1].OffsetFromBase)
13917 ++StartIdx;
13918
13919 // Bail if we don't have enough candidates to merge.
13920 if (StartIdx + 1 >= StoreNodes.size())
22: Assuming the condition is false
23: Taking false branch
13921 return RV;
13922
13923 if (StartIdx)
24: Taking false branch
13924 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + StartIdx);
13925
13926 // Scan the memory operations on the chain and find the first
13927 // non-consecutive store memory address.
13928 unsigned NumConsecutiveStores = 1;
13929 int64_t StartAddress = StoreNodes[0].OffsetFromBase;
13930 // Check that the addresses are consecutive starting from the second
13931 // element in the list of stores.
13932 for (unsigned i = 1, e = StoreNodes.size(); i < e; ++i) {
25: Assuming 'i' is < 'e'
26: Loop condition is true. Entering loop body
29: Assuming 'i' is >= 'e'
30: Loop condition is false. Execution continues on line 13939
13933 int64_t CurrAddress = StoreNodes[i].OffsetFromBase;
13934 if (CurrAddress - StartAddress != (ElementSizeBytes * i))
27: Assuming the condition is false
28: Taking false branch
13935 break;
13936 NumConsecutiveStores = i + 1;
13937 }
13938
13939 if (NumConsecutiveStores < 2) {
31: Taking false branch
13940 StoreNodes.erase(StoreNodes.begin(),
13941 StoreNodes.begin() + NumConsecutiveStores);
13942 continue;
13943 }
13944
13945 // The node with the lowest store address.
13946 LLVMContext &Context = *DAG.getContext();
13947 const DataLayout &DL = DAG.getDataLayout();
13948
13949 // Store the constants into memory as one consecutive store.
13950 if (IsConstantSrc) {
32: Taking false branch
13951 while (NumConsecutiveStores >= 2) {
13952 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
13953 unsigned FirstStoreAS = FirstInChain->getAddressSpace();
13954 unsigned FirstStoreAlign = FirstInChain->getAlignment();
13955 unsigned LastLegalType = 1;
13956 unsigned LastLegalVectorType = 1;
13957 bool LastIntegerTrunc = false;
13958 bool NonZero = false;
13959 unsigned FirstZeroAfterNonZero = NumConsecutiveStores;
13960 for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
13961 StoreSDNode *ST = cast<StoreSDNode>(StoreNodes[i].MemNode);
13962 SDValue StoredVal = ST->getValue();
13963 bool IsElementZero = false;
13964 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(StoredVal))
13965 IsElementZero = C->isNullValue();
13966 else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(StoredVal))
13967 IsElementZero = C->getConstantFPValue()->isNullValue();
13968 if (IsElementZero) {
13969 if (NonZero && FirstZeroAfterNonZero == NumConsecutiveStores)
13970 FirstZeroAfterNonZero = i;
13971 }
13972 NonZero |= !IsElementZero;
13973
13974 // Find a legal type for the constant store.
13975 unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8;
13976 EVT StoreTy = EVT::getIntegerVT(Context, SizeInBits);
13977 bool IsFast = false;
13978
13979 // Break early when size is too large to be legal.
13980 if (StoreTy.getSizeInBits() > MaximumLegalStoreInBits)
13981 break;
13982
13983 if (TLI.isTypeLegal(StoreTy) &&
13984 TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
13985 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
13986 FirstStoreAlign, &IsFast) &&
13987 IsFast) {
13988 LastIntegerTrunc = false;
13989 LastLegalType = i + 1;
13990 // Or check whether a truncstore is legal.
13991 } else if (TLI.getTypeAction(Context, StoreTy) ==
13992 TargetLowering::TypePromoteInteger) {
13993 EVT LegalizedStoredValTy =
13994 TLI.getTypeToTransformTo(Context, StoredVal.getValueType());
13995 if (TLI.isTruncStoreLegal(LegalizedStoredValTy, StoreTy) &&
13996 TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValTy, DAG) &&
13997 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
13998 FirstStoreAlign, &IsFast) &&
13999 IsFast) {
14000 LastIntegerTrunc = true;
14001 LastLegalType = i + 1;
14002 }
14003 }
14004
14005 // We only use vectors if the constant is known to be zero or the
14006 // target allows it and the function is not marked with the
14007 // noimplicitfloat attribute.
14008 if ((!NonZero ||
14009 TLI.storeOfVectorConstantIsCheap(MemVT, i + 1, FirstStoreAS)) &&
14010 !NoVectors) {
14011 // Find a legal type for the vector store.
14012 unsigned Elts = (i + 1) * NumMemElts;
14013 EVT Ty = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);
14014 if (TLI.isTypeLegal(Ty) && TLI.isTypeLegal(MemVT) &&
14015 TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
14016 TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
14017 FirstStoreAlign, &IsFast) &&
14018 IsFast)
14019 LastLegalVectorType = i + 1;
14020 }
14021 }
14022
14023 bool UseVector = (LastLegalVectorType > LastLegalType) && !NoVectors;
14024 unsigned NumElem = (UseVector) ? LastLegalVectorType : LastLegalType;
14025
14026 // Check if we found a legal integer type that creates a meaningful
14027 // merge.
14028 if (NumElem < 2) {
14029 // We know that candidate stores are in order and of correct
14030 // shape. While there is no mergeable sequence from the
14031 // beginning, one may start later in the sequence. The only
14032 // reason a merge of size N could have failed where another of
14033 // the same size would not have, is if the alignment has
14034 // improved or we've dropped a non-zero value. Drop as many
14035 // candidates as we can here.
14036 unsigned NumSkip = 1;
14037 while (
14038 (NumSkip < NumConsecutiveStores) &&
14039 (NumSkip < FirstZeroAfterNonZero) &&
14040 (StoreNodes[NumSkip].MemNode->getAlignment() <= FirstStoreAlign))
14041 NumSkip++;
14042
14043 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
14044 NumConsecutiveStores -= NumSkip;
14045 continue;
14046 }
14047
14048 // Check that we can merge these candidates without causing a cycle.
14049 if (!checkMergeStoreCandidatesForDependencies(StoreNodes, NumElem,
14050 RootNode)) {
14051 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
14052 NumConsecutiveStores -= NumElem;
14053 continue;
14054 }
14055
14056 RV |= MergeStoresOfConstantsOrVecElts(StoreNodes, MemVT, NumElem, true,
14057 UseVector, LastIntegerTrunc);
14058
14059 // Remove merged stores for next iteration.
14060 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
14061 NumConsecutiveStores -= NumElem;
14062 }
14063 continue;
14064 }
14065
14066 // When extracting multiple vector elements, try to store them
14067 // in one vector store rather than a sequence of scalar stores.
14068 if (IsExtractVecSrc) {
33: Taking true branch
14069 // Loop on Consecutive Stores on success.
14070 while (NumConsecutiveStores >= 2) {
34: Loop condition is true. Entering loop body
14071 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
14072 unsigned FirstStoreAS = FirstInChain->getAddressSpace();
14073 unsigned FirstStoreAlign = FirstInChain->getAlignment();
14074 unsigned NumStoresToMerge = 1;
14075 for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
35: Loop condition is true. Entering loop body
38: Loop condition is true. Entering loop body
45: Loop condition is false. Execution continues on line 14096
14076 // Find a legal type for the vector store.
14077 unsigned Elts = (i + 1) * NumMemElts;
14078 EVT Ty =
14079 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts);
14080 bool IsFast;
14081
14082 // Break early when size is too large to be legal.
14083 if (Ty.getSizeInBits() > MaximumLegalStoreInBits)
36: Assuming the condition is false
37: Taking false branch
39: Assuming the condition is false
40: Taking false branch
14084 break;
14085
14086 if (TLI.isTypeLegal(Ty) &&
44: Taking true branch
14087 TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
41: Assuming the condition is true
14088 TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
42: Assuming the condition is true
14089 FirstStoreAlign, &IsFast) &&
14090 IsFast)
43: Assuming 'IsFast' is not equal to 0
14091 NumStoresToMerge = i + 1;
14092 }
14093
14094 // Check if we found a legal integer type creating a meaningful
14095 // merge.
14096 if (NumStoresToMerge < 2) {
46: Taking false branch
14097 // We know that candidate stores are in order and of correct
14098 // shape. While there is no mergeable sequence from the
14099 // beginning, one may start later in the sequence. The only
14100 // reason a merge of size N could have failed where another of
14101 // the same size would not have, is if the alignment has
14102 // improved. Drop as many candidates as we can here.
14103 unsigned NumSkip = 1;
14104 while (
14105 (NumSkip < NumConsecutiveStores) &&
14106 (StoreNodes[NumSkip].MemNode->getAlignment() <= FirstStoreAlign))
14107 NumSkip++;
14108
14109 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
14110 NumConsecutiveStores -= NumSkip;
14111 continue;
14112 }
14113
14114 // Check that we can merge these candidates without causing a cycle.
14115 if (!checkMergeStoreCandidatesForDependencies(
47: 3rd function call argument is an uninitialized value
14116 StoreNodes, NumStoresToMerge, RootNode)) {
14117 StoreNodes.erase(StoreNodes.begin(),
14118 StoreNodes.begin() + NumStoresToMerge);
14119 NumConsecutiveStores -= NumStoresToMerge;
14120 continue;
14121 }
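This is the report's finding: 'RootNode' is declared without an initial value (step 10), getStoreMergeCandidates can return on its early exits without writing to it (step 14), and it is then read here as the 3rd call argument (step 47). On the flagged paths StoreNodes also stays empty, so the size check right after the call appears to prevent the read in practice; the analyzer does not track that correlation. A minimal hardening sketch (an assumption, not necessarily the upstream fix):

  // In DAGCombiner::MergeConsecutiveStores:
  SDNode *RootNode = nullptr; // was: SDNode *RootNode;
  getStoreMergeCandidates(St, StoreNodes, RootNode);

Writing RootNode unconditionally at the top of getStoreMergeCandidates would make the invariant explicit as well.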
14122
14123 RV |= MergeStoresOfConstantsOrVecElts(
14124 StoreNodes, MemVT, NumStoresToMerge, false, true, false);
14125
14126 StoreNodes.erase(StoreNodes.begin(),
14127 StoreNodes.begin() + NumStoresToMerge);
14128 NumConsecutiveStores -= NumStoresToMerge;
14129 }
14130 continue;
14131 }
14132
14133 // Below we handle the case of multiple consecutive stores that
14134 // come from multiple consecutive loads. We merge them into a single
14135 // wide load and a single wide store.
14136
14137 // Look for load nodes which are used by the stored values.
14138 SmallVector<MemOpLink, 8> LoadNodes;
14139
14140 // Find acceptable loads. Loads need to have the same chain (token factor),
14141 // must not be zext, volatile, indexed, and they must be consecutive.
14142 BaseIndexOffset LdBasePtr;
14143
14144 for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
14145 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
14146 SDValue Val = peekThroughBitcast(St->getValue());
14147 LoadSDNode *Ld = cast<LoadSDNode>(Val);
14148
14149 BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld, DAG);
14150 // If this is not the first ptr that we check.
14151 int64_t LdOffset = 0;
14152 if (LdBasePtr.getBase().getNode()) {
14153 // The base ptr must be the same.
14154 if (!LdBasePtr.equalBaseIndex(LdPtr, DAG, LdOffset))
14155 break;
14156 } else {
14157 // Check that all other base pointers are the same as this one.
14158 LdBasePtr = LdPtr;
14159 }
14160
14161 // We found a potential memory operand to merge.
14162 LoadNodes.push_back(MemOpLink(Ld, LdOffset));
14163 }
14164
14165 while (NumConsecutiveStores >= 2 && LoadNodes.size() >= 2) {
14166 // If we have load/store pair instructions and we only have two values,
14167 // don't bother merging.
14168 unsigned RequiredAlignment;
14169 if (LoadNodes.size() == 2 &&
14170 TLI.hasPairedLoad(MemVT, RequiredAlignment) &&
14171 StoreNodes[0].MemNode->getAlignment() >= RequiredAlignment) {
14172 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + 2);
14173 LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + 2);
14174 break;
14175 }
14176 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
14177 unsigned FirstStoreAS = FirstInChain->getAddressSpace();
14178 unsigned FirstStoreAlign = FirstInChain->getAlignment();
14179 LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
14180 unsigned FirstLoadAS = FirstLoad->getAddressSpace();
14181 unsigned FirstLoadAlign = FirstLoad->getAlignment();
14182
14183 // Scan the memory operations on the chain and find the first
14184 // non-consecutive load memory address. These variables hold the index in
14185 // the store node array.
14186
14187 unsigned LastConsecutiveLoad = 1;
14188
14189 // These variables refer to a size, not an index in the array.
14190 unsigned LastLegalVectorType = 1;
14191 unsigned LastLegalIntegerType = 1;
14192 bool isDereferenceable = true;
14193 bool DoIntegerTruncate = false;
14194 StartAddress = LoadNodes[0].OffsetFromBase;
14195 SDValue FirstChain = FirstLoad->getChain();
14196 for (unsigned i = 1; i < LoadNodes.size(); ++i) {
14197 // All loads must share the same chain.
14198 if (LoadNodes[i].MemNode->getChain() != FirstChain)
14199 break;
14200
14201 int64_t CurrAddress = LoadNodes[i].OffsetFromBase;
14202 if (CurrAddress - StartAddress != (ElementSizeBytes * i))
14203 break;
14204 LastConsecutiveLoad = i;
14205
14206 if (isDereferenceable && !LoadNodes[i].MemNode->isDereferenceable())
14207 isDereferenceable = false;
14208
14209 // Find a legal type for the vector store.
14210 unsigned Elts = (i + 1) * NumMemElts;
14211 EVT StoreTy = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);
14212
14213 // Break early when size is too large to be legal.
14214 if (StoreTy.getSizeInBits() > MaximumLegalStoreInBits)
14215 break;
14216
14217 bool IsFastSt, IsFastLd;
14218 if (TLI.isTypeLegal(StoreTy) &&
14219 TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
14220 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
14221 FirstStoreAlign, &IsFastSt) &&
14222 IsFastSt &&
14223 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
14224 FirstLoadAlign, &IsFastLd) &&
14225 IsFastLd) {
14226 LastLegalVectorType = i + 1;
14227 }
14228
14229 // Find a legal type for the integer store.
14230 unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8;
14231 StoreTy = EVT::getIntegerVT(Context, SizeInBits);
14232 if (TLI.isTypeLegal(StoreTy) &&
14233 TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
14234 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
14235 FirstStoreAlign, &IsFastSt) &&
14236 IsFastSt &&
14237 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
14238 FirstLoadAlign, &IsFastLd) &&
14239 IsFastLd) {
14240 LastLegalIntegerType = i + 1;
14241 DoIntegerTruncate = false;
14242 // Or check whether a truncstore and extload is legal.
14243 } else if (TLI.getTypeAction(Context, StoreTy) ==
14244 TargetLowering::TypePromoteInteger) {
14245 EVT LegalizedStoredValTy = TLI.getTypeToTransformTo(Context, StoreTy);
14246 if (TLI.isTruncStoreLegal(LegalizedStoredValTy, StoreTy) &&
14247 TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValTy, DAG) &&
14248 TLI.isLoadExtLegal(ISD::ZEXTLOAD, LegalizedStoredValTy,
14249 StoreTy) &&
14250 TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValTy,
14251 StoreTy) &&
14252 TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValTy, StoreTy) &&
14253 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
14254 FirstStoreAlign, &IsFastSt) &&
14255 IsFastSt &&
14256 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
14257 FirstLoadAlign, &IsFastLd) &&
14258 IsFastLd) {
14259 LastLegalIntegerType = i + 1;
14260 DoIntegerTruncate = true;
14261 }
14262 }
14263 }
14264
14265 // Only use vector types if the vector type is larger than the integer
14266 // type. If they are the same, use integers.
14267 bool UseVectorTy =
14268 LastLegalVectorType > LastLegalIntegerType && !NoVectors;
14269 unsigned LastLegalType =
14270 std::max(LastLegalVectorType, LastLegalIntegerType);
14271
14272 // We add +1 here because the LastXXX variables refer to a location
14273 // (index) while NumElem refers to a count (array size).
14274 unsigned NumElem =
14275 std::min(NumConsecutiveStores, LastConsecutiveLoad + 1);
14276 NumElem = std::min(LastLegalType, NumElem);
14277
14278 if (NumElem < 2) {
14279 // We know that candidate stores are in order and of correct
14280 // shape. While there is no mergeable sequence from the
14281 // beginning, one may start later in the sequence. The only
14282 // reason a merge of size N could have failed where another of
14283 // the same size would not have is if the alignment of either
14284 // the load or store has improved. Drop as many candidates as we
14285 // can here.
14286 unsigned NumSkip = 1;
14287 while ((NumSkip < LoadNodes.size()) &&
14288 (LoadNodes[NumSkip].MemNode->getAlignment() <= FirstLoadAlign) &&
14289 (StoreNodes[NumSkip].MemNode->getAlignment() <= FirstStoreAlign))
14290 NumSkip++;
14291 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
14292 LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + NumSkip);
14293 NumConsecutiveStores -= NumSkip;
14294 continue;
14295 }
14296
14297 // Check that we can merge these candidates without causing a cycle.
14298 if (!checkMergeStoreCandidatesForDependencies(StoreNodes, NumElem,
14299 RootNode)) {
14300 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
14301 LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + NumElem);
14302 NumConsecutiveStores -= NumElem;
14303 continue;
14304 }
14305
14306 // Find if it is better to use vectors or integers to load and store
14307 // to memory.
14308 EVT JointMemOpVT;
14309 if (UseVectorTy) {
14310 // Find a legal type for the vector store.
14311 unsigned Elts = NumElem * NumMemElts;
14312 JointMemOpVT = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);
14313 } else {
14314 unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
14315 JointMemOpVT = EVT::getIntegerVT(Context, SizeInBits);
14316 }
14317
14318 SDLoc LoadDL(LoadNodes[0].MemNode);
14319 SDLoc StoreDL(StoreNodes[0].MemNode);
14320
14321 // The merged loads are required to have the same incoming chain, so
14322 // using the first's chain is acceptable.
14323
14324 SDValue NewStoreChain = getMergeStoreChains(StoreNodes, NumElem);
14325 AddToWorklist(NewStoreChain.getNode());
14326
14327 MachineMemOperand::Flags MMOFlags =
14328 isDereferenceable ? MachineMemOperand::MODereferenceable
14329 : MachineMemOperand::MONone;
14330
14331 SDValue NewLoad, NewStore;
14332 if (UseVectorTy || !DoIntegerTruncate) {
14333 NewLoad =
14334 DAG.getLoad(JointMemOpVT, LoadDL, FirstLoad->getChain(),
14335 FirstLoad->getBasePtr(), FirstLoad->getPointerInfo(),
14336 FirstLoadAlign, MMOFlags);
14337 NewStore = DAG.getStore(
14338 NewStoreChain, StoreDL, NewLoad, FirstInChain->getBasePtr(),
14339 FirstInChain->getPointerInfo(), FirstStoreAlign);
14340 } else { // This must be the truncstore/extload case
14341 EVT ExtendedTy =
14342 TLI.getTypeToTransformTo(*DAG.getContext(), JointMemOpVT);
14343 NewLoad = DAG.getExtLoad(ISD::EXTLOAD, LoadDL, ExtendedTy,
14344 FirstLoad->getChain(), FirstLoad->getBasePtr(),
14345 FirstLoad->getPointerInfo(), JointMemOpVT,
14346 FirstLoadAlign, MMOFlags);
14347 NewStore = DAG.getTruncStore(NewStoreChain, StoreDL, NewLoad,
14348 FirstInChain->getBasePtr(),
14349 FirstInChain->getPointerInfo(),
14350 JointMemOpVT, FirstInChain->getAlignment(),
14351 FirstInChain->getMemOperand()->getFlags());
14352 }
14353
14354 // Transfer chain users from old loads to the new load.
14355 for (unsigned i = 0; i < NumElem; ++i) {
14356 LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode);
14357 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1),
14358 SDValue(NewLoad.getNode(), 1));
14359 }
14360
14361 // Replace all the stores with the new store. Recursively remove the
14362 // corresponding value if it's no longer used.
14363 for (unsigned i = 0; i < NumElem; ++i) {
14364 SDValue Val = StoreNodes[i].MemNode->getOperand(1);
14365 CombineTo(StoreNodes[i].MemNode, NewStore);
14366 if (Val.getNode()->use_empty())
14367 recursivelyDeleteUnusedNodes(Val.getNode());
14368 }
14369
14370 RV = true;
14371 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
14372 LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + NumElem);
14373 NumConsecutiveStores -= NumElem;
14374 }
14375 }
14376 return RV;
14377}
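The load/store half of MergeConsecutiveStores above boils its final decision down to a min over three limits: the run of consecutive stores, the run of consecutive loads, and the widest type the target accepts. A standalone sketch of just that arithmetic, with made-up limits (hypothetical values, not tied to any target):

#include <algorithm>
#include <cstdio>

int main() {
  // Made-up limits standing in for the values computed in the pass.
  unsigned NumConsecutiveStores = 6; // stores at adjacent addresses
  unsigned LastConsecutiveLoad = 3;  // index of the last adjacent load
  unsigned LastLegalVectorType = 4;  // widest legal vector merge, as a count
  unsigned LastLegalIntegerType = 2; // widest legal integer merge, as a count
  bool NoVectors = false;

  bool UseVectorTy = LastLegalVectorType > LastLegalIntegerType && !NoVectors;
  unsigned LastLegalType = std::max(LastLegalVectorType, LastLegalIntegerType);
  // +1 because LastConsecutiveLoad is an index while NumElem is a count.
  unsigned NumElem = std::min(NumConsecutiveStores, LastConsecutiveLoad + 1);
  NumElem = std::min(LastLegalType, NumElem);
  std::printf("merge %u stores using a %s type\n", NumElem,
              UseVectorTy ? "vector" : "integer"); // here: 4, vector
  return 0;
}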
14378
14379SDValue DAGCombiner::replaceStoreChain(StoreSDNode *ST, SDValue BetterChain) {
14380 SDLoc SL(ST);
14381 SDValue ReplStore;
14382
14383 // Replace the chain to avoid dependency.
14384 if (ST->isTruncatingStore()) {
14385 ReplStore = DAG.getTruncStore(BetterChain, SL, ST->getValue(),
14386 ST->getBasePtr(), ST->getMemoryVT(),
14387 ST->getMemOperand());
14388 } else {
14389 ReplStore = DAG.getStore(BetterChain, SL, ST->getValue(), ST->getBasePtr(),
14390 ST->getMemOperand());
14391 }
14392
14393 // Create token to keep both nodes around.
14394 SDValue Token = DAG.getNode(ISD::TokenFactor, SL,
14395 MVT::Other, ST->getChain(), ReplStore);
14396
14397 // Make sure the new and old chains are cleaned up.
14398 AddToWorklist(Token.getNode());
14399
14400 // Don't add users to the worklist.
14401 return CombineTo(ST, Token, false);
14402}
14403
14404SDValue DAGCombiner::replaceStoreOfFPConstant(StoreSDNode *ST) {
14405 SDValue Value = ST->getValue();
14406 if (Value.getOpcode() == ISD::TargetConstantFP)
14407 return SDValue();
14408
14409 SDLoc DL(ST);
14410
14411 SDValue Chain = ST->getChain();
14412 SDValue Ptr = ST->getBasePtr();
14413
14414 const ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Value);
14415
14416 // NOTE: If the original store is volatile, this transform must not increase
14417 // the number of stores. For example, on x86-32 an f64 can be stored in one
14418 // processor operation but an i64 (which is not legal) requires two. So the
14419 // transform should not be done in this case.
14420
14421 SDValue Tmp;
14422 switch (CFP->getSimpleValueType(0).SimpleTy) {
14423 default:
14424 llvm_unreachable("Unknown FP type")::llvm::llvm_unreachable_internal("Unknown FP type", "/build/llvm-toolchain-snapshot-7~svn338205/lib/CodeGen/SelectionDAG/DAGCombiner.cpp"
, 14424)
;
14425 case MVT::f16: // We don't do this for these yet.
14426 case MVT::f80:
14427 case MVT::f128:
14428 case MVT::ppcf128:
14429 return SDValue();
14430 case MVT::f32:
14431 if ((isTypeLegal(MVT::i32) && !LegalOperations && !ST->isVolatile()) ||
14432 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
14433 ;
14434 Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
14435 bitcastToAPInt().getZExtValue(), SDLoc(CFP),
14436 MVT::i32);
14437 return DAG.getStore(Chain, DL, Tmp, Ptr, ST->getMemOperand());
14438 }
14439
14440 return SDValue();
14441 case MVT::f64:
14442 if ((TLI.isTypeLegal(MVT::i64) && !LegalOperations &&
14443 !ST->isVolatile()) ||
14444 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
14445 ;
14446 Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
14447 getZExtValue(), SDLoc(CFP), MVT::i64);
14448 return DAG.getStore(Chain, DL, Tmp,
14449 Ptr, ST->getMemOperand());
14450 }
14451
14452 if (!ST->isVolatile() &&
14453 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
14454 // Many FP stores are not made apparent until after legalize, e.g. for
14455 // argument passing. Since this is so common, custom legalize the
14456 // 64-bit integer store into two 32-bit stores.
14457 uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
14458 SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, SDLoc(CFP), MVT::i32);
14459 SDValue Hi = DAG.getConstant(Val >> 32, SDLoc(CFP), MVT::i32);
14460 if (DAG.getDataLayout().isBigEndian())
14461 std::swap(Lo, Hi);
14462
14463 unsigned Alignment = ST->getAlignment();
14464 MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
14465 AAMDNodes AAInfo = ST->getAAInfo();
14466
14467 SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
14468 ST->getAlignment(), MMOFlags, AAInfo);
14469 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
14470 DAG.getConstant(4, DL, Ptr.getValueType()));
14471 Alignment = MinAlign(Alignment, 4U);
14472 SDValue St1 = DAG.getStore(Chain, DL, Hi, Ptr,
14473 ST->getPointerInfo().getWithOffset(4),
14474 Alignment, MMOFlags, AAInfo);
14475 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
14476 St0, St1);
14477 }
14478
14479 return SDValue();
14480 }
14481}
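The MVT::f64 fallback above splits one 8-byte FP-constant store into two 4-byte integer stores. The bit manipulation involved is ordinary integer arithmetic; a minimal host-side sketch of the Lo/Hi split (illustrative only, mirroring lines 14457-14461):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double D = 1.0;                      // the FP constant being stored
  uint64_t Bits;
  std::memcpy(&Bits, &D, sizeof Bits); // bitcastToAPInt().getZExtValue()
  uint32_t Lo = static_cast<uint32_t>(Bits & 0xFFFFFFFFu);
  uint32_t Hi = static_cast<uint32_t>(Bits >> 32);
  // A big-endian data layout would swap Lo and Hi before storing,
  // matching the std::swap above.
  std::printf("lo=0x%08x hi=0x%08x\n", Lo, Hi); // lo=0x00000000 hi=0x3ff00000
  return 0;
}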
14482
14483SDValue DAGCombiner::visitSTORE(SDNode *N) {
14484 StoreSDNode *ST = cast<StoreSDNode>(N);
14485 SDValue Chain = ST->getChain();
14486 SDValue Value = ST->getValue();
14487 SDValue Ptr = ST->getBasePtr();
14488
14489 // If this is a store of a bit convert, store the input value if the
14490 // resultant store does not need a higher alignment than the original.
14491 if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() &&
14492 ST->isUnindexed()) {
14493 EVT SVT = Value.getOperand(0).getValueType();
14494 if (((!LegalOperations && !ST->isVolatile()) ||
14495 TLI.isOperationLegalOrCustom(ISD::STORE, SVT)) &&
14496 TLI.isStoreBitCastBeneficial(Value.getValueType(), SVT)) {
14497 unsigned OrigAlign = ST->getAlignment();
14498 bool Fast = false;
14499 if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), SVT,
14500 ST->getAddressSpace(), OrigAlign, &Fast) &&
14501 Fast) {
14502 return DAG.getStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
14503 ST->getPointerInfo(), OrigAlign,
14504 ST->getMemOperand()->getFlags(), ST->getAAInfo());
14505 }
14506 }
14507 }
14508
14509 // Turn 'store undef, Ptr' -> nothing.
14510 if (Value.isUndef() && ST->isUnindexed())
14511 return Chain;
14512
14513 // Try to infer better alignment information than the store already has.
14514 if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) {
14515 if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
14516 if (Align > ST->getAlignment() && ST->getSrcValueOffset() % Align == 0) {
14517 SDValue NewStore =
14518 DAG.getTruncStore(Chain, SDLoc(N), Value, Ptr, ST->getPointerInfo(),
14519 ST->getMemoryVT(), Align,
14520 ST->getMemOperand()->getFlags(), ST->getAAInfo());
14521 // NewStore will always be N as we are only refining the alignment
14522 assert(NewStore.getNode() == N);
14523 (void)NewStore;
14524 }
14525 }
14526 }
14527
14528 // Try transforming a pair of floating-point load / store ops to integer
14529 // load / store ops.
14530 if (SDValue NewST = TransformFPLoadStorePair(N))
14531 return NewST;
14532
14533 if (ST->isUnindexed()) {
14534 // Walk up chain skipping non-aliasing memory nodes, on this store and any
14535 // adjacent stores.
14536 if (findBetterNeighborChains(ST)) {
14537 // replaceStoreChain uses CombineTo, which handled all of the worklist
14538 // manipulation. Return the original node to not do anything else.
14539 return SDValue(ST, 0);
14540 }
14541 Chain = ST->getChain();
14542 }
14543
14544 // FIXME: is there such a thing as a truncating indexed store?
14545 if (ST->isTruncatingStore() && ST->isUnindexed() &&
14546 Value.getValueType().isInteger()) {
14547 // See if we can simplify the input to this truncstore with knowledge that
14548 // only the low bits are being used. For example:
14549 // "truncstore (or (shl x, 8), y), i8" -> "truncstore y, i8"
14550 SDValue Shorter = DAG.GetDemandedBits(
14551 Value, APInt::getLowBitsSet(Value.getScalarValueSizeInBits(),
14552 ST->getMemoryVT().getScalarSizeInBits()));
14553 AddToWorklist(Value.getNode());
14554 if (Shorter.getNode())
14555 return DAG.getTruncStore(Chain, SDLoc(N), Shorter,
14556 Ptr, ST->getMemoryVT(), ST->getMemOperand());
14557
14558 // Otherwise, see if we can simplify the operation with
14559 // SimplifyDemandedBits, which only works if the value has a single use.
14560 if (SimplifyDemandedBits(
14561 Value,
14562 APInt::getLowBitsSet(Value.getScalarValueSizeInBits(),
14563 ST->getMemoryVT().getScalarSizeInBits()))) {
14564 // Re-visit the store if anything changed and the store hasn't been merged
14565 // with another node (N is deleted). SimplifyDemandedBits will add Value's
14566 // node back to the worklist if necessary, but we also need to re-visit
14567 // the Store node itself.
14568 if (N->getOpcode() != ISD::DELETED_NODE)
14569 AddToWorklist(N);
14570 return SDValue(N, 0);
14571 }
14572 }
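The GetDemandedBits narrowing above ("truncstore (or (shl x, 8), y), i8" -> "truncstore y, i8") holds because a truncating store keeps only the low MemoryVT bits of the stored value. A self-contained sketch of that identity with hypothetical values, not DAG code:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 0x12345678u, Y = 0x9Au;
  // A truncstore to i8 keeps only the low 8 bits of the stored value...
  uint8_t Stored = static_cast<uint8_t>((X << 8) | Y);
  // ...and (shl X, 8) contributes nothing to those bits.
  assert(Stored == static_cast<uint8_t>(Y));
  return 0;
}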
14573
14574 // If this is a load followed by a store to the same location, then the store
14575 // is dead/noop.
14576 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
14577 if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() &&
14578 ST->isUnindexed() && !ST->isVolatile() &&
14579 // There can't be any side effects between the load and store, such as
14580 // a call or store.
14581 Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) {
14582 // The store is dead, remove it.
14583 return Chain;
14584 }
14585 }
14586
14587 if (StoreSDNode *ST1 = dyn_cast<StoreSDNode>(Chain)) {
14588 if (ST->isUnindexed() && !ST->isVolatile() && ST1->isUnindexed() &&
14589 !ST1->isVolatile() && ST1->getBasePtr() == Ptr &&
14590 ST->getMemoryVT() == ST1->getMemoryVT()) {
14591 // If this is a store followed by a store with the same value to the same
14592 // location, then the store is dead/noop.
14593 if (ST1->getValue() == Value) {
14594 // The store is dead, remove it.
14595 return Chain;
14596 }
14597
14598 // If this store is preceded by a store to the same location
14599 // and no other node is chained to that store, we can effectively
14600 // drop the store. Do not remove stores to undef as they may be used as
14601 // data sinks.
14602 if (OptLevel != CodeGenOpt::None && ST1->hasOneUse() &&
14603 !ST1->getBasePtr().isUndef()) {
14604 // ST1 is fully overwritten and can be elided. Combine with its chain
14605 // value.
14606 CombineTo(ST1, ST1->getChain());
14607 return SDValue();
14608 }
14609 }
14610 }
14611
14612 // If this is an FP_ROUND or TRUNC followed by a store, fold this into a
14613 // truncating store. We can do this even if this is already a truncstore.
14614 if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
14615 && Value.getNode()->hasOneUse() && ST->isUnindexed() &&
14616 TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
14617 ST->getMemoryVT())) {
14618 return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0),
14619 Ptr, ST->getMemoryVT(), ST->getMemOperand());
14620 }
14621
14622 // Always perform this optimization before types are legal. If the target
14623 // prefers, also try this after legalization to catch stores that were created
14624 // by intrinsics or other nodes.
14625 if (!LegalTypes || (TLI.mergeStoresAfterLegalization())) {
14626 while (true) {
14627 // There can be multiple store sequences on the same chain.
14628 // Keep trying to merge store sequences until we are unable to do so
14629 // or until we merge the last store on the chain.
14630 bool Changed = MergeConsecutiveStores(ST);
14631 if (!Changed) break;
14632 // Return N as the merge only uses CombineTo, so no worklist
14633 // cleanup is necessary.
14634 if (N->getOpcode() == ISD::DELETED_NODE || !isa<StoreSDNode>(N))
14635 return SDValue(N, 0);
14636 }
14637 }
14638
14639 // Try transforming N to an indexed store.
14640 if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
14641 return SDValue(N, 0);
14642
14643 // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
14644 //
14645 // Make sure to do this only after attempting to merge stores in order to
14646 // avoid changing the types of some subset of stores due to visit order,
14647 // preventing their merging.
14648 if (isa<ConstantFPSDNode>(ST->getValue())) {
14649 if (SDValue NewSt = replaceStoreOfFPConstant(ST))
14650 return NewSt;
14651 }
14652
14653 if (SDValue NewSt = splitMergedValStore(ST))
14654 return NewSt;
14655
14656 return ReduceLoadOpStoreWidth(N);
14657}
14658
14659/// For the store instruction sequence below, the F and I values
14660/// are bundled together as an i64 value before being stored into memory.
14661/// Sometimes it is more efficient to generate separate stores for F and I,
14662/// which can remove the bitwise instructions or sink them to colder places.
14663///
14664/// (store (or (zext (bitcast F to i32) to i64),
14665/// (shl (zext I to i64), 32)), addr) -->
14666/// (store F, addr) and (store I, addr+4)
14667///
14668/// Similarly, splitting for other merged store can also be beneficial, like:
14669/// For pair of {i32, i32}, i64 store --> two i32 stores.
14670/// For pair of {i32, i16}, i64 store --> two i32 stores.
14671/// For pair of {i16, i16}, i32 store --> two i16 stores.
14672/// For pair of {i16, i8}, i32 store --> two i16 stores.
14673/// For pair of {i8, i8}, i16 store --> two i8 stores.
14674///
14675/// We allow each target to determine specifically which kind of splitting is
14676/// supported.
14677///
14678/// The store patterns are commonly seen from the simple code snippet below
14679/// if only std::make_pair(...) is SROA-transformed before being inlined into hoo.
14680/// void goo(const std::pair<int, float> &);
14681/// hoo() {
14682/// ...
14683/// goo(std::make_pair(tmp, ftmp));
14684/// ...
14685/// }
14686///
14687SDValue DAGCombiner::splitMergedValStore(StoreSDNode *ST) {
14688 if (OptLevel == CodeGenOpt::None)
14689 return SDValue();
14690
14691 SDValue Val = ST->getValue();
14692 SDLoc DL(ST);
14693
14694 // Match OR operand.
14695 if (!Val.getValueType().isScalarInteger() || Val.getOpcode() != ISD::OR)
14696 return SDValue();
14697
14698 // Match SHL operand and get Lower and Higher parts of Val.
14699 SDValue Op1 = Val.getOperand(0);
14700 SDValue Op2 = Val.getOperand(1);
14701 SDValue Lo, Hi;
14702 if (Op1.getOpcode() != ISD::SHL) {
14703 std::swap(Op1, Op2);
14704 if (Op1.getOpcode() != ISD::SHL)
14705 return SDValue();
14706 }
14707 Lo = Op2;
14708 Hi = Op1.getOperand(0);
14709 if (!Op1.hasOneUse())
14710 return SDValue();
14711
14712 // Match shift amount to HalfValBitSize.
14713 unsigned HalfValBitSize = Val.getValueSizeInBits() / 2;
14714 ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(Op1.getOperand(1));
14715 if (!ShAmt || ShAmt->getAPIntValue() != HalfValBitSize)
14716 return SDValue();
14717
14718 // Lo and Hi are zero-extended from an int with size less than or equal to 32
14719 // to i64.
14720 if (Lo.getOpcode() != ISD::ZERO_EXTEND || !Lo.hasOneUse() ||
14721 !Lo.getOperand(0).getValueType().isScalarInteger() ||
14722 Lo.getOperand(0).getValueSizeInBits() > HalfValBitSize ||
14723 Hi.getOpcode() != ISD::ZERO_EXTEND || !Hi.hasOneUse() ||
14724 !Hi.getOperand(0).getValueType().isScalarInteger() ||
14725 Hi.getOperand(0).getValueSizeInBits() > HalfValBitSize)
14726 return SDValue();
14727
14728 // Use the EVT of low and high parts before bitcast as the input
14729 // of target query.
14730 EVT LowTy = (Lo.getOperand(0).getOpcode() == ISD::BITCAST)
14731 ? Lo.getOperand(0).getValueType()
14732 : Lo.getValueType();
14733 EVT HighTy = (Hi.getOperand(0).getOpcode() == ISD::BITCAST)
14734 ? Hi.getOperand(0).getValueType()
14735 : Hi.getValueType();
14736 if (!TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
14737 return SDValue();
14738
14739 // Start to split store.
14740 unsigned Alignment = ST->getAlignment();
14741 MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
14742 AAMDNodes AAInfo = ST->getAAInfo();
14743
14744 // Change the sizes of Lo and Hi's value types to HalfValBitSize.
14745 EVT VT = EVT::getIntegerVT(*DAG.getContext(), HalfValBitSize);
14746 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Lo.getOperand(0));
14747 Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Hi.getOperand(0));
14748
14749 SDValue Chain = ST->getChain();
14750 SDValue Ptr = ST->getBasePtr();
14751 // Lower value store.
14752 SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
14753 ST->getAlignment(), MMOFlags, AAInfo);
14754 Ptr =
14755 DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
14756 DAG.getConstant(HalfValBitSize / 8, DL, Ptr.getValueType()));
14757 // Higher value store.
14758 SDValue St1 =
14759 DAG.getStore(St0, DL, Hi, Ptr,
14760 ST->getPointerInfo().getWithOffset(HalfValBitSize / 8),
14761 Alignment / 2, MMOFlags, AAInfo);
14762 return St1;
14763}
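Profitability hooks aside, the correctness of splitMergedValStore rests on a byte-layout identity: on a little-endian target, storing (zext Lo) | (shl (zext Hi), 32) as one i64 writes the same bytes as storing Lo at addr and Hi at addr+4. A standalone little-endian sketch with hypothetical values:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint32_t F = 0x40600000u; // bit pattern of a float, the Lo half
  uint32_t I = 0xAABBCCDDu; // an int, the Hi half

  // Merged form: one i64 store of (zext F) | (shl (zext I), 32).
  unsigned char Merged[8];
  uint64_t Wide = (uint64_t)F | ((uint64_t)I << 32);
  std::memcpy(Merged, &Wide, 8);

  // Split form: store F at addr and I at addr+4.
  unsigned char Split[8];
  std::memcpy(Split, &F, 4);
  std::memcpy(Split + 4, &I, 4);

  // Identical bytes on a little-endian host; big-endian would differ.
  assert(std::memcmp(Merged, Split, 8) == 0);
  return 0;
}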
14764
14765/// Convert a disguised subvector insertion into a shuffle:
14766/// insert_vector_elt V, (bitcast X from vector type), IdxC -->
14767/// bitcast(shuffle (bitcast V), (extended X), Mask)
14768/// Note: We do not use an insert_subvector node because that requires a legal
14769/// subvector type.
14770SDValue DAGCombiner::combineInsertEltToShuffle(SDNode *N, unsigned InsIndex) {
14771 SDValue InsertVal = N->getOperand(1);
14772 if (InsertVal.getOpcode() != ISD::BITCAST || !InsertVal.hasOneUse() ||
14773 !InsertVal.getOperand(0).getValueType().isVector())
14774 return SDValue();
14775
14776 SDValue SubVec = InsertVal.getOperand(0);
14777 SDValue DestVec = N->getOperand(0);
14778 EVT SubVecVT = SubVec.getValueType();
14779 EVT VT = DestVec.getValueType();
14780 unsigned NumSrcElts = SubVecVT.getVectorNumElements();
14781 unsigned ExtendRatio = VT.getSizeInBits() / SubVecVT.getSizeInBits();
14782 unsigned NumMaskVals = ExtendRatio * NumSrcElts;
14783
14784 // Step 1: Create a shuffle mask that implements this insert operation. The
14785 // vector that we are inserting into will be operand 0 of the shuffle, so
14786 // those elements are just 'i'. The inserted subvector is in the first
14787 // positions of operand 1 of the shuffle. Example:
14788 // insert v4i32 V, (v2i16 X), 2 --> shuffle v8i16 V', X', {0,1,2,3,8,9,6,7}
14789 SmallVector<int, 16> Mask(NumMaskVals);
14790 for (unsigned i = 0; i != NumMaskVals; ++i) {
14791 if (i / NumSrcElts == InsIndex)
14792 Mask[i] = (i % NumSrcElts) + NumMaskVals;
14793 else
14794 Mask[i] = i;
14795 }
14796
14797 // Bail out if the target cannot handle the shuffle we want to create.
14798 EVT SubVecEltVT = SubVecVT.getVectorElementType();
14799 EVT ShufVT = EVT::getVectorVT(*DAG.getContext(), SubVecEltVT, NumMaskVals);
14800 if (!TLI.isShuffleMaskLegal(Mask, ShufVT))
14801 return SDValue();
14802
14803 // Step 2: Create a wide vector from the inserted source vector by appending
14804 // undefined elements. This is the same size as our destination vector.
14805 SDLoc DL(N);
14806 SmallVector<SDValue, 8> ConcatOps(ExtendRatio, DAG.getUNDEF(SubVecVT));
14807 ConcatOps[0] = SubVec;
14808 SDValue PaddedSubV = DAG.getNode(ISD::CONCAT_VECTORS, DL, ShufVT, ConcatOps);
14809
14810 // Step 3: Shuffle in the padded subvector.
14811 SDValue DestVecBC = DAG.getBitcast(ShufVT, DestVec);
14812 SDValue Shuf = DAG.getVectorShuffle(ShufVT, DL, DestVecBC, PaddedSubV, Mask);
14813 AddToWorklist(PaddedSubV.getNode());
14814 AddToWorklist(DestVecBC.getNode());
14815 AddToWorklist(Shuf.getNode());
14816 return DAG.getBitcast(VT, Shuf);
14817}
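Step 1's mask arithmetic is compact enough to trace by hand. For the comment's example, insert v4i32 V, (v2i16 X), 2, the loop below (the same arithmetic lifted into a plain program with hypothetical constants) reproduces {0,1,2,3,8,9,6,7}:

#include <cstdio>

int main() {
  // insert v4i32 V, (v2i16 X), 2, viewed in the v8i16 shuffle domain.
  const unsigned NumSrcElts = 2;  // elements of the v2i16 subvector
  const unsigned NumMaskVals = 8; // v4i32 destination seen as v8i16
  const unsigned InsIndex = 2;    // i32 element being replaced
  int Mask[NumMaskVals];
  for (unsigned i = 0; i != NumMaskVals; ++i)
    Mask[i] = (i / NumSrcElts == InsIndex)
                  ? static_cast<int>((i % NumSrcElts) + NumMaskVals)
                  : static_cast<int>(i);
  for (int M : Mask)
    std::printf("%d ", M); // prints: 0 1 2 3 8 9 6 7
  std::printf("\n");
  return 0;
}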
14818
14819SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
14820 SDValue InVec = N->getOperand(0);
14821 SDValue InVal = N->getOperand(1);
14822 SDValue EltNo = N->getOperand(2);
14823 SDLoc DL(N);
14824
14825 // If the inserted element is an UNDEF, just use the input vector.
14826 if (InVal.isUndef())
14827 return InVec;
14828
14829 EVT VT = InVec.getValueType();
14830
14831 // Remove redundant insertions:
14832 // (insert_vector_elt x (extract_vector_elt x idx) idx) -> x
14833 if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
14834 InVec == InVal.getOperand(0) && EltNo == InVal.getOperand(1))
14835 return InVec;
14836
14837 // We must know which element is being inserted for folds below here.
14838 auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
14839 if (!IndexC)
14840 return SDValue();
14841 unsigned Elt = IndexC->getZExtValue();
14842
14843 if (SDValue Shuf = combineInsertEltToShuffle(N, Elt))
14844 return Shuf;
14845
14846 // Canonicalize insert_vector_elt dag nodes.
14847 // Example:
14848 // (insert_vector_elt (insert_vector_elt A, Idx0), Idx1)
14849 // -> (insert_vector_elt (insert_vector_elt A, Idx1), Idx0)
14850 //
14851 // Do this only if the child insert_vector node has one use; also
14852 // do this only if indices are both constants and Idx1 < Idx0.
14853 if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT && InVec.hasOneUse()
14854 && isa<ConstantSDNode>(InVec.getOperand(2))) {
14855 unsigned OtherElt = InVec.getConstantOperandVal(2);
14856 if (Elt < OtherElt) {
14857 // Swap nodes.
14858 SDValue NewOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
14859 InVec.getOperand(0), InVal, EltNo);
14860 AddToWorklist(NewOp.getNode());
14861 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(InVec.getNode()),
14862 VT, NewOp, InVec.getOperand(1), InVec.getOperand(2));
14863 }
14864 }
14865
14866 // If we can't generate a legal BUILD_VECTOR, exit
14867 if (LegalOperations && !TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
14868 return SDValue();
14869
14870 // Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
14871 // be converted to a BUILD_VECTOR). Fill in the Ops vector with the
14872 // vector elements.
14873 SmallVector<SDValue, 8> Ops;
14874 // Do not combine these two vectors if the output vector will not replace
14875 // the input vector.
14876 if (InVec.getOpcode() == ISD::BUILD_VECTOR && InVec.hasOneUse()) {
14877 Ops.append(InVec.getNode()->op_begin(),
14878 InVec.getNode()->op_end());
14879 } else if (InVec.isUndef()) {
14880 unsigned NElts = VT.getVectorNumElements();
14881 Ops.append(NElts, DAG.getUNDEF(InVal.getValueType()));
14882 } else {
14883 return SDValue();
14884 }
14885
14886 // Insert the element
14887 if (Elt < Ops.size()) {
14888 // All the operands of BUILD_VECTOR must have the same type;
14889 // we enforce that here.
14890 EVT OpVT = Ops[0].getValueType();
14891 Ops[Elt] = OpVT.isInteger() ? DAG.getAnyExtOrTrunc(InVal, DL, OpVT) : InVal;
14892 }
14893
14894 // Return the new vector
14895 return DAG.getBuildVector(VT, DL, Ops);
14896}
14897
14898SDValue DAGCombiner::ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
14899 SDNode *EVE, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad) {
14900 assert(!OriginalLoad->isVolatile());
14901
14902 EVT ResultVT = EVE->getValueType(0);
14903 EVT VecEltVT = InVecVT.getVectorElementType();
14904 unsigned Align = OriginalLoad->getAlignment();
14905 unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
14906 VecEltVT.getTypeForEVT(*DAG.getContext()));
14907
14908 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT))
14909 return SDValue();
14910
14911 ISD::LoadExtType ExtTy = ResultVT.bitsGT(VecEltVT) ?
14912 ISD::NON_EXTLOAD : ISD::EXTLOAD;
14913 if (!TLI.shouldReduceLoadWidth(OriginalLoad, ExtTy, VecEltVT))
14914 return SDValue();
14915
14916 Align = NewAlign;
14917
14918 SDValue NewPtr = OriginalLoad->getBasePtr();
14919 SDValue Offset;
14920 EVT PtrType = NewPtr.getValueType();
14921 MachinePointerInfo MPI;
14922 SDLoc DL(EVE);
14923 if (auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo)) {
14924 int Elt = ConstEltNo->getZExtValue();
14925 unsigned PtrOff = VecEltVT.getSizeInBits() * Elt / 8;
14926 Offset = DAG.getConstant(PtrOff, DL, PtrType);
14927 MPI = OriginalLoad->getPointerInfo().getWithOffset(PtrOff);
14928 } else {
14929 Offset = DAG.getZExtOrTrunc(EltNo, DL, PtrType);
14930 Offset = DAG.getNode(
14931 ISD::MUL, DL, PtrType, Offset,
14932 DAG.getConstant(VecEltVT.getStoreSize(), DL, PtrType));
14933 MPI = OriginalLoad->getPointerInfo();
14934 }
14935 NewPtr = DAG.getNode(ISD::ADD, DL, PtrType, NewPtr, Offset);
14936
14937 // The replacement we need to do here is a little tricky: we need to
14938 // replace an extractelement of a load with a load.
14939 // Use ReplaceAllUsesOfValuesWith to do the replacement.
14940 // Note that this replacement assumes that the extractvalue is the only
14941 // use of the load; that's okay because we don't want to perform this
14942 // transformation in other cases anyway.
14943 SDValue Load;
14944 SDValue Chain;
14945 if (ResultVT.bitsGT(VecEltVT)) {
14946 // If the result type of vextract is wider than the load, then issue an
14947 // extending load instead.
14948 ISD::LoadExtType ExtType = TLI.isLoadExtLegal(ISD::ZEXTLOAD, ResultVT,
14949 VecEltVT)
14950 ? ISD::ZEXTLOAD
14951 : ISD::EXTLOAD;
14952 Load = DAG.getExtLoad(ExtType, SDLoc(EVE), ResultVT,
14953 OriginalLoad->getChain(), NewPtr, MPI, VecEltVT,
14954 Align, OriginalLoad->getMemOperand()->getFlags(),
14955 OriginalLoad->getAAInfo());
14956 Chain = Load.getValue(1);
14957 } else {
14958 Load = DAG.getLoad(VecEltVT, SDLoc(EVE), OriginalLoad->getChain(), NewPtr,
14959 MPI, Align, OriginalLoad->getMemOperand()->getFlags(),
14960 OriginalLoad->getAAInfo());
14961 Chain = Load.getValue(1);
14962 if (ResultVT.bitsLT(VecEltVT))
14963 Load = DAG.getNode(ISD::TRUNCATE, SDLoc(EVE), ResultVT, Load);
14964 else
14965 Load = DAG.getBitcast(ResultVT, Load);
14966 }
14967 WorklistRemover DeadNodes(*this);
14968 SDValue From[] = { SDValue(EVE, 0), SDValue(OriginalLoad, 1) };
14969 SDValue To[] = { Load, Chain };
14970 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
14971 // Since we're explicitly calling ReplaceAllUses, add the new node to the
14972 // worklist explicitly as well.
14973 AddToWorklist(Load.getNode());
14974 AddUsersToWorklist(Load.getNode()); // Add users too
14975 // Make sure to revisit this node to clean it up; it will usually be dead.
14976 AddToWorklist(EVE);
14977 ++OpsNarrowed;
14978 return SDValue(EVE, 0);
14979}
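For a constant extract index, the narrowed load's address is just the base plus the element size times the index, folded to a byte offset (the PtrOff computation above). A trivial standalone sketch with a hypothetical v4f32:

#include <cstdio>

int main() {
  // Hypothetical extract of element 3 from a v4f32 load.
  unsigned VecEltSizeInBits = 32;
  int Elt = 3;
  unsigned PtrOff = VecEltSizeInBits * Elt / 8; // offset in bytes
  std::printf("narrowed f32 load at base + %u bytes\n", PtrOff); // base + 12
  return 0;
}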
14980
14981SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
14982 // (vextract (scalar_to_vector val, 0) -> val
14983 SDValue InVec = N->getOperand(0);
14984 EVT VT = InVec.getValueType();
14985 EVT NVT = N->getValueType(0);
14986
14987 if (InVec.isUndef())
14988 return DAG.getUNDEF(NVT);
14989
14990 if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
14991 // Check if the result type doesn't match the inserted element type. A
14992 // SCALAR_TO_VECTOR may truncate the inserted element and the
14993 // EXTRACT_VECTOR_ELT may widen the extracted vector.
14994 SDValue InOp = InVec.getOperand(0);
14995 if (InOp.getValueType() != NVT) {
14996 assert(InOp.getValueType().isInteger() && NVT.isInteger());
14997 return DAG.getSExtOrTrunc(InOp, SDLoc(InVec), NVT);
14998 }
14999 return InOp;
15000 }
15001
15002 SDValue EltNo = N->getOperand(1);
15003 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
15004
15005 // extract_vector_elt of out-of-bounds element -> UNDEF
15006 if (ConstEltNo && ConstEltNo->getAPIntValue().uge(VT.getVectorNumElements()))
15007 return DAG.getUNDEF(NVT);
15008
15009 // extract_vector_elt (build_vector x, y), 1 -> y
15010 if (ConstEltNo &&
15011 InVec.getOpcode() == ISD::BUILD_VECTOR &&
15012 TLI.isTypeLegal(VT) &&
15013 (InVec.hasOneUse() ||
15014 TLI.aggressivelyPreferBuildVectorSources(VT))) {
15015 SDValue Elt = InVec.getOperand(ConstEltNo->getZExtValue());
15016 EVT InEltVT = Elt.getValueType();
15017
15018 // Sometimes build_vector's scalar input types do not match result type.
15019 if (NVT == InEltVT)
15020 return Elt;
15021
15022 // TODO: It may be useful to truncate, if free, when the build_vector
15023 // implicitly converts.
15024 }
15025
15026 // extract_vector_elt (v2i32 (bitcast i64:x)), EltTrunc -> i32 (trunc i64:x)
15027 bool isLE = DAG.getDataLayout().isLittleEndian();
15028 unsigned EltTrunc = isLE ? 0 : VT.getVectorNumElements() - 1;
15029 if (ConstEltNo && InVec.getOpcode() == ISD::BITCAST && InVec.hasOneUse() &&
15030 ConstEltNo->getZExtValue() == EltTrunc && VT.isInteger()) {
15031 SDValue BCSrc = InVec.getOperand(0);
15032 if (BCSrc.getValueType().isScalarInteger())
15033 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), NVT, BCSrc);
15034 }
15035
15036 // extract_vector_elt (insert_vector_elt vec, val, idx), idx) -> val
15037 //
15038 // This only really matters if the index is non-constant since other combines
15039 // on the constant elements already work.
15040 if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT &&
15041 EltNo == InVec.getOperand(2)) {
15042 SDValue Elt = InVec.getOperand(1);
15043 return VT.isInteger() ? DAG.getAnyExtOrTrunc(Elt, SDLoc(N), NVT) : Elt;
15044 }
15045
15046 // Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
15047 // We only perform this optimization before the op legalization phase because
15048 // we may introduce new vector instructions which are not backed by TD
15049 // patterns. For example on AVX, extracting elements from a wide vector
15050 // without using extract_subvector. However, if we can find an underlying
15051 // scalar value, then we can always use that.
15052 if (ConstEltNo && InVec.getOpcode() == ISD::VECTOR_SHUFFLE) {
15053 int NumElem = VT.getVectorNumElements();
15054 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(InVec);
15055 // Find the new index to extract from.
15056 int OrigElt = SVOp->getMaskElt(ConstEltNo->getZExtValue());
15057
15058 // Extracting an undef index is undef.
15059 if (OrigElt == -1)
15060 return DAG.getUNDEF(NVT);
15061
15062 // Select the right vector half to extract from.
15063 SDValue SVInVec;
15064 if (OrigElt < NumElem) {
15065 SVInVec = InVec->getOperand(0);
15066 } else {
15067 SVInVec = InVec->getOperand(1);
15068 OrigElt -= NumElem;
15069 }
15070
15071 if (SVInVec.getOpcode() == ISD::BUILD_VECTOR) {
15072 SDValue InOp = SVInVec.getOperand(OrigElt);
15073 if (InOp.getValueType() != NVT) {
15074 assert(InOp.getValueType().isInteger() && NVT.isInteger());
15075 InOp = DAG.getSExtOrTrunc(InOp, SDLoc(SVInVec), NVT);
15076 }
15077
15078 return InOp;
15079 }
15080
15081 // FIXME: We should handle recursing on other vector shuffles and
15082 // scalar_to_vector here as well.
15083
15084 if (!LegalOperations ||
15085 // FIXME: Should really be just isOperationLegalOrCustom.
15086 TLI.isOperationLegal(ISD::EXTRACT_VECTOR_ELT, VT) ||
15087 TLI.isOperationExpand(ISD::VECTOR_SHUFFLE, VT)) {
15088 EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout());
15089 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), NVT, SVInVec,
15090 DAG.getConstant(OrigElt, SDLoc(SVOp), IndexTy));
15091 }
15092 }
15093
15094 // If only EXTRACT_VECTOR_ELT nodes use the source vector we can
15095 // simplify it based on the (valid) extraction indices.
15096 if (llvm::all_of(InVec->uses(), [&](SDNode *Use) {
15097 return Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
15098 Use->getOperand(0) == InVec &&
15099 isa<ConstantSDNode>(Use->getOperand(1));
15100 })) {
15101 APInt DemandedElts = APInt::getNullValue(VT.getVectorNumElements());
15102 for (SDNode *Use : InVec->uses()) {
15103 auto *CstElt = cast<ConstantSDNode>(Use->getOperand(1));
15104 if (CstElt->getAPIntValue().ult(VT.getVectorNumElements()))
15105 DemandedElts.setBit(CstElt->getZExtValue());
15106 }
15107 if (SimplifyDemandedVectorElts(InVec, DemandedElts, true))
15108 return SDValue(N, 0);
15109 }
15110
15111 bool BCNumEltsChanged = false;
15112 EVT ExtVT = VT.getVectorElementType();
15113 EVT LVT = ExtVT;
15114
15115 // If the result of the load has to be truncated, then it's not necessarily
15116 // profitable.
15117 if (NVT.bitsLT(LVT) && !TLI.isTruncateFree(LVT, NVT))
15118 return SDValue();
15119
15120 if (InVec.getOpcode() == ISD::BITCAST) {
15121 // Don't duplicate a load with other uses.
15122 if (!InVec.hasOneUse())
15123 return SDValue();
15124
15125 EVT BCVT = InVec.getOperand(0).getValueType();
15126 if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
15127 return SDValue();
15128 if (VT.getVectorNumElements() != BCVT.getVectorNumElements())
15129 BCNumEltsChanged = true;
15130 InVec = InVec.getOperand(0);
15131 ExtVT = BCVT.getVectorElementType();
15132 }
15133
15134 // (vextract (vN[if]M load $addr), i) -> ([if]M load $addr + i * size)
15135 if (!LegalOperations && !ConstEltNo && InVec.hasOneUse() &&
15136 ISD::isNormalLoad(InVec.getNode()) &&
15137 !N->getOperand(1)->hasPredecessor(InVec.getNode())) {
15138 SDValue Index = N->getOperand(1);
15139 if (LoadSDNode *OrigLoad = dyn_cast<LoadSDNode>(InVec)) {
15140 if (!OrigLoad->isVolatile()) {
15141 return ReplaceExtractVectorEltOfLoadWithNarrowedLoad(N, VT, Index,
15142 OrigLoad);
15143 }
15144 }
15145 }
15146
15147 // Perform only after legalization to ensure build_vector / vector_shuffle
15148 // optimizations have already been done.
15149 if (!LegalOperations) return SDValue();
15150
15151 // (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size)
15152 // (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size)
15153 // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr)
15154
15155 if (ConstEltNo) {
15156 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
15157
15158 LoadSDNode *LN0 = nullptr;
15159 const ShuffleVectorSDNode *SVN = nullptr;
15160 if (ISD::isNormalLoad(InVec.getNode())) {
15161 LN0 = cast<LoadSDNode>(InVec);
15162 } else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR &&
15163 InVec.getOperand(0).getValueType() == ExtVT &&
15164 ISD::isNormalLoad(InVec.getOperand(0).getNode())) {
15165 // Don't duplicate a load with other uses.
15166 if (!InVec.hasOneUse())
15167 return SDValue();
15168
15169 LN0 = cast<LoadSDNode>(InVec.getOperand(0));
15170 } else if ((SVN = dyn_cast<ShuffleVectorSDNode>(InVec))) {
15171 // (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1)
15172 // =>
15173 // (load $addr+1*size)
15174
15175 // Don't duplicate a load with other uses.
15176 if (!InVec.hasOneUse())
15177 return SDValue();
15178
15179 // If the bit convert changed the number of elements, it is unsafe
15180 // to examine the mask.
15181 if (BCNumEltsChanged)
15182 return SDValue();
15183
15184 // Select the input vector, guarding against out of range extract vector.
15185 unsigned NumElems = VT.getVectorNumElements();
15186 int Idx = (Elt > (int)NumElems) ? -1 : SVN->getMaskElt(Elt);
15187 InVec = (Idx < (int)NumElems) ? InVec.getOperand(0) : InVec.getOperand(1);
15188
15189 if (InVec.getOpcode() == ISD::BITCAST) {
15190 // Don't duplicate a load with other uses.
15191 if (!InVec.hasOneUse())
15192 return SDValue();
15193
15194 InVec = InVec.getOperand(0);
15195 }
15196 if (ISD::isNormalLoad(InVec.getNode())) {
15197 LN0 = cast<LoadSDNode>(InVec);
15198 Elt = (Idx < (int)NumElems) ? Idx : Idx - (int)NumElems;
15199 EltNo = DAG.getConstant(Elt, SDLoc(EltNo), EltNo.getValueType());
15200 }
15201 }
15202
15203 // Make sure we found a non-volatile load and the extractelement is
15204 // the only use.
15205 if (!LN0 || !LN0->hasNUsesOfValue(1,0) || LN0->isVolatile())
15206 return SDValue();
15207
15208 // If Idx was -1 above, Elt is going to be -1, so just return undef.
15209 if (Elt == -1)
15210 return DAG.getUNDEF(LVT);
15211
15212 return ReplaceExtractVectorEltOfLoadWithNarrowedLoad(N, VT, EltNo, LN0);
15213 }
15214
15215 return SDValue();
15216}
15217
15218// Simplify (build_vec (ext )) to (bitcast (build_vec ))
15219SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
15220 // We perform this optimization post type-legalization because
15221 // the type-legalizer often scalarizes integer-promoted vectors.
15222 // Performing this optimization before may create bit-casts which
15223 // will be type-legalized to complex code sequences.
15224 // We perform this optimization only before the operation legalizer because we
15225 // may introduce illegal operations.
15226 if (Level != AfterLegalizeVectorOps && Level != AfterLegalizeTypes)
15227 return SDValue();
15228
15229 unsigned NumInScalars = N->getNumOperands();
15230 SDLoc DL(N);
15231 EVT VT = N->getValueType(0);
15232
15233 // Check to see if this is a BUILD_VECTOR of a bunch of values
15234 // which come from any_extend or zero_extend nodes. If so, we can create
15235 // a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR
15236 // optimizations. We do not handle sign-extend because we can't fill the sign
15237 // using shuffles.
15238 EVT SourceType = MVT::Other;
15239 bool AllAnyExt = true;
15240
15241 for (unsigned i = 0; i != NumInScalars; ++i) {
15242 SDValue In = N->getOperand(i);
15243 // Ignore undef inputs.
15244 if (In.isUndef()) continue;
15245
15246 bool AnyExt = In.getOpcode() == ISD::ANY_EXTEND;
15247 bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND;
15248
15249 // Abort if the element is not an extension.
15250 if (!ZeroExt && !AnyExt) {
15251 SourceType = MVT::Other;
15252 break;
15253 }
15254
15255 // The input is a ZeroExt or AnyExt. Check the original type.
15256 EVT InTy = In.getOperand(0).getValueType();
15257
15258 // Check that all of the widened source types are the same.
15259 if (SourceType == MVT::Other)
15260 // First time.
15261 SourceType = InTy;
15262 else if (InTy != SourceType) {
15263 // Multiple incoming types. Abort.
15264 SourceType = MVT::Other;
15265 break;
15266 }
15267
15268 // Check if all of the extends are ANY_EXTENDs.
15269 AllAnyExt &= AnyExt;
15270 }
15271
15272 // In order to have valid types, all of the inputs must be extended from the
15273 // same source type and all of the inputs must be any or zero extend.
15274 // Scalar sizes must be a power of two.
15275 EVT OutScalarTy = VT.getScalarType();
15276 bool ValidTypes = SourceType != MVT::Other &&
15277 isPowerOf2_32(OutScalarTy.getSizeInBits()) &&
15278 isPowerOf2_32(SourceType.getSizeInBits());
15279
15280 // Create a new simpler BUILD_VECTOR sequence which other optimizations can
15281 // turn into a single shuffle instruction.
15282 if (!ValidTypes)
15283 return SDValue();
15284
15285 bool isLE = DAG.getDataLayout().isLittleEndian();
15286 unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits();
15287 assert(ElemRatio > 1 && "Invalid element size ratio");
15288 SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType):
15289 DAG.getConstant(0, DL, SourceType);
15290
15291 unsigned NewBVElems = ElemRatio * VT.getVectorNumElements();
15292 SmallVector<SDValue, 8> Ops(NewBVElems, Filler);
15293
15294 // Populate the new build_vector
15295 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
15296 SDValue Cast = N->getOperand(i);
15297 assert((Cast.getOpcode() == ISD::ANY_EXTEND ||
15298 Cast.getOpcode() == ISD::ZERO_EXTEND ||
15299 Cast.isUndef()) && "Invalid cast opcode");
15300 SDValue In;
15301 if (Cast.isUndef())
15302 In = DAG.getUNDEF(SourceType);
15303 else
15304 In = Cast->getOperand(0);
15305 unsigned Index = isLE ? (i * ElemRatio) :
15306 (i * ElemRatio + (ElemRatio - 1));
15307
15308 assert(Index < Ops.size() && "Invalid index");
15309 Ops[Index] = In;
15310 }
15311
15312 // The type of the new BUILD_VECTOR node.
15313 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems);
15314 assert(VecVT.getSizeInBits() == VT.getSizeInBits() &&
15315 "Invalid vector size");
15316 // Check if the new vector type is legal.
15317 if (!isTypeLegal(VecVT) ||
15318 (!TLI.isOperationLegal(ISD::BUILD_VECTOR, VecVT) &&
15319 TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)))
15320 return SDValue();
15321
15322 // Make the new BUILD_VECTOR.
15323 SDValue BV = DAG.getBuildVector(VecVT, DL, Ops);
15324
15325 // The new BUILD_VECTOR node has the potential to be further optimized.
15326 AddToWorklist(BV.getNode());
15327 // Bitcast to the desired type.
15328 return DAG.getBitcast(VT, BV);
15329}
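The Index computation above decides which narrow lane of each widened element carries the payload: the lowest lane on little-endian, the highest on big-endian. A standalone sketch of both placements (hypothetical i8-to-i32 ratio of 4):

#include <cstdio>

int main() {
  const unsigned ElemRatio = 4;    // e.g. i32 outputs built from i8 sources
  const unsigned NumInScalars = 2; // two build_vector operands
  for (int IsLE = 1; IsLE >= 0; --IsLE)
    for (unsigned i = 0; i != NumInScalars; ++i) {
      unsigned Index =
          IsLE ? i * ElemRatio : i * ElemRatio + (ElemRatio - 1);
      std::printf("%s: operand %u -> lane %u\n", IsLE ? "LE" : "BE", i, Index);
    }
  return 0; // LE: lanes 0 and 4; BE: lanes 3 and 7
}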
15330
15331SDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) {
15332 EVT VT = N->getValueType(0);
15333
15334 unsigned NumInScalars = N->getNumOperands();
15335 SDLoc DL(N);
15336
15337 EVT SrcVT = MVT::Other;
15338 unsigned Opcode = ISD::DELETED_NODE;
15339 unsigned NumDefs = 0;
15340
15341 for (unsigned i = 0; i != NumInScalars; ++i) {
15342 SDValue In = N->getOperand(i);
15343 unsigned Opc = In.getOpcode();
15344
15345 if (Opc == ISD::UNDEF)
15346 continue;
15347
15348 // Check whether all scalar values are floats converted from integers.
15349 if (Opcode == ISD::DELETED_NODE &&
15350 (Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP)) {
15351 Opcode = Opc;
15352 }
15353
15354 if (Opc != Opcode)
15355 return SDValue();
15356
15357 EVT InVT = In.getOperand(0).getValueType();
15358
15359 // If the scalar values are not all of the same type, bail out. This
15360 // restriction simplifies BUILD_VECTOR of integer types.
15361 if (SrcVT == MVT::Other)
15362 SrcVT = InVT;
15363 if (SrcVT != InVT)
15364 return SDValue();
15365 NumDefs++;
15366 }
15367
15368 // If the vector has just one element defined, it's not worth folding it
15369 // into a vectorized one.
15370 if (NumDefs < 2)
15371 return SDValue();
15372
15373 assert((Opcode == ISD::UINT_TO_FP || Opcode == ISD::SINT_TO_FP)
15374 && "Should only handle conversion from integer to float.");
15375 assert(SrcVT != MVT::Other && "Cannot determine source type!");
15376
15377 EVT NVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumInScalars);
15378
15379 if (!TLI.isOperationLegalOrCustom(Opcode, NVT))
15380 return SDValue();
15381
15382 // Just because the floating-point vector type is legal does not necessarily
15383 // mean that the corresponding integer vector type is.
15384 if (!isTypeLegal(NVT))
15385 return SDValue();
15386
15387 SmallVector<SDValue, 8> Opnds;
15388 for (unsigned i = 0; i != NumInScalars; ++i) {
15389 SDValue In = N->getOperand(i);
15390
15391 if (In.isUndef())
15392 Opnds.push_back(DAG.getUNDEF(SrcVT));
15393 else
15394 Opnds.push_back(In.getOperand(0));
15395 }
15396 SDValue BV = DAG.getBuildVector(NVT, DL, Opnds);
15397 AddToWorklist(BV.getNode());
15398
15399 return DAG.getNode(Opcode, DL, VT, BV);
15400}
15401
15402SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N,
15403 ArrayRef<int> VectorMask,
15404 SDValue VecIn1, SDValue VecIn2,
15405 unsigned LeftIdx) {
15406 MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
15407 SDValue ZeroIdx = DAG.getConstant(0, DL, IdxTy);
15408
15409 EVT VT = N->getValueType(0);
15410 EVT InVT1 = VecIn1.getValueType();
15411 EVT InVT2 = VecIn2.getNode() ? VecIn2.getValueType() : InVT1;
15412
15413 unsigned Vec2Offset = 0;
15414 unsigned NumElems = VT.getVectorNumElements();
15415 unsigned ShuffleNumElems = NumElems;
15416
15417 // If both input vectors are extracted from the same base
15418 // vector, we do not need the extra addend (Vec2Offset) when
15419 // computing the shuffle mask.
15420 if (!VecIn2 || !(VecIn1.getOpcode() == ISD::EXTRACT_SUBVECTOR) ||
15421 !(VecIn2.getOpcode() == ISD::EXTRACT_SUBVECTOR) ||
15422 !(VecIn1.getOperand(0) == VecIn2.getOperand(0)))
15423 Vec2Offset = InVT1.getVectorNumElements();
15424
15425 // We can't generate a shuffle node with mismatched input and output types.
15426 // Try to make the types match the type of the output.
15427 if (InVT1 != VT || InVT2 != VT) {
15428 if ((VT.getSizeInBits() % InVT1.getSizeInBits() == 0) && InVT1 == InVT2) {
15429 // If the output vector length is a multiple of both input lengths,
15430 // we can concatenate them and pad the rest with undefs.
15431 unsigned NumConcats = VT.getSizeInBits() / InVT1.getSizeInBits();
15432 assert(NumConcats >= 2 && "Concat needs at least two inputs!");
15433 SmallVector<SDValue, 2> ConcatOps(NumConcats, DAG.getUNDEF(InVT1));
15434 ConcatOps[0] = VecIn1;
15435 ConcatOps[1] = VecIn2 ? VecIn2 : DAG.getUNDEF(InVT1);
15436 VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
15437 VecIn2 = SDValue();
15438 } else if (InVT1.getSizeInBits() == VT.getSizeInBits() * 2) {
15439 if (!TLI.isExtractSubvectorCheap(VT, InVT1, NumElems))
15440 return SDValue();
15441
15442 if (!VecIn2.getNode()) {
15443 // If we only have one input vector, and it's twice the size of the
15444 // output, split it in two.
15445 VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, VecIn1,
15446 DAG.getConstant(NumElems, DL, IdxTy));
15447 VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, VecIn1, ZeroIdx);
15448 // Since we now have shorter input vectors, adjust the offset of the
15449 // second vector's start.
15450 Vec2Offset = NumElems;
15451 } else if (InVT2.getSizeInBits() <= InVT1.getSizeInBits()) {
15452 // VecIn1 is wider than the output, and we have another, possibly
15453 // smaller input. Pad the smaller input with undefs, shuffle at the
15454 // input vector width, and extract the output.
15455 // The shuffle type is different than VT, so check legality again.
15456 if (LegalOperations &&
15457 !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, InVT1))
15458 return SDValue();
15459
15460 // Legalizing INSERT_SUBVECTOR is tricky - you basically have to
15461 // lower it back into a BUILD_VECTOR. So if the inserted type is
15462 // illegal, don't even try.
15463 if (InVT1 != InVT2) {
15464 if (!TLI.isTypeLegal(InVT2))
15465 return SDValue();
15466 VecIn2 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT1,
15467 DAG.getUNDEF(InVT1), VecIn2, ZeroIdx);
15468 }
15469 ShuffleNumElems = NumElems * 2;
15470 } else {
15471 // Both VecIn1 and VecIn2 are wider than the output, and VecIn2 is wider
15472 // than VecIn1. We can't handle this for now - this case will disappear
15473 // when we start sorting the vectors by type.
15474 return SDValue();
15475 }
15476 } else if (InVT2.getSizeInBits() * 2 == VT.getSizeInBits() &&
15477 InVT1.getSizeInBits() == VT.getSizeInBits()) {
15478 SmallVector<SDValue, 2> ConcatOps(2, DAG.getUNDEF(InVT2));
15479 ConcatOps[0] = VecIn2;
15480 VecIn2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
15481 } else {
15482 // TODO: Support cases where the length mismatch isn't exactly by a
15483 // factor of 2.
15484 // TODO: Move this check upwards, so that if we have bad type
15485 // mismatches, we don't create any DAG nodes.
15486 return SDValue();
15487 }
15488 }
15489
15490 // Initialize mask to undef.
15491 SmallVector<int, 8> Mask(ShuffleNumElems, -1);
15492
15493 // Only need to run up to the number of elements actually used, not the
15494 // total number of elements in the shuffle - if we are shuffling a wider
15495 // vector, the high lanes should be set to undef.
15496 for (unsigned i = 0; i != NumElems; ++i) {
15497 if (VectorMask[i] <= 0)
15498 continue;
15499
15500 unsigned ExtIndex = N->getOperand(i).getConstantOperandVal(1);
15501 if (VectorMask[i] == (int)LeftIdx) {
15502 Mask[i] = ExtIndex;
15503 } else if (VectorMask[i] == (int)LeftIdx + 1) {
15504 Mask[i] = Vec2Offset + ExtIndex;
15505 }
15506 }
15507
15508 // The types of the input vectors may have changed above.
15509 InVT1 = VecIn1.getValueType();
15510
15511 // If we already have a VecIn2, it should have the same type as VecIn1.
15512 // If we don't, get an undef/zero vector of the appropriate type.
15513 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(InVT1);
15514 assert(InVT1 == VecIn2.getValueType() && "Unexpected second input type.");
15515
15516 SDValue Shuffle = DAG.getVectorShuffle(InVT1, DL, VecIn1, VecIn2, Mask);
15517 if (ShuffleNumElems > NumElems)
15518 Shuffle = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuffle, ZeroIdx);
15519
15520 return Shuffle;
15521}
15522
15523// Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
15524// operations. If the types of the vectors we're extracting from allow it,
15525// turn this into a vector_shuffle node.
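// For illustration, a hypothetical input (types chosen arbitrarily):
//   t3: v4i32 = BUILD_VECTOR (extract_vector_elt t1, 0),
//                            (extract_vector_elt t2, 0),
//                            (extract_vector_elt t1, 1),
//                            (extract_vector_elt t2, 1)
// could become, assuming t1 and t2 are v4i32 and the shuffle is legal:
//   t4: v4i32 = vector_shuffle<0,4,1,5> t1, t2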
15526SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
15527 SDLoc DL(N);
15528 EVT VT = N->getValueType(0);
15529
15530 // Only type-legal BUILD_VECTOR nodes are converted to shuffle nodes.
15531 if (!isTypeLegal(VT))
15532 return SDValue();
15533
15534 // May only combine to shuffle after legalize if shuffle is legal.
15535 if (LegalOperations && !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, VT))
15536 return SDValue();
15537
15538 bool UsesZeroVector = false;
15539 unsigned NumElems = N->getNumOperands();
15540
15541 // Record, for each element of the newly built vector, which input vector
15542 // that element comes from. -1 stands for undef, 0 for the zero vector,
15543 // and positive values for the input vectors.
15544 // VectorMask maps each element to its vector number, and VecIn maps vector
15545 // numbers to their initial SDValues.
15546
15547 SmallVector<int, 8> VectorMask(NumElems, -1);
15548 SmallVector<SDValue, 8> VecIn;
15549 VecIn.push_back(SDValue());
15550
15551 for (unsigned i = 0; i != NumElems; ++i) {
15552 SDValue Op = N->getOperand(i);
15553
15554 if (Op.isUndef())
15555 continue;
15556
15557 // See if we can use a blend with a zero vector.
15558 // TODO: Should we generalize this to a blend with an arbitrary constant
15559 // vector?
15560 if (isNullConstant(Op) || isNullFPConstant(Op)) {
15561 UsesZeroVector = true;
15562 VectorMask[i] = 0;
15563 continue;
15564 }
15565
15566 // Not an undef or zero. If the input is something other than an
15567 // EXTRACT_VECTOR_ELT with an in-range constant index, bail out.
15568 if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
15569 !isa<ConstantSDNode>(Op.getOperand(1)))
15570 return SDValue();
15571 SDValue ExtractedFromVec = Op.getOperand(0);
15572
15573 APInt ExtractIdx = cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue();
15574 if (ExtractIdx.uge(ExtractedFromVec.getValueType().getVectorNumElements()))
15575 return SDValue();
15576
15577 // All inputs must have the same element type as the output.
15578 if (VT.getVectorElementType() !=
15579 ExtractedFromVec.getValueType().getVectorElementType())
15580 return SDValue();
15581
15582 // Have we seen this input vector before?
15583 // The vectors are expected to be tiny (usually 1 or 2 elements), so using
15584 // a map back from SDValues to numbers isn't worth it.
15585 unsigned Idx = std::distance(
15586 VecIn.begin(), std::find(VecIn.begin(), VecIn.end(), ExtractedFromVec));
15587 if (Idx == VecIn.size())
15588 VecIn.push_back(ExtractedFromVec);
15589
15590 VectorMask[i] = Idx;
15591 }
15592
15593 // If we didn't find at least one input vector, bail out.
15594 if (VecIn.size() < 2)
15595 return SDValue();
15596
15597 // If all the operands of the BUILD_VECTOR extract from the same
15598 // vector, split that vector efficiently based on the maximum
15599 // vector access index, and adjust the VectorMask and
15600 // VecIn accordingly.
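// For example (hypothetical, assuming v4i64 is legal for the target): a
// v2i64 BUILD_VECTOR extracting elements 0 and 5 of a single v8i64 input
// has MaxIndex = 5, NearestPow2 = 8 and SplitSize = 4, so the input is
// split into two v4i64 halves and the mask entries are remapped to the
// new vectors 1 and 2.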
15601 if (VecIn.size() == 2) {
15602 unsigned MaxIndex = 0;
15603 unsigned NearestPow2 = 0;
15604 SDValue Vec = VecIn.back();
15605 EVT InVT = Vec.getValueType();
15606 MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
15607 SmallVector<unsigned, 8> IndexVec(NumElems, 0);
15608
15609 for (unsigned i = 0; i < NumElems; i++) {
15610 if (VectorMask[i] <= 0)
15611 continue;
15612 unsigned Index = N->getOperand(i).getConstantOperandVal(1);
15613 IndexVec[i] = Index;
15614 MaxIndex = std::max(MaxIndex, Index);
15615 }
15616
15617 NearestPow2 = PowerOf2Ceil(MaxIndex);
15618 if (InVT.isSimple() && NearestPow2 > 2 && MaxIndex < NearestPow2 &&
15619 NumElems * 2 < NearestPow2) {
15620 unsigned SplitSize = NearestPow2 / 2;
15621 EVT SplitVT = EVT::getVectorVT(*DAG.getContext(),
15622 InVT.getVectorElementType(), SplitSize);
15623 if (TLI.isTypeLegal(SplitVT)) {
15624 SDValue VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, Vec,
15625 DAG.getConstant(SplitSize, DL, IdxTy));
15626 SDValue VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, Vec,
15627 DAG.getConstant(0, DL, IdxTy));
15628 VecIn.pop_back();
15629 VecIn.push_back(VecIn1);
15630 VecIn.push_back(VecIn2);
15631
15632 for (unsigned i = 0; i < NumElems; i++) {
15633 if (VectorMask[i] <= 0)
15634 continue;
15635 VectorMask[i] = (IndexVec[i] < SplitSize) ? 1 : 2;
15636 }
15637 }
15638 }
15639 }
15640
15641 // TODO: We want to sort the vectors by descending length, so that adjacent
15642 // pairs have similar length, and the longer vector is always first in the
15643 // pair.
15644
15645 // TODO: Should this fire if some of the input vectors have illegal types
15646 // (like it does now), or should we let legalization run its course first?
15647
15648 // Shuffle phase:
15649 // Take pairs of vectors, and shuffle them so that the result has elements
15650 // from these vectors in the correct places.
15651 // For example, given:
15652 // t10: i32 = extract_vector_elt t1, Constant:i64<0>
15653 // t11: i32 = extract_vector_elt t2, Constant:i64<0>
15654 // t12: i32 = extract_vector_elt t3, Constant:i64<0>
15655 // t13: i32 = extract_vector_elt t1, Constant:i64<1>
15656 // t14: v4i32 = BUILD_VECTOR t10, t11, t12, t13
15657 // We will generate:
15658 // t20: v4i32 = vector_shuffle<0,4,u,1> t1, t2
15659 // t21: v4i32 = vector_shuffle<u,u,0,u> t3, undef
15660 SmallVector<SDValue, 4> Shuffles;
15661 for (unsigned In = 0, Len = (VecIn.size() / 2); In < Len; ++In) {
15662 unsigned LeftIdx = 2 * In + 1;
15663 SDValue VecLeft = VecIn[LeftIdx];
15664 SDValue VecRight =
15665 (LeftIdx + 1) < VecIn.size() ? VecIn[LeftIdx + 1] : SDValue();
15666
15667 if (SDValue Shuffle = createBuildVecShuffle(DL, N, VectorMask, VecLeft,
15668 VecRight, LeftIdx))
15669 Shuffles.push_back(Shuffle);
15670 else
15671 return SDValue();
15672 }
15673
15674 // If we need the zero vector as an "ingredient" in the blend tree, add it
15675 // to the list of shuffles.
15676 if (UsesZeroVector)
15677 Shuffles.push_back(VT.isInteger() ? DAG.getConstant(0, DL, VT)
15678 : DAG.getConstantFP(0.0, DL, VT));
15679
15680 // If we only have one shuffle, we're done.
15681 if (Shuffles.size() == 1)
15682 return Shuffles[0];
15683
15684 // Update the vector mask to point to the post-shuffle vectors.
15685 for (int &Vec : VectorMask)
15686 if (Vec == 0)
15687 Vec = Shuffles.size() - 1;
15688 else
15689 Vec = (Vec - 1) / 2;
15690
15691 // More than one shuffle. Generate a binary tree of blends, e.g. if from
15692 // the previous step we got the set of shuffles t10, t11, t12, t13, we will
15693 // generate:
15694 // t10: v8i32 = vector_shuffle<0,8,u,u,u,u,u,u> t1, t2
15695 // t11: v8i32 = vector_shuffle<u,u,0,8,u,u,u,u> t3, t4
15696 // t12: v8i32 = vector_shuffle<u,u,u,u,0,8,u,u> t5, t6
15697 // t13: v8i32 = vector_shuffle<u,u,u,u,u,u,0,8> t7, t8
15698 // t20: v8i32 = vector_shuffle<0,1,10,11,u,u,u,u> t10, t11
15699 // t21: v8i32 = vector_shuffle<u,u,u,u,4,5,14,15> t12, t13
15700 // t30: v8i32 = vector_shuffle<0,1,2,3,12,13,14,15> t20, t21
15701
15702 // Make sure the initial size of the shuffle list is even.
15703 if (Shuffles.size() % 2)
15704 Shuffles.push_back(DAG.getUNDEF(VT));
15705
15706 for (unsigned CurSize = Shuffles.size(); CurSize > 1; CurSize /= 2) {
15707 if (CurSize % 2) {
15708 Shuffles[CurSize] = DAG.getUNDEF(VT);
15709 CurSize++;
15710 }
15711 for (unsigned In = 0, Len = CurSize / 2; In < Len; ++In) {
15712 int Left = 2 * In;
15713 int Right = 2 * In + 1;
15714 SmallVector<int, 8> Mask(NumElems, -1);
15715 for (unsigned i = 0; i != NumElems; ++i) {
15716 if (VectorMask[i] == Left) {
15717 Mask[i] = i;
15718 VectorMask[i] = In;
15719 } else if (VectorMask[i] == Right) {
15720 Mask[i] = i + NumElems;
15721 VectorMask[i] = In;
15722 }
15723 }
15724
15725 Shuffles[In] =
15726 DAG.getVectorShuffle(VT, DL, Shuffles[Left], Shuffles[Right], Mask);
15727 }
15728 }
15729 return Shuffles[0];
15730}
15731
15732 // Try to turn a build vector of zero extends of extract vector elts into
15733 // a vector zero extend and possibly an extract subvector.
15734// TODO: Support sign extend or any extend?
15735// TODO: Allow undef elements?
15736// TODO: Don't require the extracts to start at element 0.
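// For illustration (hypothetical; X is assumed to be v8i16):
//   (v4i32 build_vector (zext (extract_vector_elt X, 0)),
//                       (zext (extract_vector_elt X, 1)),
//                       (zext (extract_vector_elt X, 2)),
//                       (zext (extract_vector_elt X, 3)))
// becomes:
//   (v4i32 zero_extend (v4i16 extract_subvector X, 0))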
15737SDValue DAGCombiner::convertBuildVecZextToZext(SDNode *N) {
15738 if (LegalOperations)
15739 return SDValue();
15740
15741 EVT VT = N->getValueType(0);
15742
15743 SDValue Op0 = N->getOperand(0);
15744 auto checkElem = [&](SDValue Op) -> int64_t {
15745 if (Op.getOpcode() == ISD::ZERO_EXTEND &&
15746 Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
15747 Op0.getOperand(0).getOperand(0) == Op.getOperand(0).getOperand(0))
15748 if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(0).getOperand(1)))
15749 return C->getZExtValue();
15750 return -1;
15751 };
15752
15753 // Make sure the first element matches
15754 // (zext (extract_vector_elt X, C))
15755 int64_t Offset = checkElem(Op0);
15756 if (Offset < 0)
15757 return SDValue();
15758
15759 unsigned NumElems = N->getNumOperands();
15760 SDValue In = Op0.getOperand(0).getOperand(0);
15761 EVT InSVT = In.getValueType().getScalarType();
15762 EVT InVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumElems);
15763
15764 // Don't create an illegal input type after type legalization.
15765 if (LegalTypes && !TLI.isTypeLegal(InVT))
15766 return SDValue();
15767
15768 // Ensure all the elements come from the same vector and are adjacent.
15769 for (unsigned i = 1; i != NumElems; ++i) {
15770 if ((Offset + i) != checkElem(N->getOperand(i)))
15771 return SDValue();
15772 }
15773
15774 SDLoc DL(N);
15775 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InVT, In,
15776 Op0.getOperand(0).getOperand(1));
15777 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, In);
15778}
15779
15780SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
15781 EVT VT = N->getValueType(0);
15782
15783 // A vector built entirely of undefs is undef.
15784 if (ISD::allOperandsUndef(N))
15785 return DAG.getUNDEF(VT);
15786
15787 // If this is a splat of a bitcast from another vector, change to a
15788 // concat_vector.
15789 // For example:
15790 // (build_vector (i64 (bitcast (v2i32 X))), (i64 (bitcast (v2i32 X)))) ->
15791 // (v2i64 (bitcast (concat_vectors (v2i32 X), (v2i32 X))))
15792 //
15793 // If X is a build_vector itself, the concat can become a larger build_vector.
15794 // TODO: Maybe this is useful for non-splat too?
15795 if (!LegalOperations) {
15796 if (SDValue Splat = cast<BuildVectorSDNode>(N)->getSplatValue()) {
15797 Splat = peekThroughBitcast(Splat);
15798 EVT SrcVT = Splat.getValueType();
15799 if (SrcVT.isVector()) {
15800 unsigned NumElts = N->getNumOperands() * SrcVT.getVectorNumElements();
15801 EVT NewVT = EVT::getVectorVT(*DAG.getContext(),
15802 SrcVT.getVectorElementType(), NumElts);
15803 if (!LegalTypes || TLI.isTypeLegal(NewVT)) {
15804 SmallVector<SDValue, 8> Ops(N->getNumOperands(), Splat);
15805 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N),
15806 NewVT, Ops);
15807 return DAG.getBitcast(VT, Concat);
15808 }
15809 }
15810 }
15811 }
15812
15813 // Check if we can express the BUILD_VECTOR as a subvector extract.
15814 if (!LegalTypes && (N->getNumOperands() > 1)) {
15815 SDValue Op0 = N->getOperand(0);
15816 auto checkElem = [&](SDValue Op) -> uint64_t {
15817 if ((Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) &&
15818 (Op0.getOperand(0) == Op.getOperand(0)))
15819 if (auto CNode = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
15820 return CNode->getZExtValue();
15821 return -1;
15822 };
15823
15824 int Offset = checkElem(Op0);
15825 for (unsigned i = 0; i < N->getNumOperands(); ++i) {
15826 if (Offset + i != checkElem(N->getOperand(i))) {
15827 Offset = -1;
15828 break;
15829 }
15830 }
15831
15832 if ((Offset == 0) &&
15833 (Op0.getOperand(0).getValueType() == N->getValueType(0)))
15834 return Op0.getOperand(0);
15835 if ((Offset != -1) &&
15836 ((Offset % N->getValueType(0).getVectorNumElements()) ==
15837 0)) // IDX must be multiple of output size.
15838 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), N->getValueType(0),
15839 Op0.getOperand(0), Op0.getOperand(1));
15840 }
15841
15842 if (SDValue V = convertBuildVecZextToZext(N))
15843 return V;
15844
15845 if (SDValue V = reduceBuildVecExtToExtBuildVec(N))
15846 return V;
15847
15848 if (SDValue V = reduceBuildVecConvertToConvertBuildVec(N))
15849 return V;
15850
15851 if (SDValue V = reduceBuildVecToShuffle(N))
15852 return V;
15853
15854 return SDValue();
15855}
15856
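// For illustration, a hypothetical fold performed by the helper below,
// assuming v1i64 is not a legal type for the target:
//   (v2i64 concat_vectors (v1i64 bitcast(i64 a)), (v1i64 bitcast(i64 b)))
//   -> (v2i64 build_vector a, b)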
15857static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
15858 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15859 EVT OpVT = N->getOperand(0).getValueType();
15860
15861 // If the operands are legal vectors, leave them alone.
15862 if (TLI.isTypeLegal(OpVT))
15863 return SDValue();
15864
15865 SDLoc DL(N);
15866 EVT VT = N->getValueType(0);
15867 SmallVector<SDValue, 8> Ops;
15868
15869 EVT SVT = EVT::getIntegerVT(*DAG.getContext(), OpVT.getSizeInBits());
15870 SDValue ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);
15871
15872 // Keep track of what we encounter.
15873 bool AnyInteger = false;
15874 bool AnyFP = false;
15875 for (const SDValue &Op : N->ops()) {
15876 if (ISD::BITCAST == Op.getOpcode() &&
15877 !Op.getOperand(0).getValueType().isVector())
15878 Ops.push_back(Op.getOperand(0));
15879 else if (ISD::UNDEF == Op.getOpcode())
15880 Ops.push_back(ScalarUndef);
15881 else
15882 return SDValue();
15883
15884 // Note whether we encounter an integer or floating point scalar.
15885 // If it's neither, bail out, it could be something weird like x86mmx.
15886 EVT LastOpVT = Ops.back().getValueType();
15887 if (LastOpVT.isFloatingPoint())
15888 AnyFP = true;
15889 else if (LastOpVT.isInteger())
15890 AnyInteger = true;
15891 else
15892 return SDValue();
15893 }
15894
15895 // If any of the operands is a floating point scalar bitcast to a vector,
15896 // use floating point types throughout, and bitcast everything.
15897 // Replace UNDEFs by another scalar UNDEF node, of the final desired type.
15898 if (AnyFP) {
15899 SVT = EVT::getFloatingPointVT(OpVT.getSizeInBits());
15900 ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);
15901 if (AnyInteger) {
15902 for (SDValue &Op : Ops) {
15903 if (Op.getValueType() == SVT)
15904 continue;
15905 if (Op.isUndef())
15906 Op = ScalarUndef;
15907 else
15908 Op = DAG.getBitcast(SVT, Op);
15909 }
15910 }
15911 }
15912
15913 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SVT,
15914 VT.getSizeInBits() / SVT.getSizeInBits());
15915 return DAG.getBitcast(VT, DAG.getBuildVector(VecVT, DL, Ops));
15916}
15917
15918// Check to see if this is a CONCAT_VECTORS of a bunch of EXTRACT_SUBVECTOR
15919// operations. If so, and if the EXTRACT_SUBVECTOR vector inputs come from at
15920// most two distinct vectors the same size as the result, attempt to turn this
15921// into a legal shuffle.
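// For example (hypothetical v4i32 inputs A and B):
//   (v4i32 concat_vectors (v2i32 extract_subvector A, 0),
//                         (v2i32 extract_subvector B, 2))
//   -> (v4i32 vector_shuffle<0,1,6,7> A, B)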
15922static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) {
15923 EVT VT = N->getValueType(0);
15924 EVT OpVT = N->getOperand(0).getValueType();
15925 int NumElts = VT.getVectorNumElements();
15926 int NumOpElts = OpVT.getVectorNumElements();
15927
15928 SDValue SV0 = DAG.getUNDEF(VT), SV1 = DAG.getUNDEF(VT);
15929 SmallVector<int, 8> Mask;
15930
15931 for (SDValue Op : N->ops()) {
15932 // Peek through any bitcast.
15933 Op = peekThroughBitcast(Op);
15934
15935 // UNDEF nodes convert to UNDEF shuffle mask values.
15936 if (Op.isUndef()) {
15937 Mask.append((unsigned)NumOpElts, -1);
15938 continue;
15939 }
15940
15941 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
15942 return SDValue();
15943
15944 // What vector are we extracting the subvector from and at what index?
15945 SDValue ExtVec = Op.getOperand(0);
15946
15947 // We want the EVT of the original extraction to correctly scale the
15948 // extraction index.
15949 EVT ExtVT = ExtVec.getValueType();
15950
15951 // Peek through any bitcast.
15952 ExtVec = peekThroughBitcast(ExtVec);
15953
15954 // UNDEF nodes convert to UNDEF shuffle mask values.
15955 if (ExtVec.isUndef()) {
15956 Mask.append((unsigned)NumOpElts, -1);
15957 continue;
15958 }
15959
15960 if (!isa<ConstantSDNode>(Op.getOperand(1)))
15961 return SDValue();
15962 int ExtIdx = Op.getConstantOperandVal(1);
15963
15964 // Ensure that we are extracting a subvector from a vector the same
15965 // size as the result.
15966 if (ExtVT.getSizeInBits() != VT.getSizeInBits())
15967 return SDValue();
15968
15969 // Scale the subvector index to account for any bitcast.
15970 int NumExtElts = ExtVT.getVectorNumElements();
15971 if (0 == (NumExtElts % NumElts))
15972 ExtIdx /= (NumExtElts / NumElts);
15973 else if (0 == (NumElts % NumExtElts))
15974 ExtIdx *= (NumElts / NumExtElts);
15975 else
15976 return SDValue();
15977
15978 // At most we can reference 2 inputs in the final shuffle.
15979 if (SV0.isUndef() || SV0 == ExtVec) {
15980 SV0 = ExtVec;
15981 for (int i = 0; i != NumOpElts; ++i)
15982 Mask.push_back(i + ExtIdx);
15983 } else if (SV1.isUndef() || SV1 == ExtVec) {
15984 SV1 = ExtVec;
15985 for (int i = 0; i != NumOpElts; ++i)
15986 Mask.push_back(i + ExtIdx + NumElts);
15987 } else {
15988 return SDValue();
15989 }
15990 }
15991
15992 if (!DAG.getTargetLoweringInfo().isShuffleMaskLegal(Mask, VT))
15993 return SDValue();
15994
15995 return DAG.getVectorShuffle(VT, SDLoc(N), DAG.getBitcast(VT, SV0),
15996 DAG.getBitcast(VT, SV1), Mask);
15997}
15998
15999SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
16000 // If we only have one input vector, we don't need to do any concatenation.
16001 if (N->getNumOperands() == 1)
16002 return N->getOperand(0);
16003
16004 // Check if all of the operands are undefs.
16005 EVT VT = N->getValueType(0);
16006 if (ISD::allOperandsUndef(N))
16007 return DAG.getUNDEF(VT);
16008
16009 // Optimize concat_vectors where all but the first of the vectors are undef.
16010 if (std::all_of(std::next(N->op_begin()), N->op_end(), [](const SDValue &Op) {
16011 return Op.isUndef();
16012 })) {
16013 SDValue In = N->getOperand(0);
16014 assert(In.getValueType().isVector() && "Must concat vectors");
16015
16016 // Transform: concat_vectors(scalar, undef) -> scalar_to_vector(scalar).
16017 if (In->getOpcode() == ISD::BITCAST &&
16018 !In->getOperand(0).getValueType().isVector()) {
16019 SDValue Scalar = In->getOperand(0);
16020
16021 // If the bitcast type isn't legal, it might be a trunc of a legal type;
16022 // look through the trunc so we can still do the transform:
16023 // concat_vectors(trunc(scalar), undef) -> scalar_to_vector(scalar)
16024 if (Scalar->getOpcode() == ISD::TRUNCATE &&
16025 !TLI.isTypeLegal(Scalar.getValueType()) &&
16026 TLI.isTypeLegal(Scalar->getOperand(0).getValueType()))
16027 Scalar = Scalar->getOperand(0);
16028
16029 EVT SclTy = Scalar->getValueType(0);
16030
16031 if (!SclTy.isFloatingPoint() && !SclTy.isInteger())
16032 return SDValue();
16033
16034 // Bail out if the vector size is not a multiple of the scalar size.
16035 if (VT.getSizeInBits() % SclTy.getSizeInBits())
16036 return SDValue();
16037
16038 unsigned VNTNumElms = VT.getSizeInBits() / SclTy.getSizeInBits();
16039 if (VNTNumElms < 2)
16040 return SDValue();
16041
16042 EVT NVT = EVT::getVectorVT(*DAG.getContext(), SclTy, VNTNumElms);
16043 if (!TLI.isTypeLegal(NVT) || !TLI.isTypeLegal(Scalar.getValueType()))
16044 return SDValue();
16045
16046 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), NVT, Scalar);
16047 return DAG.getBitcast(VT, Res);
16048 }
16049 }
16050
16051 // Fold any combination of BUILD_VECTOR or UNDEF nodes into one BUILD_VECTOR.
16052 // We have already tested above for an UNDEF only concatenation.
16053 // fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
16054 // -> (BUILD_VECTOR A, B, ..., C, D, ...)
16055 auto IsBuildVectorOrUndef = [](const SDValue &Op) {
16056 return ISD::UNDEF == Op.getOpcode() || ISD::BUILD_VECTOR == Op.getOpcode();
16057 };
16058 if (llvm::all_of(N->ops(), IsBuildVectorOrUndef)) {
16059 SmallVector<SDValue, 8> Opnds;
16060 EVT SVT = VT.getScalarType();
16061
16062 EVT MinVT = SVT;
16063 if (!SVT.isFloatingPoint()) {
16064 // If the BUILD_VECTORs are built from integers, they may have different
16065 // operand types. Get the smallest type and truncate all operands to it.
16066 bool FoundMinVT = false;
16067 for (const SDValue &Op : N->ops())
16068 if (ISD::BUILD_VECTOR == Op.getOpcode()) {
16069 EVT OpSVT = Op.getOperand(0).getValueType();
16070 MinVT = (!FoundMinVT || OpSVT.bitsLE(MinVT)) ? OpSVT : MinVT;
16071 FoundMinVT = true;
16072 }
16073 assert(FoundMinVT && "Concat vector type mismatch");
16074 }
16075
16076 for (const SDValue &Op : N->ops()) {
16077 EVT OpVT = Op.getValueType();
16078 unsigned NumElts = OpVT.getVectorNumElements();
16079
16080 if (ISD::UNDEF == Op.getOpcode())
16081 Opnds.append(NumElts, DAG.getUNDEF(MinVT));
16082
16083 if (ISD::BUILD_VECTOR == Op.getOpcode()) {
16084 if (SVT.isFloatingPoint()) {
16085 assert(SVT == OpVT.getScalarType() && "Concat vector type mismatch");
16086 Opnds.append(Op->op_begin(), Op->op_begin() + NumElts);
16087 } else {
16088 for (unsigned i = 0; i != NumElts; ++i)
16089 Opnds.push_back(
16090 DAG.getNode(ISD::TRUNCATE, SDLoc(N), MinVT, Op.getOperand(i)));
16091 }
16092 }
16093 }
16094
16095 assert(VT.getVectorNumElements() == Opnds.size() &&
16096 "Concat vector type mismatch");
16097 return DAG.getBuildVector(VT, SDLoc(N), Opnds);
16098 }
16099
16100 // Fold CONCAT_VECTORS of only bitcast scalars (or undef) to BUILD_VECTOR.
16101 if (SDValue V = combineConcatVectorOfScalars(N, DAG))
16102 return V;
16103
16104 // Fold CONCAT_VECTORS of EXTRACT_SUBVECTOR (or undef) to VECTOR_SHUFFLE.
16105 if (Level < AfterLegalizeVectorOps && TLI.isTypeLegal(VT))
16106 if (SDValue V = combineConcatVectorOfExtracts(N, DAG))
16107 return V;
16108
16109 // Type legalization of vectors and DAG canonicalization of SHUFFLE_VECTOR
16110 // nodes often generate nop CONCAT_VECTOR nodes.
16111 // Scan the CONCAT_VECTOR operands and look for CONCAT operations that
16112 // place the incoming vectors at the exact same location.
16113 SDValue SingleSource = SDValue();
16114 unsigned PartNumElem = N->getOperand(0).getValueType().getVectorNumElements();
16115
16116 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
16117 SDValue Op = N->getOperand(i);
16118
16119 if (Op.isUndef())
16120 continue;
16121
16122 // Check if this is the identity extract:
16123 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
16124 return SDValue();
16125
16126 // Find the single incoming vector for the extract_subvector.
16127 if (SingleSource.getNode()) {
16128 if (Op.getOperand(0) != SingleSource)
16129 return SDValue();
16130 } else {
16131 SingleSource = Op.getOperand(0);
16132
16133 // Check that the source type is the same as the type of the result.
16134 // If not, this concat may extend the vector, so we cannot
16135 // optimize it away.
16136 if (SingleSource.getValueType() != N->getValueType(0))
16137 return SDValue();
16138 }
16139
16140 unsigned IdentityIndex = i * PartNumElem;
16141 ConstantSDNode *CS = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16142 // The extract index must be constant.
16143 if (!CS)
16144 return SDValue();
16145
16146 // Check that we are reading from the identity index.
16147 if (CS->getZExtValue() != IdentityIndex)
16148 return SDValue();
16149 }
16150
16151 if (SingleSource.getNode())
16152 return SingleSource;
16153
16154 return SDValue();
16155}
16156
16157 /// If we are extracting a subvector produced by a wide binary operator with
16158 /// at least one operand that was the result of a vector concatenation, then try
16159/// to use the narrow vector operands directly to avoid the concatenation and
16160/// extraction.
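/// For example, on a hypothetical target where v4i32 AND is legal but v8i32
/// AND is not:
///   (v4i32 extract_subvector (v8i32 and (concat_vectors X1, X2),
///                                       (concat_vectors Y1, Y2)), 4)
///   -> (v4i32 and X2, Y2)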
16161static SDValue narrowExtractedVectorBinOp(SDNode *Extract, SelectionDAG &DAG) {
16162 // TODO: Refactor with the caller (visitEXTRACT_SUBVECTOR), so we can share
16163 // some of these bailouts with other transforms.
16164
16165 // The extract index must be a constant, so we can map it to a concat operand.
16166 auto *ExtractIndex = dyn_cast<ConstantSDNode>(Extract->getOperand(1));
16167 if (!ExtractIndex)
16168 return SDValue();
16169
16170 // Only handle the case where we are doubling and then halving. A larger ratio
16171 // may require more than two narrow binops to replace the wide binop.
16172 EVT VT = Extract->getValueType(0);
16173 unsigned NumElems = VT.getVectorNumElements();
16174 assert((ExtractIndex->getZExtValue() % NumElems) == 0 &&
16175 "Extract index is not a multiple of the vector length.");
16176 if (Extract->getOperand(0).getValueSizeInBits() != VT.getSizeInBits() * 2)
16177 return SDValue();
16178
16179 // We are looking for an optionally bitcasted wide vector binary operator
16180 // feeding an extract subvector.
16181 SDValue BinOp = peekThroughBitcast(Extract->getOperand(0));
16182
16183 // TODO: The motivating case for this transform is an x86 AVX1 target. That
16184 // target has temptingly almost legal versions of bitwise logic ops in 256-bit
16185 // flavors, but no other 256-bit integer support. This could be extended to
16186 // handle any binop, but that may require fixing/adding other folds to avoid
16187 // codegen regressions.
16188 unsigned BOpcode = BinOp.getOpcode();
16189 if (BOpcode != ISD::AND && BOpcode != ISD::OR && BOpcode != ISD::XOR)
16190 return SDValue();
16191
16192 // The binop must be a vector type, so we can chop it in half.
16193 EVT WideBVT = BinOp.getValueType();
16194 if (!WideBVT.isVector())
16195 return SDValue();
16196
16197 // Bail out if the target does not support a narrower version of the binop.
16198 EVT NarrowBVT = EVT::getVectorVT(*DAG.getContext(), WideBVT.getScalarType(),
16199 WideBVT.getVectorNumElements() / 2);
16200 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16201 if (!TLI.isOperationLegalOrCustomOrPromote(BOpcode, NarrowBVT))
16202 return SDValue();
16203
16204 // Peek through bitcasts of the binary operator operands if needed.
16205 SDValue LHS = peekThroughBitcast(BinOp.getOperand(0));
16206 SDValue RHS = peekThroughBitcast(BinOp.getOperand(1));
16207
16208 // We need at least one concatenation operation of a binop operand to make
16209 // this transform worthwhile. The concat must double the input vector sizes.
16210 // TODO: Should we also handle INSERT_SUBVECTOR patterns?
16211 bool ConcatL =
16212 LHS.getOpcode() == ISD::CONCAT_VECTORS && LHS.getNumOperands() == 2;
16213 bool ConcatR =
16214 RHS.getOpcode() == ISD::CONCAT_VECTORS && RHS.getNumOperands() == 2;
16215 if (!ConcatL && !ConcatR)
16216 return SDValue();
16217
16218 // If one of the binop operands was not the result of a concat, we must
16219 // extract a half-sized operand for our new narrow binop. We can't just reuse
16220 // the original extract index operand because we may have bitcasted.
16221 unsigned ConcatOpNum = ExtractIndex->getZExtValue() / NumElems;
16222 unsigned ExtBOIdx = ConcatOpNum * NarrowBVT.getVectorNumElements();
16223 EVT ExtBOIdxVT = Extract->getOperand(1).getValueType();
16224 SDLoc DL(Extract);
16225
16226 // extract (binop (concat X1, X2), (concat Y1, Y2)), N --> binop XN, YN
16227 // extract (binop (concat X1, X2), Y), N --> binop XN, (extract Y, N)
16228 // extract (binop X, (concat Y1, Y2)), N --> binop (extract X, N), YN
16229 SDValue X = ConcatL ? DAG.getBitcast(NarrowBVT, LHS.getOperand(ConcatOpNum))
16230 : DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NarrowBVT,
16231 BinOp.getOperand(0),
16232 DAG.getConstant(ExtBOIdx, DL, ExtBOIdxVT));
16233
16234 SDValue Y = ConcatR ? DAG.getBitcast(NarrowBVT, RHS.getOperand(ConcatOpNum))
16235 : DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NarrowBVT,
16236 BinOp.getOperand(1),
16237 DAG.getConstant(ExtBOIdx, DL, ExtBOIdxVT));
16238
16239 SDValue NarrowBinOp = DAG.getNode(BOpcode, DL, NarrowBVT, X, Y);
16240 return DAG.getBitcast(VT, NarrowBinOp);
16241}
16242
16243/// If we are extracting a subvector from a wide vector load, convert to a
16244/// narrow load to eliminate the extraction:
16245/// (extract_subvector (load wide vector)) --> (load narrow vector)
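/// For example (little-endian, hypothetical types):
///   (v2f64 extract_subvector (v4f64 load %ptr), 2)
/// becomes a v2f64 load from %ptr plus 16 bytes (2 * sizeof(f64)).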
16246static SDValue narrowExtractedVectorLoad(SDNode *Extract, SelectionDAG &DAG) {
16247 // TODO: Add support for big-endian. The offset calculation must be adjusted.
16248 if (DAG.getDataLayout().isBigEndian())
16249 return SDValue();
16250
16251 // TODO: The one-use check is overly conservative. Check the cost of the
16252 // extract instead or remove that condition entirely.
16253 auto *Ld = dyn_cast<LoadSDNode>(Extract->getOperand(0));
16254 auto *ExtIdx = dyn_cast<ConstantSDNode>(Extract->getOperand(1));
16255 if (!Ld || !Ld->hasOneUse() || Ld->getExtensionType() || Ld->isVolatile() ||
16256 !ExtIdx)
16257 return SDValue();
16258
16259 // The narrow load will be offset from the base address of the old load if
16260 // we are extracting from something besides index 0 (little-endian).
16261 EVT VT = Extract->getValueType(0);
16262 SDLoc DL(Extract);
16263 SDValue BaseAddr = Ld->getOperand(1);
16264 unsigned Offset = ExtIdx->getZExtValue() * VT.getScalarType().getStoreSize();
16265
16266 // TODO: Use "BaseIndexOffset" to make this more effective.
16267 SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
16268 MachineFunction &MF = DAG.getMachineFunction();
16269 MachineMemOperand *MMO = MF.getMachineMemOperand(Ld->getMemOperand(), Offset,
16270 VT.getStoreSize());
16271 SDValue NewLd = DAG.getLoad(VT, DL, Ld->getChain(), NewAddr, MMO);
16272 DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
16273 return NewLd;
16274}
16275
16276SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
16277 EVT NVT = N->getValueType(0);
16278 SDValue V = N->getOperand(0);
16279
16280 // Extract from UNDEF is UNDEF.
16281 if (V.isUndef())
16282 return DAG.getUNDEF(NVT);
16283
16284 if (TLI.isOperationLegalOrCustomOrPromote(ISD::LOAD, NVT))
16285 if (SDValue NarrowLoad = narrowExtractedVectorLoad(N, DAG))
16286 return NarrowLoad;
16287
16288 // Combine:
16289 // (extract_subvec (concat V1, V2, ...), i)
16290 // Into:
16291 // Vi if possible
16292 // Only operand 0 is checked as 'concat' assumes all inputs of the same
16293 // type.
16294 if (V->getOpcode() == ISD::CONCAT_VECTORS &&
16295 isa<ConstantSDNode>(N->getOperand(1)) &&
16296 V->getOperand(0).getValueType() == NVT) {
16297 unsigned Idx = N->getConstantOperandVal(1);
16298 unsigned NumElems = NVT.getVectorNumElements();
16299 assert((Idx % NumElems) == 0 &&
16300 "IDX in concat is not a multiple of the result vector length.");
16301 return V->getOperand(Idx / NumElems);
16302 }
16303
16304 // Skip bitcasting
16305 V = peekThroughBitcast(V);
16306
16307 // If the input is a build vector, try to make a smaller build vector.
16308 if (V->getOpcode() == ISD::BUILD_VECTOR) {
16309 if (auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
16310 EVT InVT = V->getValueType(0);
16311 unsigned ExtractSize = NVT.getSizeInBits();
16312 unsigned EltSize = InVT.getScalarSizeInBits();
16313 // Only do this if we won't split any elements.
16314 if (ExtractSize % EltSize == 0) {
16315 unsigned NumElems = ExtractSize / EltSize;
16316 EVT EltVT = InVT.getVectorElementType();
16317 EVT ExtractVT = NumElems == 1 ? EltVT :
16318 EVT::getVectorVT(*DAG.getContext(), EltVT, NumElems);
16319 if ((Level < AfterLegalizeDAG ||
16320 (NumElems == 1 ||
16321 TLI.isOperationLegal(ISD::BUILD_VECTOR, ExtractVT))) &&
16322 (!LegalTypes || TLI.isTypeLegal(ExtractVT))) {
16323 unsigned IdxVal = (Idx->getZExtValue() * NVT.getScalarSizeInBits()) /
16324 EltSize;
16325 if (NumElems == 1) {
16326 SDValue Src = V->getOperand(IdxVal);
16327 if (EltVT != Src.getValueType())
16328 Src = DAG.getNode(ISD::TRUNCATE, SDLoc(N), InVT, Src);
16329
16330 return DAG.getBitcast(NVT, Src);
16331 }
16332
16333 // Extract the pieces from the original build_vector.
16334 SDValue BuildVec = DAG.getBuildVector(ExtractVT, SDLoc(N),
16335 makeArrayRef(V->op_begin() + IdxVal,
16336 NumElems));
16337 return DAG.getBitcast(NVT, BuildVec);
16338 }
16339 }
16340 }
16341 }
16342
16343 if (V->getOpcode() == ISD::INSERT_SUBVECTOR) {
16344 // Handle only the simple case where the vector being inserted and the
16345 // vector being extracted are the same size.
16346 EVT SmallVT = V->getOperand(1).getValueType();
16347 if (!NVT.bitsEq(SmallVT))
16348 return SDValue();
16349
16350 // Only handle cases where both indexes are constants.
16351 ConstantSDNode *ExtIdx = dyn_cast<ConstantSDNode>(N->getOperand(1));
16352 ConstantSDNode *InsIdx = dyn_cast<ConstantSDNode>(V->getOperand(2));
16353
16354 if (InsIdx && ExtIdx) {
16355 // Combine:
16356 // (extract_subvec (insert_subvec V1, V2, InsIdx), ExtIdx)
16357 // Into:
16358 // indices are equal or bit offsets are equal => V1
16359 // otherwise => (extract_subvec V1, ExtIdx)
16360 if (InsIdx->getZExtValue() * SmallVT.getScalarSizeInBits() ==
16361 ExtIdx->getZExtValue() * NVT.getScalarSizeInBits())
16362 return DAG.getBitcast(NVT, V->getOperand(1));
16363 return DAG.getNode(
16364 ISD::EXTRACT_SUBVECTOR, SDLoc(N), NVT,
16365 DAG.getBitcast(N->getOperand(0).getValueType(), V->getOperand(0)),
16366 N->getOperand(1));
16367 }
16368 }
16369
16370 if (SDValue NarrowBOp = narrowExtractedVectorBinOp(N, DAG))
16371 return NarrowBOp;
16372
16373 if (SimplifyDemandedVectorElts(SDValue(N, 0)))
16374 return SDValue(N, 0);
16375
16376 return SDValue();
16377}
16378
16379 // Tries to turn a shuffle of two CONCAT_VECTORS into a single concat, or to
16380 // turn a shuffle of a single concat into a simpler shuffle followed by a concat.
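// For example (hypothetical v2i32 inputs A and B):
//   (v4i32 vector_shuffle<2,3,0,1> (concat_vectors A, B), undef)
//   -> (v4i32 concat_vectors B, A)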
16381static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) {
16382 EVT VT = N->getValueType(0);
16383 unsigned NumElts = VT.getVectorNumElements();
16384
16385 SDValue N0 = N->getOperand(0);
16386 SDValue N1 = N->getOperand(1);
16387 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
16388
16389 SmallVector<SDValue, 4> Ops;
16390 EVT ConcatVT = N0.getOperand(0).getValueType();
16391 unsigned NumElemsPerConcat = ConcatVT.getVectorNumElements();
16392 unsigned NumConcats = NumElts / NumElemsPerConcat;
16393
16394 // Special case: shuffle(concat(A,B)) can be more efficiently represented
16395 // as concat(shuffle(A,B),UNDEF) if the shuffle doesn't set any of the high
16396 // half vector elements.
16397 if (NumElemsPerConcat * 2 == NumElts && N1.isUndef() &&
16398 std::all_of(SVN->getMask().begin() + NumElemsPerConcat,
16399 SVN->getMask().end(), [](int i) { return i == -1; })) {
16400 N0 = DAG.getVectorShuffle(ConcatVT, SDLoc(N), N0.getOperand(0), N0.getOperand(1),
16401 makeArrayRef(SVN->getMask().begin(), NumElemsPerConcat));
16402 N1 = DAG.getUNDEF(ConcatVT);
16403 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, N0, N1);
16404 }
16405
16406 // Look at every vector that's inserted. We're looking for exact
16407 // subvector-sized copies from a concatenated vector.
16408 for (unsigned I = 0; I != NumConcats; ++I) {
16409 // Make sure we're dealing with a copy.
16410 unsigned Begin = I * NumElemsPerConcat;
16411 bool AllUndef = true, NoUndef = true;
16412 for (unsigned J = Begin; J != Begin + NumElemsPerConcat; ++J) {
16413 if (SVN->getMaskElt(J) >= 0)
16414 AllUndef = false;
16415 else
16416 NoUndef = false;
16417 }
16418
16419 if (NoUndef) {
16420 if (SVN->getMaskElt(Begin) % NumElemsPerConcat != 0)
16421 return SDValue();
16422
16423 for (unsigned J = 1; J != NumElemsPerConcat; ++J)
16424 if (SVN->getMaskElt(Begin + J - 1) + 1 != SVN->getMaskElt(Begin + J))
16425 return SDValue();
16426
16427 unsigned FirstElt = SVN->getMaskElt(Begin) / NumElemsPerConcat;
16428 if (FirstElt < N0.getNumOperands())
16429 Ops.push_back(N0.getOperand(FirstElt));
16430 else
16431 Ops.push_back(N1.getOperand(FirstElt - N0.getNumOperands()));
16432
16433 } else if (AllUndef) {
16434 Ops.push_back(DAG.getUNDEF(N0.getOperand(0).getValueType()));
16435 } else { // Mixed with general masks and undefs, can't do optimization.
16436 return SDValue();
16437 }
16438 }
16439
16440 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
16441}
16442
16443// Attempt to combine a shuffle of 2 inputs of 'scalar sources' -
16444// BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR.
16445//
16446// SHUFFLE(BUILD_VECTOR(), BUILD_VECTOR()) -> BUILD_VECTOR() is always
16447// a simplification in some sense, but it isn't appropriate in general: some
16448// BUILD_VECTORs are substantially cheaper than others. The general case
16449// of a BUILD_VECTOR requires inserting each element individually (or
16450// performing the equivalent in a temporary stack variable). A BUILD_VECTOR of
16451// all constants is a single constant pool load. A BUILD_VECTOR where each
16452// element is identical is a splat. A BUILD_VECTOR where most of the operands
16453// are undef lowers to a small number of element insertions.
16454//
16455// To deal with this, we currently use a bunch of mostly arbitrary heuristics.
16456// We don't fold shuffles where one side is a non-zero constant, and we don't
16457// fold shuffles if the resulting (non-splat) BUILD_VECTOR would have duplicate
16458// non-constant operands. This seems to work out reasonably well in practice.
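// For example (hypothetical scalars a, b, c, d; u = undef):
//   shuffle<0,4,1,5> (BUILD_VECTOR a, b, u, u), (BUILD_VECTOR c, d, u, u)
//   -> BUILD_VECTOR a, c, b, d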
16459static SDValue combineShuffleOfScalars(ShuffleVectorSDNode *SVN,
16460 SelectionDAG &DAG,
16461 const TargetLowering &TLI) {
16462 EVT VT = SVN->getValueType(0);
16463 unsigned NumElts = VT.getVectorNumElements();
16464 SDValue N0 = SVN->getOperand(0);
16465 SDValue N1 = SVN->getOperand(1);
16466
16467 if (!N0->hasOneUse() || !N1->hasOneUse())
16468 return SDValue();
16469
16470 // If only one of N1,N2 is constant, bail out if it is not ALL_ZEROS as
16471 // discussed above.
16472 if (!N1.isUndef()) {
16473 bool N0AnyConst = isAnyConstantBuildVector(N0.getNode());
16474 bool N1AnyConst = isAnyConstantBuildVector(N1.getNode());
16475 if (N0AnyConst && !N1AnyConst && !ISD::isBuildVectorAllZeros(N0.getNode()))
16476 return SDValue();
16477 if (!N0AnyConst && N1AnyConst && !ISD::isBuildVectorAllZeros(N1.getNode()))
16478 return SDValue();
16479 }
16480
16481 // If both inputs are splats of the same value then we can safely merge this
16482 // to a single BUILD_VECTOR with undef elements based on the shuffle mask.
16483 bool IsSplat = false;
16484 auto *BV0 = dyn_cast<BuildVectorSDNode>(N0);
16485 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
16486 if (BV0 && BV1)
16487 if (SDValue Splat0 = BV0->getSplatValue())
16488 IsSplat = (Splat0 == BV1->getSplatValue());
16489
16490 SmallVector<SDValue, 8> Ops;
16491 SmallSet<SDValue, 16> DuplicateOps;
16492 for (int M : SVN->getMask()) {
16493 SDValue Op = DAG.getUNDEF(VT.getScalarType());
16494 if (M >= 0) {
16495 int Idx = M < (int)NumElts ? M : M - NumElts;
16496 SDValue &S = (M < (int)NumElts ? N0 : N1);
16497 if (S.getOpcode() == ISD::BUILD_VECTOR) {
16498 Op = S.getOperand(Idx);
16499 } else if (S.getOpcode() == ISD::SCALAR_TO_VECTOR) {
16500 assert(Idx == 0 && "Unexpected SCALAR_TO_VECTOR operand index.");
16501 Op = S.getOperand(0);
16502 } else {
16503 // Operand can't be combined - bail out.
16504 return SDValue();
16505 }
16506 }
16507
16508 // Don't duplicate a non-constant BUILD_VECTOR operand unless we're
16509 // generating a splat; semantically, this is fine, but it's likely to
16510 // generate low-quality code if the target can't reconstruct an appropriate
16511 // shuffle.
16512 if (!Op.isUndef() && !isa<ConstantSDNode>(Op) && !isa<ConstantFPSDNode>(Op))
16513 if (!IsSplat && !DuplicateOps.insert(Op).second)
16514 return SDValue();
16515
16516 Ops.push_back(Op);
16517 }
16518
16519 // BUILD_VECTOR requires all inputs to be of the same type, find the
16520 // maximum type and extend them all.
16521 EVT SVT = VT.getScalarType();
16522 if (SVT.isInteger())
16523 for (SDValue &Op : Ops)
16524 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
16525 if (SVT != VT.getScalarType())
16526 for (SDValue &Op : Ops)
16527 Op = TLI.isZExtFree(Op.getValueType(), SVT)
16528 ? DAG.getZExtOrTrunc(Op, SDLoc(SVN), SVT)
16529 : DAG.getSExtOrTrunc(Op, SDLoc(SVN), SVT);
16530 return DAG.getBuildVector(VT, SDLoc(SVN), Ops);
16531}
16532
16533// Match shuffles that can be converted to any_vector_extend_in_reg.
16534// This is often generated during legalization.
16535// e.g. v4i32 <0,u,1,u> -> (v2i64 any_vector_extend_in_reg(v4i32 src))
16536// TODO Add support for ZERO_EXTEND_VECTOR_INREG when we have a test case.
16537static SDValue combineShuffleToVectorExtend(ShuffleVectorSDNode *SVN,
16538 SelectionDAG &DAG,
16539 const TargetLowering &TLI,
16540 bool LegalOperations,
16541 bool LegalTypes) {
16542 EVT VT = SVN->getValueType(0);
16543 bool IsBigEndian = DAG.getDataLayout().isBigEndian();
16544
16545 // TODO Add support for big-endian when we have a test case.
16546 if (!VT.isInteger() || IsBigEndian)
16547 return SDValue();
16548
16549 unsigned NumElts = VT.getVectorNumElements();
16550 unsigned EltSizeInBits = VT.getScalarSizeInBits();
16551 ArrayRef<int> Mask = SVN->getMask();
16552 SDValue N0 = SVN->getOperand(0);
16553
16554 // shuffle<0,-1,1,-1> == (v2i64 anyextend_vector_inreg(v4i32))
16555 auto isAnyExtend = [&Mask, &NumElts](unsigned Scale) {
16556 for (unsigned i = 0; i != NumElts; ++i) {
16557 if (Mask[i] < 0)
16558 continue;
16559 if ((i % Scale) == 0 && Mask[i] == (int)(i / Scale))
16560 continue;
16561 return false;
16562 }
16563 return true;
16564 };
16565
16566 // Attempt to match a '*_extend_vector_inreg' shuffle, we just search for
16567 // power-of-2 extensions as they are the most likely.
16568 for (unsigned Scale = 2; Scale < NumElts; Scale *= 2) {
16569 // Check for non power of 2 vector sizes
16570 if (NumElts % Scale != 0)
16571 continue;
16572 if (!isAnyExtend(Scale))
16573 continue;
16574
16575 EVT OutSVT = EVT::getIntegerVT(*DAG.getContext(), EltSizeInBits * Scale);
16576 EVT OutVT = EVT::getVectorVT(*DAG.getContext(), OutSVT, NumElts / Scale);
16577 if (!LegalTypes || TLI.isTypeLegal(OutVT))
16578 if (!LegalOperations ||
16579 TLI.isOperationLegalOrCustom(ISD::ANY_EXTEND_VECTOR_INREG, OutVT))
16580 return DAG.getBitcast(VT,
16581 DAG.getAnyExtendVectorInReg(N0, SDLoc(SVN), OutVT));
16582 }
16583
16584 return SDValue();
16585}
16586
16587// Detect 'truncate_vector_inreg' style shuffles that pack the lower parts of
16588// each source element of a large type into the lowest elements of a smaller
16589// destination type. This is often generated during legalization.
16590 // If the source node itself was a '*_extend_vector_inreg' node then we
16591 // should be able to remove it.
16592static SDValue combineTruncationShuffle(ShuffleVectorSDNode *SVN,
16593 SelectionDAG &DAG) {
16594 EVT VT = SVN->getValueType(0);
16595 bool IsBigEndian = DAG.getDataLayout().isBigEndian();
16596
16597 // TODO Add support for big-endian when we have a test case.
16598 if (!VT.isInteger() || IsBigEndian)
16599 return SDValue();
16600
16601 SDValue N0 = peekThroughBitcast(SVN->getOperand(0));
16602
16603 unsigned Opcode = N0.getOpcode();
16604 if (Opcode != ISD::ANY_EXTEND_VECTOR_INREG &&
16605 Opcode != ISD::SIGN_EXTEND_VECTOR_INREG &&
16606 Opcode != ISD::ZERO_EXTEND_VECTOR_INREG)
16607 return SDValue();
16608
16609 SDValue N00 = N0.getOperand(0);
16610 ArrayRef<int> Mask = SVN->getMask();
16611 unsigned NumElts = VT.getVectorNumElements();
16612 unsigned EltSizeInBits = VT.getScalarSizeInBits();
16613 unsigned ExtSrcSizeInBits = N00.getScalarValueSizeInBits();
16614 unsigned ExtDstSizeInBits = N0.getScalarValueSizeInBits();
16615
16616 if (ExtDstSizeInBits % ExtSrcSizeInBits != 0)
16617 return SDValue();
16618 unsigned ExtScale = ExtDstSizeInBits / ExtSrcSizeInBits;
16619
16620 // (v4i32 truncate_vector_inreg(v2i64)) == shuffle<0,2-1,-1>
16621 // (v8i16 truncate_vector_inreg(v4i32)) == shuffle<0,2,4,6,-1,-1,-1,-1>
16622 // (v8i16 truncate_vector_inreg(v2i64)) == shuffle<0,4,-1,-1,-1,-1,-1,-1>
16623 auto isTruncate = [&Mask, &NumElts](unsigned Scale) {
16624 for (unsigned i = 0; i != NumElts; ++i) {
16625 if (Mask[i] < 0)
16626 continue;
16627 if ((i * Scale) < NumElts && Mask[i] == (int)(i * Scale))
16628 continue;
16629 return false;
16630 }
16631 return true;
16632 };
16633
16634 // At the moment we just handle the case where we've truncated back to the
16635 // same size as before the extension.
16636 // TODO: handle more extension/truncation cases as cases arise.
16637 if (EltSizeInBits != ExtSrcSizeInBits)
16638 return SDValue();
16639
16640 // We can remove *extend_vector_inreg only if the truncation happens at
16641 // the same scale as the extension.
16642 if (isTruncate(ExtScale))
16643 return DAG.getBitcast(VT, N00);
16644
16645 return SDValue();
16646}
16647
16648// Combine shuffles of splat-shuffles of the form:
16649// shuffle (shuffle V, undef, splat-mask), undef, M
16650// If splat-mask contains undef elements, we need to be careful about
16651 // introducing undefs in the folded mask that are not the result of composing
16652// the masks of the shuffles.
16653static SDValue combineShuffleOfSplat(ArrayRef<int> UserMask,
16654 ShuffleVectorSDNode *Splat,
16655 SelectionDAG &DAG) {
16656 ArrayRef<int> SplatMask = Splat->getMask();
16657 assert(UserMask.size() == SplatMask.size() && "Mask length mismatch");
16658
16659 // Prefer simplifying to the splat-shuffle, if possible. This is legal if
16660 // every undef mask element in the splat-shuffle has a corresponding undef
16661 // element in the user-shuffle's mask or if the composition of mask elements
16662 // would result in undef.
16663 // Examples for (shuffle (shuffle v, undef, SplatMask), undef, UserMask):
16664 // * UserMask=[0,2,u,u], SplatMask=[2,u,2,u] -> [2,2,u,u]
16665 // In this case it is not legal to simplify to the splat-shuffle because we
16666 // may be exposing to the users of the shuffle an undef element at index 1
16667 // that was not there before the combine.
16668 // * UserMask=[0,u,2,u], SplatMask=[2,u,2,u] -> [2,u,2,u]
16669 // In this case the composition of masks yields SplatMask, so it's ok to
16670 // simplify to the splat-shuffle.
16671 // * UserMask=[3,u,2,u], SplatMask=[2,u,2,u] -> [u,u,2,u]
16672 // In this case the composed mask includes all undef elements of SplatMask
16673 // and in addition sets element zero to undef. It is safe to simplify to
16674 // the splat-shuffle.
16675 auto CanSimplifyToExistingSplat = [](ArrayRef<int> UserMask,
16676 ArrayRef<int> SplatMask) {
16677 for (unsigned i = 0, e = UserMask.size(); i != e; ++i)
16678 if (UserMask[i] != -1 && SplatMask[i] == -1 &&
16679 SplatMask[UserMask[i]] != -1)
16680 return false;
16681 return true;
16682 };
16683 if (CanSimplifyToExistingSplat(UserMask, SplatMask))
16684 return SDValue(Splat, 0);
16685
16686 // Create a new shuffle with a mask that is composed of the two shuffles'
16687 // masks.
16688 SmallVector<int, 32> NewMask;
16689 for (int Idx : UserMask)
16690 NewMask.push_back(Idx == -1 ? -1 : SplatMask[Idx]);
16691
16692 return DAG.getVectorShuffle(Splat->getValueType(0), SDLoc(Splat),
16693 Splat->getOperand(0), Splat->getOperand(1),
16694 NewMask);
16695}
16696
16697/// If the shuffle mask is taking exactly one element from the first vector
16698/// operand and passing through all other elements from the second vector
16699/// operand, return the index of the mask element that is choosing an element
16700/// from the first operand. Otherwise, return -1.
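/// For example, with Mask = <4,1,6,7> and two v4i32 operands, only element 1
/// selects from operand 0 (1 < 4), while elements 0, 2 and 3 pass through
/// operand 1 unchanged (4 == 0+4, 6 == 2+4, 7 == 3+4), so this returns 1.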
16701static int getShuffleMaskIndexOfOneElementFromOp0IntoOp1(ArrayRef<int> Mask) {
16702 int MaskSize = Mask.size();
16703 int EltFromOp0 = -1;
16704 // TODO: This does not match if there are undef elements in the shuffle mask.
16705 // Should we ignore undefs in the shuffle mask instead? The trade-off is
16706 // removing an instruction (a shuffle), but losing the knowledge that some
16707 // vector lanes are not needed.
16708 for (int i = 0; i != MaskSize; ++i) {
16709 if (Mask[i] >= 0 && Mask[i] < MaskSize) {
16710 // We're looking for a shuffle of exactly one element from operand 0.
16711 if (EltFromOp0 != -1)
16712 return -1;
16713 EltFromOp0 = i;
16714 } else if (Mask[i] != i + MaskSize) {
16715 // Nothing from operand 1 can change lanes.
16716 return -1;
16717 }
16718 }
16719 return EltFromOp0;
16720}
16721
16722/// If a shuffle inserts exactly one element from a source vector operand into
16723/// another vector operand and we can access the specified element as a scalar,
16724/// then we can eliminate the shuffle.
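/// For example (hypothetical v4i32 operands):
///   shuffle (insert_vector_elt v1, x, 0), v2, <0,5,6,7>
///   -> insert_vector_elt v2, x, 0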
16725static SDValue replaceShuffleOfInsert(ShuffleVectorSDNode *Shuf,
16726 SelectionDAG &DAG) {
16727 // First, check if we are taking one element of a vector and shuffling that
16728 // element into another vector.
16729 ArrayRef<int> Mask = Shuf->getMask();
16730 SmallVector<int, 16> CommutedMask(Mask.begin(), Mask.end());
16731 SDValue Op0 = Shuf->getOperand(0);
16732 SDValue Op1 = Shuf->getOperand(1);
16733 int ShufOp0Index = getShuffleMaskIndexOfOneElementFromOp0IntoOp1(Mask);
16734 if (ShufOp0Index == -1) {
16735 // Commute mask and check again.
16736 ShuffleVectorSDNode::commuteMask(CommutedMask);
16737 ShufOp0Index = getShuffleMaskIndexOfOneElementFromOp0IntoOp1(CommutedMask);
16738 if (ShufOp0Index == -1)
16739 return SDValue();
16740 // Commute operands to match the commuted shuffle mask.
16741 std::swap(Op0, Op1);
16742 Mask = CommutedMask;
16743 }
16744
16745 // The shuffle inserts exactly one element from operand 0 into operand 1.
16746 // Now see if we can access that element as a scalar via a real insert element
16747 // instruction.
16748 // TODO: We can try harder to locate the element as a scalar. Examples: it
16749 // could be an operand of SCALAR_TO_VECTOR, BUILD_VECTOR, or a constant.
16750 assert(Mask[ShufOp0Index] >= 0 && Mask[ShufOp0Index] < (int)Mask.size() &&
16751        "Shuffle mask value must be from operand 0");
16752 if (Op0.getOpcode() != ISD::INSERT_VECTOR_ELT)
16753 return SDValue();
16754
16755 auto *InsIndexC = dyn_cast<ConstantSDNode>(Op0.getOperand(2));
16756 if (!InsIndexC || InsIndexC->getSExtValue() != Mask[ShufOp0Index])
16757 return SDValue();
16758
16759 // There's an existing insertelement with constant insertion index, so we
16760 // don't need to check the legality/profitability of a replacement operation
16761 // that differs at most in the constant value. The target should be able to
16762 // lower any of those in a similar way. If not, legalization will expand this
16763 // to a scalar-to-vector plus shuffle.
16764 //
16765 // Note that the shuffle may move the scalar from the position that the insert
16766 // element used. Therefore, our new insert element occurs at the shuffle's
16767 // mask index value, not the insert's index value.
16768 // shuffle (insertelt v1, x, C), v2, mask --> insertelt v2, x, C'
16769 SDValue NewInsIndex = DAG.getConstant(ShufOp0Index, SDLoc(Shuf),
16770 Op0.getOperand(2).getValueType());
16771 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Shuf), Op0.getValueType(),
16772 Op1, Op0.getOperand(1), NewInsIndex);
16773}
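The effect of the fold can be checked on plain arrays. This standalone sketch (shuffle and insertelt here are toy models over int arrays, not the DAG nodes) shows that for mask <4,2,6,7> the insert index C=2 matches Mask[ShufOp0Index], and the scalar lands at the shuffle's mask index 1:

#include <array>
#include <cassert>

using V4 = std::array<int, 4>;

// Toy 4-lane shuffle of two "vectors" (mask values 0-3 pick from A,
// 4-7 from B; no undef lanes in this example).
static V4 shuffle(const V4 &A, const V4 &B, const std::array<int, 4> &Mask) {
  V4 R{};
  for (int i = 0; i != 4; ++i)
    R[i] = Mask[i] < 4 ? A[Mask[i]] : B[Mask[i] - 4];
  return R;
}

static V4 insertelt(V4 V, int X, int Idx) {
  V[Idx] = X;
  return V;
}

int main() {
  V4 V1{10, 11, 12, 13}, V2{20, 21, 22, 23};
  int X = 99;
  // shuffle (insertelt V1, X, 2), V2, <4,2,6,7> == insertelt V2, X, 1:
  // the scalar moves from lane 2 of the insert to lane 1 of the result.
  assert(shuffle(insertelt(V1, X, 2), V2, {4, 2, 6, 7}) ==
         insertelt(V2, X, 1));
  return 0;
}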
16774
16775SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
16776 EVT VT = N->getValueType(0);
16777 unsigned NumElts = VT.getVectorNumElements();
16778
16779 SDValue N0 = N->getOperand(0);
16780 SDValue N1 = N->getOperand(1);
16781
16782 assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG");
16783
16784 // Canonicalize shuffle undef, undef -> undef
16785 if (N0.isUndef() && N1.isUndef())
16786 return DAG.getUNDEF(VT);
16787
16788 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
16789
16790 // Canonicalize shuffle v, v -> v, undef
16791 if (N0 == N1) {
16792 SmallVector<int, 8> NewMask;
16793 for (unsigned i = 0; i != NumElts; ++i) {
16794 int Idx = SVN->getMaskElt(i);
16795 if (Idx >= (int)NumElts) Idx -= NumElts;
16796 NewMask.push_back(Idx);
16797 }
16798 return DAG.getVectorShuffle(VT, SDLoc(N), N0, DAG.getUNDEF(VT), NewMask);
16799 }
16800
16801 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
16802 if (N0.isUndef())
16803 return DAG.getCommutedVectorShuffle(*SVN);
16804
16805 // Remove references to rhs if it is undef
16806 if (N1.isUndef()) {
16807 bool Changed = false;
16808 SmallVector<int, 8> NewMask;
16809 for (unsigned i = 0; i != NumElts; ++i) {
16810 int Idx = SVN->getMaskElt(i);
16811 if (Idx >= (int)NumElts) {
16812 Idx = -1;
16813 Changed = true;
16814 }
16815 NewMask.push_back(Idx);
16816 }
16817 if (Changed)
16818 return DAG.getVectorShuffle(VT, SDLoc(N), N0, N1, NewMask);
16819 }
16820
16821 if (SDValue InsElt = replaceShuffleOfInsert(SVN, DAG))
16822 return InsElt;
16823
16824 // A shuffle of a single vector that is a splat can always be folded.
16825 if (auto *N0Shuf = dyn_cast<ShuffleVectorSDNode>(N0))
16826 if (N1->isUndef() && N0Shuf->isSplat())
16827 return combineShuffleOfSplat(SVN->getMask(), N0Shuf, DAG);
16828
16829 // If it is a splat, check if the argument vector is another splat or a
16830 // build_vector.
16831 if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
16832 SDNode *V = N0.getNode();
16833
16834 // If this is a bit convert that changes the element type of the vector but
16835 // not the number of vector elements, look through it. Be careful not to
16836 // look through conversions that change things like v4f32 to v2f64.
16837 if (V->getOpcode() == ISD::BITCAST) {
16838 SDValue ConvInput = V->getOperand(0);
16839 if (ConvInput.getValueType().isVector() &&
16840 ConvInput.getValueType().getVectorNumElements() == NumElts)
16841 V = ConvInput.getNode();
16842 }
16843
16844 if (V->getOpcode() == ISD::BUILD_VECTOR) {
16845 assert(V->getNumOperands() == NumElts &&
16846        "BUILD_VECTOR has wrong number of operands");
16847 SDValue Base;
16848 bool AllSame = true;
16849 for (unsigned i = 0; i != NumElts; ++i) {
16850 if (!V->getOperand(i).isUndef()) {
16851 Base = V->getOperand(i);
16852 break;
16853 }
16854 }
16855 // Splat of <u, u, u, u>, return <u, u, u, u>
16856 if (!Base.getNode())
16857 return N0;
16858 for (unsigned i = 0; i != NumElts; ++i) {
16859 if (V->getOperand(i) != Base) {
16860 AllSame = false;
16861 break;
16862 }
16863 }
16864 // Splat of <x, x, x, x>, return <x, x, x, x>
16865 if (AllSame)
16866 return N0;
16867
16868 // Canonicalize any other splat as a build_vector.
16869 const SDValue &Splatted = V->getOperand(SVN->getSplatIndex());
16870 SmallVector<SDValue, 8> Ops(NumElts, Splatted);
16871 SDValue NewBV = DAG.getBuildVector(V->getValueType(0), SDLoc(N), Ops);
16872
16873 // We may have jumped through bitcasts, so the type of the
16874 // BUILD_VECTOR may not match the type of the shuffle.
16875 if (V->getValueType(0) != VT)
16876 NewBV = DAG.getBitcast(VT, NewBV);
16877 return NewBV;
16878 }
16879 }
16880
16881 // Simplify source operands based on shuffle mask.
16882 if (SimplifyDemandedVectorElts(SDValue(N, 0)))
16883 return SDValue(N, 0);
16884
16885 // Match shuffles that can be converted to any_vector_extend_in_reg.
16886 if (SDValue V = combineShuffleToVectorExtend(SVN, DAG, TLI, LegalOperations, LegalTypes))
16887 return V;
16888
16889 // Combine "truncate_vector_in_reg" style shuffles.
16890 if (SDValue V = combineTruncationShuffle(SVN, DAG))
16891 return V;
16892
16893 if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
16894 Level < AfterLegalizeVectorOps &&
16895 (N1.isUndef() ||
16896 (N1.getOpcode() == ISD::CONCAT_VECTORS &&
16897 N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) {
16898 if (SDValue V = partitionShuffleOfConcats(N, DAG))
16899 return V;
16900 }
16901
16902 // Attempt to combine a shuffle of 2 inputs of 'scalar sources' -
16903 // BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR.
16904 if (Level < AfterLegalizeVectorOps && TLI.isTypeLegal(VT))
16905 if (SDValue Res = combineShuffleOfScalars(SVN, DAG, TLI))
16906 return Res;
16907
16908 // If this shuffle only has a single input that is a bitcasted shuffle,
16909 // attempt to merge the 2 shuffles and suitably bitcast the inputs/output
16910 // back to their original types.
16911 if (N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
16912 N1.isUndef() && Level < AfterLegalizeVectorOps &&
16913 TLI.isTypeLegal(VT)) {
16914
16915 // Peek through the bitcast only if there is one user.
16916 SDValue BC0 = N0;
16917 while (BC0.getOpcode() == ISD::BITCAST) {
16918 if (!BC0.hasOneUse())
16919 break;
16920 BC0 = BC0.getOperand(0);
16921 }
16922
16923 auto ScaleShuffleMask = [](ArrayRef<int> Mask, int Scale) {
16924 if (Scale == 1)
16925 return SmallVector<int, 8>(Mask.begin(), Mask.end());
16926
16927 SmallVector<int, 8> NewMask;
16928 for (int M : Mask)
16929 for (int s = 0; s != Scale; ++s)
16930 NewMask.push_back(M < 0 ? -1 : Scale * M + s);
16931 return NewMask;
16932 };
16933
16934 if (BC0.getOpcode() == ISD::VECTOR_SHUFFLE && BC0.hasOneUse()) {
16935 EVT SVT = VT.getScalarType();
16936 EVT InnerVT = BC0->getValueType(0);
16937 EVT InnerSVT = InnerVT.getScalarType();
16938
16939 // Determine which shuffle works with the smaller scalar type.
16940 EVT ScaleVT = SVT.bitsLT(InnerSVT) ? VT : InnerVT;
16941 EVT ScaleSVT = ScaleVT.getScalarType();
16942
16943 if (TLI.isTypeLegal(ScaleVT) &&
16944 0 == (InnerSVT.getSizeInBits() % ScaleSVT.getSizeInBits()) &&
16945 0 == (SVT.getSizeInBits() % ScaleSVT.getSizeInBits())) {
16946 int InnerScale = InnerSVT.getSizeInBits() / ScaleSVT.getSizeInBits();
16947 int OuterScale = SVT.getSizeInBits() / ScaleSVT.getSizeInBits();
16948
16949 // Scale the shuffle masks to the smaller scalar type.
16950 ShuffleVectorSDNode *InnerSVN = cast<ShuffleVectorSDNode>(BC0);
16951 SmallVector<int, 8> InnerMask =
16952 ScaleShuffleMask(InnerSVN->getMask(), InnerScale);
16953 SmallVector<int, 8> OuterMask =
16954 ScaleShuffleMask(SVN->getMask(), OuterScale);
16955
16956 // Merge the shuffle masks.
16957 SmallVector<int, 8> NewMask;
16958 for (int M : OuterMask)
16959 NewMask.push_back(M < 0 ? -1 : InnerMask[M]);
16960
16961 // Test for shuffle mask legality over both commutations.
16962 SDValue SV0 = BC0->getOperand(0);
16963 SDValue SV1 = BC0->getOperand(1);
16964 bool LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT);
16965 if (!LegalMask) {
16966 std::swap(SV0, SV1);
16967 ShuffleVectorSDNode::commuteMask(NewMask);
16968 LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT);
16969 }
16970
16971 if (LegalMask) {
16972 SV0 = DAG.getBitcast(ScaleVT, SV0);
16973 SV1 = DAG.getBitcast(ScaleVT, SV1);
16974 return DAG.getBitcast(
16975 VT, DAG.getVectorShuffle(ScaleVT, SDLoc(N), SV0, SV1, NewMask));
16976 }
16977 }
16978 }
16979 }
16980
16981 // Canonicalize shuffles according to rules:
16982 // shuffle(A, shuffle(A, B)) -> shuffle(shuffle(A,B), A)
16983 // shuffle(B, shuffle(A, B)) -> shuffle(shuffle(A,B), B)
16984 // shuffle(B, shuffle(A, Undef)) -> shuffle(shuffle(A, Undef), B)
16985 if (N1.getOpcode() == ISD::VECTOR_SHUFFLE &&
16986 N0.getOpcode() != ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
16987 TLI.isTypeLegal(VT)) {
16988 // The incoming shuffle must be of the same type as the result of the
16989 // current shuffle.
16990 assert(N1->getOperand(0).getValueType() == VT &&
16991        "Shuffle types don't match");
16992
16993 SDValue SV0 = N1->getOperand(0);
16994 SDValue SV1 = N1->getOperand(1);
16995 bool HasSameOp0 = N0 == SV0;
16996 bool IsSV1Undef = SV1.isUndef();
16997 if (HasSameOp0 || IsSV1Undef || N0 == SV1)
16998 // Commute the operands of this shuffle so that next rule
16999 // will trigger.
17000 return DAG.getCommutedVectorShuffle(*SVN);
17001 }
17002
17003 // Try to fold according to rules:
17004 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
17005 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
17006 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
17007 // Don't try to fold shuffles with illegal type.
17008 // Only fold if this shuffle is the only user of the other shuffle.
17009 if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && N->isOnlyUserOf(N0.getNode()) &&
17010 Level < AfterLegalizeDAG && TLI.isTypeLegal(VT)) {
17011 ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0);
17012
17013 // Don't try to fold splats; they're likely to simplify somehow, or they
17014 // might be free.
17015 if (OtherSV->isSplat())
17016 return SDValue();
17017
17018 // The incoming shuffle must be of the same type as the result of the
17019 // current shuffle.
17020 assert(OtherSV->getOperand(0).getValueType() == VT &&
17021        "Shuffle types don't match");
17022
17023 SDValue SV0, SV1;
17024 SmallVector<int, 4> Mask;
17025 // Compute the combined shuffle mask for a shuffle with SV0 as the first
17026 // operand, and SV1 as the second operand.
17027 for (unsigned i = 0; i != NumElts; ++i) {
17028 int Idx = SVN->getMaskElt(i);
17029 if (Idx < 0) {
17030 // Propagate Undef.
17031 Mask.push_back(Idx);
17032 continue;
17033 }
17034
17035 SDValue CurrentVec;
17036 if (Idx < (int)NumElts) {
17037 // This shuffle index refers to the inner shuffle N0. Lookup the inner
17038 // shuffle mask to identify which vector is actually referenced.
17039 Idx = OtherSV->getMaskElt(Idx);
17040 if (Idx < 0) {
17041 // Propagate Undef.
17042 Mask.push_back(Idx);
17043 continue;
17044 }
17045
17046 CurrentVec = (Idx < (int) NumElts) ? OtherSV->getOperand(0)
17047 : OtherSV->getOperand(1);
17048 } else {
17049 // This shuffle index references an element within N1.
17050 CurrentVec = N1;
17051 }
17052
17053 // Simple case where 'CurrentVec' is UNDEF.
17054 if (CurrentVec.isUndef()) {
17055 Mask.push_back(-1);
17056 continue;
17057 }
17058
17059 // Canonicalize the shuffle index. We don't know yet if CurrentVec
17060 // will be the first or second operand of the combined shuffle.
17061 Idx = Idx % NumElts;
17062 if (!SV0.getNode() || SV0 == CurrentVec) {
17063 // Ok. CurrentVec is the left hand side.
17064 // Update the mask accordingly.
17065 SV0 = CurrentVec;
17066 Mask.push_back(Idx);
17067 continue;
17068 }
17069
17070 // Bail out if we cannot convert the shuffle pair into a single shuffle.
17071 if (SV1.getNode() && SV1 != CurrentVec)
17072 return SDValue();
17073
17074 // Ok. CurrentVec is the right hand side.
17075 // Update the mask accordingly.
17076 SV1 = CurrentVec;
17077 Mask.push_back(Idx + NumElts);
17078 }
17079
17080 // Check if all indices in Mask are Undef. If so, propagate Undef.
17081 bool isUndefMask = true;
17082 for (unsigned i = 0; i != NumElts && isUndefMask; ++i)
17083 isUndefMask &= Mask[i] < 0;
17084
17085 if (isUndefMask)
17086 return DAG.getUNDEF(VT);
17087
17088 if (!SV0.getNode())
17089 SV0 = DAG.getUNDEF(VT);
17090 if (!SV1.getNode())
17091 SV1 = DAG.getUNDEF(VT);
17092
17093 // Avoid introducing shuffles with illegal mask.
17094 if (!TLI.isShuffleMaskLegal(Mask, VT)) {
17095 ShuffleVectorSDNode::commuteMask(Mask);
17096
17097 if (!TLI.isShuffleMaskLegal(Mask, VT))
17098 return SDValue();
17099
17100 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, A, M2)
17101 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, A, M2)
17102 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, B, M2)
17103 std::swap(SV0, SV1);
17104 }
17105
17106 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
17107 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
17108 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
17109 return DAG.getVectorShuffle(VT, SDLoc(N), SV0, SV1, Mask);
17110 }
17111
17112 return SDValue();
17113}
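The inner-mask scaling and merging above can be reproduced standalone. In this sketch (scaleMask is an invented copy of the ScaleShuffleMask lambda), a v2i64 mask is widened to v4i32 granularity and then composed with an outer mask exactly as NewMask is built:

#include <cassert>
#include <vector>

// Widen each mask element into Scale consecutive sub-elements, mirroring
// the ScaleShuffleMask lambda above.
static std::vector<int> scaleMask(const std::vector<int> &Mask, int Scale) {
  std::vector<int> NewMask;
  for (int M : Mask)
    for (int s = 0; s != Scale; ++s)
      NewMask.push_back(M < 0 ? -1 : Scale * M + s);
  return NewMask;
}

int main() {
  // A v2i64 mask <1,0> seen at v4i32 granularity becomes <2,3,0,1>.
  std::vector<int> Inner = scaleMask({1, 0}, 2);
  assert(Inner == (std::vector<int>{2, 3, 0, 1}));

  // Merge an outer v4i32 mask <0,1,2,u> through the scaled inner mask,
  // exactly as NewMask is built in the combine.
  std::vector<int> Outer = {0, 1, 2, -1}, Merged;
  for (int M : Outer)
    Merged.push_back(M < 0 ? -1 : Inner[M]);
  assert(Merged == (std::vector<int>{2, 3, 0, -1}));
  return 0;
}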
17114
17115SDValue DAGCombiner::visitSCALAR_TO_VECTOR(SDNode *N) {
17116 SDValue InVal = N->getOperand(0);
17117 EVT VT = N->getValueType(0);
17118
17119 // Replace a SCALAR_TO_VECTOR(EXTRACT_VECTOR_ELT(V,C0)) pattern
17120 // with a VECTOR_SHUFFLE and possible truncate.
17121 if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
17122 SDValue InVec = InVal->getOperand(0);
17123 SDValue EltNo = InVal->getOperand(1);
17124 auto InVecT = InVec.getValueType();
17125 if (ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(EltNo)) {
17126 SmallVector<int, 8> NewMask(InVecT.getVectorNumElements(), -1);
17127 int Elt = C0->getZExtValue();
17128 NewMask[0] = Elt;
17129 SDValue Val;
17130 // If we have an implicit truncate, do the truncate here as long as it's
17131 // legal; if it's not, fall through and try the shuffle lowering below.
17132 if (VT.getScalarType() != InVal.getValueType() &&
17133 InVal.getValueType().isScalarInteger() &&
17134 isTypeLegal(VT.getScalarType())) {
17135 Val =
17136 DAG.getNode(ISD::TRUNCATE, SDLoc(InVal), VT.getScalarType(), InVal);
17137 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Val);
17138 }
17139 if (VT.getScalarType() == InVecT.getScalarType() &&
17140 VT.getVectorNumElements() <= InVecT.getVectorNumElements() &&
17141 TLI.isShuffleMaskLegal(NewMask, VT)) {
17142 Val = DAG.getVectorShuffle(InVecT, SDLoc(N), InVec,
17143 DAG.getUNDEF(InVecT), NewMask);
17144 // If the initial vector is the correct size, this shuffle is a
17145 // valid result.
17146 if (VT == InVecT)
17147 return Val;
17148 // If not, we must truncate the vector.
17149 if (VT.getVectorNumElements() != InVecT.getVectorNumElements()) {
17150 MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
17151 SDValue ZeroIdx = DAG.getConstant(0, SDLoc(N), IdxTy);
17152 EVT SubVT =
17153 EVT::getVectorVT(*DAG.getContext(), InVecT.getVectorElementType(),
17154 VT.getVectorNumElements());
17155 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), SubVT, Val,
17156 ZeroIdx);
17157 return Val;
17158 }
17159 }
17160 }
17161 }
17162
17163 return SDValue();
17164}
17165
17166SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
17167 EVT VT = N->getValueType(0);
17168 SDValue N0 = N->getOperand(0);
17169 SDValue N1 = N->getOperand(1);
17170 SDValue N2 = N->getOperand(2);
17171
17172 // If inserting an UNDEF, just return the original vector.
17173 if (N1.isUndef())
17174 return N0;
17175
17176 // For nested INSERT_SUBVECTORs, attempt to combine inner node first to allow
17177 // us to pull BITCASTs from input to output.
17178 if (N0.hasOneUse() && N0->getOpcode() == ISD::INSERT_SUBVECTOR)
17179 if (SDValue NN0 = visitINSERT_SUBVECTOR(N0.getNode()))
17180 return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, NN0, N1, N2);
17181
17182 // If this is an insert of an extracted vector into an undef vector, we can
17183 // just use the input to the extract.
17184 if (N0.isUndef() && N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
17185 N1.getOperand(1) == N2 && N1.getOperand(0).getValueType() == VT)
17186 return N1.getOperand(0);
17187
17188 // If we are inserting a bitcast value into an undef, with the same
17189 // number of elements, just use the bitcast input of the extract.
17190 // i.e. INSERT_SUBVECTOR UNDEF (BITCAST N1) N2 ->
17191 // BITCAST (INSERT_SUBVECTOR UNDEF N1 N2)
17192 if (N0.isUndef() && N1.getOpcode() == ISD::BITCAST &&
17193 N1.getOperand(0).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
17194 N1.getOperand(0).getOperand(1) == N2 &&
17195 N1.getOperand(0).getOperand(0).getValueType().getVectorNumElements() ==
17196 VT.getVectorNumElements() &&
17197 N1.getOperand(0).getOperand(0).getValueType().getSizeInBits() ==
17198 VT.getSizeInBits()) {
17199 return DAG.getBitcast(VT, N1.getOperand(0).getOperand(0));
17200 }
17201
17202 // If both N0 and N1 are bitcast values on which insert_subvector
17203 // would make sense, pull the bitcast through.
17204 // i.e. INSERT_SUBVECTOR (BITCAST N0) (BITCAST N1) N2 ->
17205 // BITCAST (INSERT_SUBVECTOR N0 N1 N2)
17206 if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST) {
17207 SDValue CN0 = N0.getOperand(0);
17208 SDValue CN1 = N1.getOperand(0);
17209 EVT CN0VT = CN0.getValueType();
17210 EVT CN1VT = CN1.getValueType();
17211 if (CN0VT.isVector() && CN1VT.isVector() &&
17212 CN0VT.getVectorElementType() == CN1VT.getVectorElementType() &&
17213 CN0VT.getVectorNumElements() == VT.getVectorNumElements()) {
17214 SDValue NewINSERT = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N),
17215 CN0.getValueType(), CN0, CN1, N2);
17216 return DAG.getBitcast(VT, NewINSERT);
17217 }
17218 }
17219
17220 // Combine INSERT_SUBVECTORs where we are inserting to the same index.
17221 // INSERT_SUBVECTOR( INSERT_SUBVECTOR( Vec, SubOld, Idx ), SubNew, Idx )
17222 // --> INSERT_SUBVECTOR( Vec, SubNew, Idx )
17223 if (N0.getOpcode() == ISD::INSERT_SUBVECTOR &&
17224 N0.getOperand(1).getValueType() == N1.getValueType() &&
17225 N0.getOperand(2) == N2)
17226 return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0.getOperand(0),
17227 N1, N2);
17228
17229 if (!isa<ConstantSDNode>(N2))
17230 return SDValue();
17231
17232 unsigned InsIdx = cast<ConstantSDNode>(N2)->getZExtValue();
17233
17234 // Canonicalize insert_subvector dag nodes.
17235 // Example:
17236 // (insert_subvector (insert_subvector A, Idx0), Idx1)
17237 // -> (insert_subvector (insert_subvector A, Idx1), Idx0)
17238 if (N0.getOpcode() == ISD::INSERT_SUBVECTOR && N0.hasOneUse() &&
17239 N1.getValueType() == N0.getOperand(1).getValueType() &&
17240 isa<ConstantSDNode>(N0.getOperand(2))) {
17241 unsigned OtherIdx = N0.getConstantOperandVal(2);
17242 if (InsIdx < OtherIdx) {
17243 // Swap nodes.
17244 SDValue NewOp = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT,
17245 N0.getOperand(0), N1, N2);
17246 AddToWorklist(NewOp.getNode());
17247 return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N0.getNode()),
17248 VT, NewOp, N0.getOperand(1), N0.getOperand(2));
17249 }
17250 }
17251
17252 // If the input vector is a concatenation, and the insert replaces
17253 // one of the pieces, we can optimize into a single concat_vectors.
17254 if (N0.getOpcode() == ISD::CONCAT_VECTORS && N0.hasOneUse() &&
17255 N0.getOperand(0).getValueType() == N1.getValueType()) {
17256 unsigned Factor = N1.getValueType().getVectorNumElements();
17257
17258 SmallVector<SDValue, 8> Ops(N0->op_begin(), N0->op_end());
17259 Ops[cast<ConstantSDNode>(N2)->getZExtValue() / Factor] = N1;
17260
17261 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
17262 }
17263
17264 return SDValue();
17265}
17266
17267SDValue DAGCombiner::visitFP_TO_FP16(SDNode *N) {
17268 SDValue N0 = N->getOperand(0);
17269
17270 // fold (fp_to_fp16 (fp16_to_fp op)) -> op
17271 if (N0->getOpcode() == ISD::FP16_TO_FP)
17272 return N0->getOperand(0);
17273
17274 return SDValue();
17275}
17276
17277SDValue DAGCombiner::visitFP16_TO_FP(SDNode *N) {
17278 SDValue N0 = N->getOperand(0);
17279
17280 // fold fp16_to_fp(op & 0xffff) -> fp16_to_fp(op)
17281 if (N0->getOpcode() == ISD::AND) {
17282 ConstantSDNode *AndConst = getAsNonOpaqueConstant(N0.getOperand(1));
17283 if (AndConst && AndConst->getAPIntValue() == 0xffff) {
17284 return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), N->getValueType(0),
17285 N0.getOperand(0));
17286 }
17287 }
17288
17289 return SDValue();
17290}
17291
17292/// Returns a vector_shuffle if it is able to transform an AND to a vector_shuffle
17293/// with the destination vector and a zero vector.
17294/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==>
17295/// vector_shuffle V, Zero, <0, 4, 2, 4>
17296SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
17297 assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");
17298
17299 EVT VT = N->getValueType(0);
17300 SDValue LHS = N->getOperand(0);
17301 SDValue RHS = peekThroughBitcast(N->getOperand(1));
17302 SDLoc DL(N);
17303
17304 // Make sure we're not running after operation legalization where it
17305 // may have custom lowered the vector shuffles.
17306 if (LegalOperations)
17307 return SDValue();
17308
17309 if (RHS.getOpcode() != ISD::BUILD_VECTOR)
17310 return SDValue();
17311
17312 EVT RVT = RHS.getValueType();
17313 unsigned NumElts = RHS.getNumOperands();
17314
17315 // Attempt to create a valid clear mask, splitting the mask into
17316 // sub-elements and checking to see if each is all zeros or all ones,
17317 // suitable for shuffle masking.
17318 auto BuildClearMask = [&](int Split) {
17319 int NumSubElts = NumElts * Split;
17320 int NumSubBits = RVT.getScalarSizeInBits() / Split;
17321
17322 SmallVector<int, 8> Indices;
17323 for (int i = 0; i != NumSubElts; ++i) {
17324 int EltIdx = i / Split;
17325 int SubIdx = i % Split;
17326 SDValue Elt = RHS.getOperand(EltIdx);
17327 if (Elt.isUndef()) {
17328 Indices.push_back(-1);
17329 continue;
17330 }
17331
17332 APInt Bits;
17333 if (isa<ConstantSDNode>(Elt))
17334 Bits = cast<ConstantSDNode>(Elt)->getAPIntValue();
17335 else if (isa<ConstantFPSDNode>(Elt))
17336 Bits = cast<ConstantFPSDNode>(Elt)->getValueAPF().bitcastToAPInt();
17337 else
17338 return SDValue();
17339
17340 // Extract the sub element from the constant bit mask.
17341 if (DAG.getDataLayout().isBigEndian()) {
17342 Bits.lshrInPlace((Split - SubIdx - 1) * NumSubBits);
17343 } else {
17344 Bits.lshrInPlace(SubIdx * NumSubBits);
17345 }
17346
17347 if (Split > 1)
17348 Bits = Bits.trunc(NumSubBits);
17349
17350 if (Bits.isAllOnesValue())
17351 Indices.push_back(i);
17352 else if (Bits == 0)
17353 Indices.push_back(i + NumSubElts);
17354 else
17355 return SDValue();
17356 }
17357
17358 // Let's see if the target supports this vector_shuffle.
17359 EVT ClearSVT = EVT::getIntegerVT(*DAG.getContext(), NumSubBits);
17360 EVT ClearVT = EVT::getVectorVT(*DAG.getContext(), ClearSVT, NumSubElts);
17361 if (!TLI.isVectorClearMaskLegal(Indices, ClearVT))
17362 return SDValue();
17363
17364 SDValue Zero = DAG.getConstant(0, DL, ClearVT);
17365 return DAG.getBitcast(VT, DAG.getVectorShuffle(ClearVT, DL,
17366 DAG.getBitcast(ClearVT, LHS),
17367 Zero, Indices));
17368 };
17369
17370 // Determine maximum split level (byte level masking).
17371 int MaxSplit = 1;
17372 if (RVT.getScalarSizeInBits() % 8 == 0)
17373 MaxSplit = RVT.getScalarSizeInBits() / 8;
17374
17375 for (int Split = 1; Split <= MaxSplit; ++Split)
17376 if (RVT.getScalarSizeInBits() % Split == 0)
17377 if (SDValue S = BuildClearMask(Split))
17378 return S;
17379
17380 return SDValue();
17381}
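A minimal model of the clear-mask construction, restricted to the Split == 1 (whole-element) case for v4i32; buildClearMask is an invented standalone helper, not the lambda above:

#include <cassert>
#include <cstdint>
#include <vector>

// Lane i of the shuffle result takes lane i of the source when the AND
// mask lane is all-ones, or lane i of the zero vector (index i + NumElts)
// when it is all-zeros; any mixed constant defeats the transform.
static bool buildClearMask(const std::vector<uint32_t> &AndMask,
                           std::vector<int> &Indices) {
  int NumElts = (int)AndMask.size();
  for (int i = 0; i != NumElts; ++i) {
    if (AndMask[i] == 0xffffffffu)
      Indices.push_back(i);           // keep the source lane
    else if (AndMask[i] == 0)
      Indices.push_back(i + NumElts); // take a zero-vector lane
    else
      return false;
  }
  return true;
}

int main() {
  std::vector<int> Indices;
  assert(buildClearMask({0xffffffffu, 0, 0xffffffffu, 0}, Indices));
  assert(Indices == (std::vector<int>{0, 5, 2, 7}));
  return 0;
}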
17382
17383/// Visit a binary vector operation, like ADD.
17384SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
17385 assert(N->getValueType(0).isVector() &&
17386        "SimplifyVBinOp only works on vectors!");
17387
17388 SDValue LHS = N->getOperand(0);
17389 SDValue RHS = N->getOperand(1);
17390 SDValue Ops[] = {LHS, RHS};
17391
17392 // See if we can constant fold the vector operation.
17393 if (SDValue Fold = DAG.FoldConstantVectorArithmetic(
17394 N->getOpcode(), SDLoc(LHS), LHS.getValueType(), Ops, N->getFlags()))
17395 return Fold;
17396
17397 // Type legalization might introduce new shuffles in the DAG.
17398 // Fold (VBinOp (shuffle (A, Undef, Mask)), (shuffle (B, Undef, Mask)))
17399 // -> (shuffle (VBinOp (A, B)), Undef, Mask).
17400 if (LegalTypes && isa<ShuffleVectorSDNode>(LHS) &&
17401 isa<ShuffleVectorSDNode>(RHS) && LHS.hasOneUse() && RHS.hasOneUse() &&
17402 LHS.getOperand(1).isUndef() &&
17403 RHS.getOperand(1).isUndef()) {
17404 ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(LHS);
17405 ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(RHS);
17406
17407 if (SVN0->getMask().equals(SVN1->getMask())) {
17408 EVT VT = N->getValueType(0);
17409 SDValue UndefVector = LHS.getOperand(1);
17410 SDValue NewBinOp = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
17411 LHS.getOperand(0), RHS.getOperand(0),
17412 N->getFlags());
17413 AddUsersToWorklist(N);
17414 return DAG.getVectorShuffle(VT, SDLoc(N), NewBinOp, UndefVector,
17415 SVN0->getMask());
17416 }
17417 }
17418
17419 return SDValue();
17420}
17421
17422SDValue DAGCombiner::SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1,
17423 SDValue N2) {
17424 assert(N0.getOpcode() == ISD::SETCC && "First argument must be a SetCC node!");
17425
17426 SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2,
17427 cast<CondCodeSDNode>(N0.getOperand(2))->get());
17428
17429 // If we got a simplified select_cc node back from SimplifySelectCC, then
17430 // break it down into a new SETCC node, and a new SELECT node, and then return
17431 // the SELECT node, since we were called with a SELECT node.
17432 if (SCC.getNode()) {
17433 // Check to see if we got a select_cc back (to turn into setcc/select).
17434 // Otherwise, just return whatever node we got back, like fabs.
17435 if (SCC.getOpcode() == ISD::SELECT_CC) {
17436 SDValue SETCC = DAG.getNode(ISD::SETCC, SDLoc(N0),
17437 N0.getValueType(),
17438 SCC.getOperand(0), SCC.getOperand(1),
17439 SCC.getOperand(4));
17440 AddToWorklist(SETCC.getNode());
17441 return DAG.getSelect(SDLoc(SCC), SCC.getValueType(), SETCC,
17442 SCC.getOperand(2), SCC.getOperand(3));
17443 }
17444
17445 return SCC;
17446 }
17447 return SDValue();
17448}
17449
17450/// Given a SELECT or a SELECT_CC node, where LHS and RHS are the two values
17451/// being selected between, see if we can simplify the select. Callers of this
17452/// should assume that TheSelect is deleted if this returns true. As such, they
17453/// should return the appropriate thing (e.g. the node) back to the top-level of
17454/// the DAG combiner loop to avoid it being looked at.
17455bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
17456 SDValue RHS) {
17457 // fold (select (setcc x, [+-]0.0, *lt), NaN, (fsqrt x))
17458 // The select + setcc is redundant, because fsqrt returns NaN for X < 0.
17459 if (const ConstantFPSDNode *NaN = isConstOrConstSplatFP(LHS)) {
17460 if (NaN->isNaN() && RHS.getOpcode() == ISD::FSQRT) {
17461 // We have: (select (setcc ?, ?, ?), NaN, (fsqrt ?))
17462 SDValue Sqrt = RHS;
17463 ISD::CondCode CC;
17464 SDValue CmpLHS;
17465 const ConstantFPSDNode *Zero = nullptr;
17466
17467 if (TheSelect->getOpcode() == ISD::SELECT_CC) {
17468 CC = cast<CondCodeSDNode>(TheSelect->getOperand(4))->get();
17469 CmpLHS = TheSelect->getOperand(0);
17470 Zero = isConstOrConstSplatFP(TheSelect->getOperand(1));
17471 } else {
17472 // SELECT or VSELECT
17473 SDValue Cmp = TheSelect->getOperand(0);
17474 if (Cmp.getOpcode() == ISD::SETCC) {
17475 CC = cast<CondCodeSDNode>(Cmp.getOperand(2))->get();
17476 CmpLHS = Cmp.getOperand(0);
17477 Zero = isConstOrConstSplatFP(Cmp.getOperand(1));
17478 }
17479 }
17480 if (Zero && Zero->isZero() &&
17481 Sqrt.getOperand(0) == CmpLHS && (CC == ISD::SETOLT ||
17482 CC == ISD::SETULT || CC == ISD::SETLT)) {
17483 // We have: (select (setcc x, [+-]0.0, *lt), NaN, (fsqrt x))
17484 CombineTo(TheSelect, Sqrt);
17485 return true;
17486 }
17487 }
17488 }
17489 // Cannot simplify select with vector condition
17490 if (TheSelect->getOperand(0).getValueType().isVector()) return false;
17491
17492 // If this is a select from two identical things, try to pull the operation
17493 // through the select.
17494 if (LHS.getOpcode() != RHS.getOpcode() ||
17495 !LHS.hasOneUse() || !RHS.hasOneUse())
17496 return false;
17497
17498 // If this is a load and the token chain is identical, replace the select
17499 // of two loads with a load through a select of the address to load from.
17500 // This triggers in things like "select bool X, 10.0, 123.0" after the FP
17501 // constants have been dropped into the constant pool.
17502 if (LHS.getOpcode() == ISD::LOAD) {
17503 LoadSDNode *LLD = cast<LoadSDNode>(LHS);
17504 LoadSDNode *RLD = cast<LoadSDNode>(RHS);
17505
17506 // Token chains must be identical.
17507 if (LHS.getOperand(0) != RHS.getOperand(0) ||
17508 // Do not let this transformation reduce the number of volatile loads.
17509 LLD->isVolatile() || RLD->isVolatile() ||
17510 // FIXME: If either is a pre/post inc/dec load,
17511 // we'd need to split out the address adjustment.
17512 LLD->isIndexed() || RLD->isIndexed() ||
17513 // If this is an EXTLOAD, the VT's must match.
17514 LLD->getMemoryVT() != RLD->getMemoryVT() ||
17515 // If this is an EXTLOAD, the kind of extension must match.
17516 (LLD->getExtensionType() != RLD->getExtensionType() &&
17517 // The only exception is if one of the extensions is anyext.
17518 LLD->getExtensionType() != ISD::EXTLOAD &&
17519 RLD->getExtensionType() != ISD::EXTLOAD) ||
17520 // FIXME: this discards src value information. This is
17521 // over-conservative. It would be beneficial to be able to remember
17522 // both potential memory locations. Since we are discarding
17523 // src value info, don't do the transformation if the memory
17524 // locations are not in the default address space.
17525 LLD->getPointerInfo().getAddrSpace() != 0 ||
17526 RLD->getPointerInfo().getAddrSpace() != 0 ||
17527 !TLI.isOperationLegalOrCustom(TheSelect->getOpcode(),
17528 LLD->getBasePtr().getValueType()))
17529 return false;
17530
17531 // Check that the select condition doesn't reach either load. If it does,
17532 // folding this will induce a cycle into the DAG; if not, this is safe to
17533 // xform, so create a select of the addresses.
17534 SDValue Addr;
17535 if (TheSelect->getOpcode() == ISD::SELECT) {
17536 SDNode *CondNode = TheSelect->getOperand(0).getNode();
17537 if ((LLD->hasAnyUseOfValue(1) && LLD->isPredecessorOf(CondNode)) ||
17538 (RLD->hasAnyUseOfValue(1) && RLD->isPredecessorOf(CondNode)))
17539 return false;
17540 // The loads must not depend on one another.
17541 if (LLD->isPredecessorOf(RLD) ||
17542 RLD->isPredecessorOf(LLD))
17543 return false;
17544 Addr = DAG.getSelect(SDLoc(TheSelect),
17545 LLD->getBasePtr().getValueType(),
17546 TheSelect->getOperand(0), LLD->getBasePtr(),
17547 RLD->getBasePtr());
17548 } else { // Otherwise SELECT_CC
17549 SDNode *CondLHS = TheSelect->getOperand(0).getNode();
17550 SDNode *CondRHS = TheSelect->getOperand(1).getNode();
17551
17552 if ((LLD->hasAnyUseOfValue(1) &&
17553 (LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))) ||
17554 (RLD->hasAnyUseOfValue(1) &&
17555 (RLD->isPredecessorOf(CondLHS) || RLD->isPredecessorOf(CondRHS))))
17556 return false;
17557
17558 Addr = DAG.getNode(ISD::SELECT_CC, SDLoc(TheSelect),
17559 LLD->getBasePtr().getValueType(),
17560 TheSelect->getOperand(0),
17561 TheSelect->getOperand(1),
17562 LLD->getBasePtr(), RLD->getBasePtr(),
17563 TheSelect->getOperand(4));
17564 }
17565
17566 SDValue Load;
17567 // It is safe to replace the two loads if they have different alignments,
17568 // but the new load must be the minimum (most restrictive) alignment of the
17569 // inputs.
17570 unsigned Alignment = std::min(LLD->getAlignment(), RLD->getAlignment());
17571 MachineMemOperand::Flags MMOFlags = LLD->getMemOperand()->getFlags();
17572 if (!RLD->isInvariant())
17573 MMOFlags &= ~MachineMemOperand::MOInvariant;
17574 if (!RLD->isDereferenceable())
17575 MMOFlags &= ~MachineMemOperand::MODereferenceable;
17576 if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
17577 // FIXME: Discards pointer and AA info.
17578 Load = DAG.getLoad(TheSelect->getValueType(0), SDLoc(TheSelect),
17579 LLD->getChain(), Addr, MachinePointerInfo(), Alignment,
17580 MMOFlags);
17581 } else {
17582 // FIXME: Discards pointer and AA info.
17583 Load = DAG.getExtLoad(
17584 LLD->getExtensionType() == ISD::EXTLOAD ? RLD->getExtensionType()
17585 : LLD->getExtensionType(),
17586 SDLoc(TheSelect), TheSelect->getValueType(0), LLD->getChain(), Addr,
17587 MachinePointerInfo(), LLD->getMemoryVT(), Alignment, MMOFlags);
17588 }
17589
17590 // Users of the select now use the result of the load.
17591 CombineTo(TheSelect, Load);
17592
17593 // Users of the old loads now use the new load's chain. We know the
17594 // old-load value is dead now.
17595 CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
17596 CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
17597 return true;
17598 }
17599
17600 return false;
17601}
17602
17603/// Try to fold an expression of the form (N0 cond N1) ? N2 : N3 to a shift and
17604/// bitwise 'and'.
17605SDValue DAGCombiner::foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0,
17606 SDValue N1, SDValue N2, SDValue N3,
17607 ISD::CondCode CC) {
17608 // If this is a select where the false operand is zero and the compare is a
17609 // check of the sign bit, see if we can perform the "gzip trick":
17610 // select_cc setlt X, 0, A, 0 -> and (sra X, size(X)-1), A
17611 // select_cc setgt X, 0, A, 0 -> and (not (sra X, size(X)-1)), A
17612 EVT XType = N0.getValueType();
17613 EVT AType = N2.getValueType();
17614 if (!isNullConstant(N3) || !XType.bitsGE(AType))
17615 return SDValue();
17616
17617 // If the comparison is testing for a positive value, we have to invert
17618 // the sign bit mask, so only do that transform if the target has a bitwise
17619 // 'and not' instruction (the invert is free).
17620 if (CC == ISD::SETGT && TLI.hasAndNot(N2)) {
17621 // (X > -1) ? A : 0
17622 // (X > 0) ? X : 0 <-- This is canonical signed max.
17623 if (!(isAllOnesConstant(N1) || (isNullConstant(N1) && N0 == N2)))
17624 return SDValue();
17625 } else if (CC == ISD::SETLT) {
17626 // (X < 0) ? A : 0
17627 // (X < 1) ? X : 0 <-- This is un-canonicalized signed min.
17628 if (!(isNullConstant(N1) || (isOneConstant(N1) && N0 == N2)))
17629 return SDValue();
17630 } else {
17631 return SDValue();
17632 }
17633
17634 // and (sra X, size(X)-1), A -> "and (srl X, C2), A" iff A is a single-bit
17635 // constant.
17636 EVT ShiftAmtTy = getShiftAmountTy(N0.getValueType());
17637 auto *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
17638 if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue() - 1)) == 0)) {
17639 unsigned ShCt = XType.getSizeInBits() - N2C->getAPIntValue().logBase2() - 1;
17640 SDValue ShiftAmt = DAG.getConstant(ShCt, DL, ShiftAmtTy);
17641 SDValue Shift = DAG.getNode(ISD::SRL, DL, XType, N0, ShiftAmt);
17642 AddToWorklist(Shift.getNode());
17643
17644 if (XType.bitsGT(AType)) {
17645 Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
17646 AddToWorklist(Shift.getNode());
17647 }
17648
17649 if (CC == ISD::SETGT)
17650 Shift = DAG.getNOT(DL, Shift, AType);
17651
17652 return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
17653 }
17654
17655 SDValue ShiftAmt = DAG.getConstant(XType.getSizeInBits() - 1, DL, ShiftAmtTy);
17656 SDValue Shift = DAG.getNode(ISD::SRA, DL, XType, N0, ShiftAmt);
17657 AddToWorklist(Shift.getNode());
17658
17659 if (XType.bitsGT(AType)) {
17660 Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
17661 AddToWorklist(Shift.getNode());
17662 }
17663
17664 if (CC == ISD::SETGT)
17665 Shift = DAG.getNOT(DL, Shift, AType);
17666
17667 return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
17668}
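The "gzip trick" is easy to demonstrate in scalar code. A minimal sketch for i32 (selectLtZero is an invented name), assuming arithmetic right shift of a negative value, which C++20 guarantees and mainstream targets provide:

#include <cassert>
#include <cstdint>

// (X < 0) ? A : 0 computed branch-free: the arithmetic shift smears the
// sign bit into an all-ones (X < 0) or all-zero mask.
static int32_t selectLtZero(int32_t X, int32_t A) {
  int32_t Mask = X >> 31;
  return Mask & A;
}

int main() {
  assert(selectLtZero(-5, 42) == 42);
  assert(selectLtZero(7, 42) == 0);
  assert(selectLtZero(0, 42) == 0);
  return 0;
}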
17669
17670/// Simplify an expression of the form (N0 cond N1) ? N2 : N3
17671/// where 'cond' is the comparison specified by CC.
17672SDValue DAGCombiner::SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
17673 SDValue N2, SDValue N3, ISD::CondCode CC,
17674 bool NotExtCompare) {
17675 // (x ? y : y) -> y.
17676 if (N2 == N3) return N2;
17677
17678 EVT VT = N2.getValueType();
17679 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
17680 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
17681
17682 // Determine if the condition we're dealing with is constant
17683 SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()),
17684 N0, N1, CC, DL, false);
17685 if (SCC.getNode()) AddToWorklist(SCC.getNode());
17686
17687 if (ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode())) {
17688 // fold select_cc true, x, y -> x
17689 // fold select_cc false, x, y -> y
17690 return !SCCC->isNullValue() ? N2 : N3;
17691 }
17692
17693 // Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4)"
17694 // where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
17695 // in it. This is a win when the constant is not otherwise available because
17696 // it replaces two constant pool loads with one. We only do this if the FP
17697 // type is known to be legal, because if it isn't, then we are before legalize
17698 // types and we want the other legalization to happen first (e.g. to avoid
17699 // messing with soft float) and if the ConstantFP is not legal, because if
17700 // it is legal, we may not need to store the FP constant in a constant pool.
17701 if (ConstantFPSDNode *TV = dyn_cast<ConstantFPSDNode>(N2))
17702 if (ConstantFPSDNode *FV = dyn_cast<ConstantFPSDNode>(N3)) {
17703 if (TLI.isTypeLegal(N2.getValueType()) &&
17704 (TLI.getOperationAction(ISD::ConstantFP, N2.getValueType()) !=
17705 TargetLowering::Legal &&
17706 !TLI.isFPImmLegal(TV->getValueAPF(), TV->getValueType(0)) &&
17707 !TLI.isFPImmLegal(FV->getValueAPF(), FV->getValueType(0))) &&
17708 // If both constants have multiple uses, then we won't need to do an
17709 // extra load, they are likely around in registers for other users.
17710 (TV->hasOneUse() || FV->hasOneUse())) {
17711 Constant *Elts[] = {
17712 const_cast<ConstantFP*>(FV->getConstantFPValue()),
17713 const_cast<ConstantFP*>(TV->getConstantFPValue())
17714 };
17715 Type *FPTy = Elts[0]->getType();
17716 const DataLayout &TD = DAG.getDataLayout();
17717
17718 // Create a ConstantArray of the two constants.
17719 Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
17720 SDValue CPIdx =
17721 DAG.getConstantPool(CA, TLI.getPointerTy(DAG.getDataLayout()),
17722 TD.getPrefTypeAlignment(FPTy));
17723 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
17724
17725 // Get the offsets to the 0 and 1 element of the array so that we can
17726 // select between them.
17727 SDValue Zero = DAG.getIntPtrConstant(0, DL);
17728 unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
17729 SDValue One = DAG.getIntPtrConstant(EltSize, SDLoc(FV));
17730
17731 SDValue Cond = DAG.getSetCC(DL,
17732 getSetCCResultType(N0.getValueType()),
17733 N0, N1, CC);
17734 AddToWorklist(Cond.getNode());
17735 SDValue CstOffset = DAG.getSelect(DL, Zero.getValueType(),
17736 Cond, One, Zero);
17737 AddToWorklist(CstOffset.getNode());
17738 CPIdx = DAG.getNode(ISD::ADD, DL, CPIdx.getValueType(), CPIdx,
17739 CstOffset);
17740 AddToWorklist(CPIdx.getNode());
17741 return DAG.getLoad(
17742 TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
17743 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
17744 Alignment);
17745 }
17746 }
17747
17748 if (SDValue V = foldSelectCCToShiftAnd(DL, N0, N1, N2, N3, CC))
17749 return V;
17750
17751 // fold (select_cc seteq (and x, y), 0, 0, A) -> (and (shr (shl x)) A)
17752 // where y has a single bit set.
17753 // In plain terms, we can turn the SELECT_CC into an AND
17754 // when the condition can be materialized as an all-ones register. Any
17755 // single bit-test can be materialized as an all-ones register with
17756 // shift-left and shift-right-arith.
17757 if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
17758 N0->getValueType(0) == VT && isNullConstant(N1) && isNullConstant(N2)) {
17759 SDValue AndLHS = N0->getOperand(0);
17760 ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
17761 if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) {
17762 // Shift the tested bit over the sign bit.
17763 const APInt &AndMask = ConstAndRHS->getAPIntValue();
17764 SDValue ShlAmt =
17765 DAG.getConstant(AndMask.countLeadingZeros(), SDLoc(AndLHS),
17766 getShiftAmountTy(AndLHS.getValueType()));
17767 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N0), VT, AndLHS, ShlAmt);
17768
17769 // Now arithmetic right shift it all the way over, so the result is either
17770 // all-ones, or zero.
17771 SDValue ShrAmt =
17772 DAG.getConstant(AndMask.getBitWidth() - 1, SDLoc(Shl),
17773 getShiftAmountTy(Shl.getValueType()));
17774 SDValue Shr = DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, ShrAmt);
17775
17776 return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
17777 }
17778 }
17779
17780 // fold select C, 16, 0 -> shl C, 4
17781 if (N2C && isNullConstant(N3) && N2C->getAPIntValue().isPowerOf2() &&
17782 TLI.getBooleanContents(N0.getValueType()) ==
17783 TargetLowering::ZeroOrOneBooleanContent) {
17784
17785 // If the caller doesn't want us to simplify this into a zext of a compare,
17786 // don't do it.
17787 if (NotExtCompare && N2C->isOne())
17788 return SDValue();
17789
17790 // Get a SetCC of the condition
17791 // NOTE: Don't create a SETCC if it's not legal on this target.
17792 if (!LegalOperations ||
17793 TLI.isOperationLegal(ISD::SETCC, N0.getValueType())) {
17794 SDValue Temp, SCC;
17795 // cast from setcc result type to select result type
17796 if (LegalTypes) {
17797 SCC = DAG.getSetCC(DL, getSetCCResultType(N0.getValueType()),
17798 N0, N1, CC);
17799 if (N2.getValueType().bitsLT(SCC.getValueType()))
17800 Temp = DAG.getZeroExtendInReg(SCC, SDLoc(N2),
17801 N2.getValueType());
17802 else
17803 Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2),
17804 N2.getValueType(), SCC);
17805 } else {
17806 SCC = DAG.getSetCC(SDLoc(N0), MVT::i1, N0, N1, CC);
17807 Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2),
17808 N2.getValueType(), SCC);
17809 }
17810
17811 AddToWorklist(SCC.getNode());
17812 AddToWorklist(Temp.getNode());
17813
17814 if (N2C->isOne())
17815 return Temp;
17816
17817 // shl setcc result by log2 n2c
17818 return DAG.getNode(
17819 ISD::SHL, DL, N2.getValueType(), Temp,
17820 DAG.getConstant(N2C->getAPIntValue().logBase2(), SDLoc(Temp),
17821 getShiftAmountTy(Temp.getValueType())));
17822 }
17823 }
17824
17825 // Check to see if this is an integer abs.
17826 // select_cc setg[te] X, 0, X, -X ->
17827 // select_cc setgt X, -1, X, -X ->
17828 // select_cc setl[te] X, 0, -X, X ->
17829 // select_cc setlt X, 1, -X, X ->
17830 // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
17831 if (N1C) {
17832 ConstantSDNode *SubC = nullptr;
17833 if (((N1C->isNullValue() && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
17834 (N1C->isAllOnesValue() && CC == ISD::SETGT)) &&
17835 N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1))
17836 SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0));
17837 else if (((N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE)) ||
17838 (N1C->isOne() && CC == ISD::SETLT)) &&
17839 N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1))
17840 SubC = dyn_cast<ConstantSDNode>(N2.getOperand(0));
17841
17842 EVT XType = N0.getValueType();
17843 if (SubC && SubC->isNullValue() && XType.isInteger()) {
17844 SDLoc DL(N0);
17845 SDValue Shift = DAG.getNode(ISD::SRA, DL, XType,
17846 N0,
17847 DAG.getConstant(XType.getSizeInBits() - 1, DL,
17848 getShiftAmountTy(N0.getValueType())));
17849 SDValue Add = DAG.getNode(ISD::ADD, DL,
17850 XType, N0, Shift);
17851 AddToWorklist(Shift.getNode());
17852 AddToWorklist(Add.getNode());
17853 return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
17854 }
17855 }
17856
17857 // select_cc seteq X, 0, sizeof(X), ctlz(X) -> ctlz(X)
17858 // select_cc seteq X, 0, sizeof(X), ctlz_zero_undef(X) -> ctlz(X)
17859 // select_cc seteq X, 0, sizeof(X), cttz(X) -> cttz(X)
17860 // select_cc seteq X, 0, sizeof(X), cttz_zero_undef(X) -> cttz(X)
17861 // select_cc setne X, 0, ctlz(X), sizeof(X) -> ctlz(X)
17862 // select_cc setne X, 0, ctlz_zero_undef(X), sizeof(X) -> ctlz(X)
17863 // select_cc setne X, 0, cttz(X), sizeof(X) -> cttz(X)
17864 // select_cc setne X, 0, cttz_zero_undef(X), sizeof(X) -> cttz(X)
17865 if (N1C && N1C->isNullValue() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
17866 SDValue ValueOnZero = N2;
17867 SDValue Count = N3;
17868 // If the condition is SETNE instead of SETEQ, swap the operands.
17869 if (CC == ISD::SETNE)
17870 std::swap(ValueOnZero, Count);
17871 // Check if the value on zero is a constant equal to the bits in the type.
17872 if (auto *ValueOnZeroC = dyn_cast<ConstantSDNode>(ValueOnZero)) {
17873 if (ValueOnZeroC->getAPIntValue() == VT.getSizeInBits()) {
17874 // If the other operand is cttz/cttz_zero_undef of N0, and cttz is
17875 // legal, combine to just cttz.
17876 if ((Count.getOpcode() == ISD::CTTZ ||
17877 Count.getOpcode() == ISD::CTTZ_ZERO_UNDEF) &&
17878 N0 == Count.getOperand(0) &&
17879 (!LegalOperations || TLI.isOperationLegal(ISD::CTTZ, VT)))
17880 return DAG.getNode(ISD::CTTZ, DL, VT, N0);
17881 // If the other operand is ctlz/ctlz_zero_undef of N0, and ctlz is
17882 // legal, combine to just ctlz.
17883 if ((Count.getOpcode() == ISD::CTLZ ||
17884 Count.getOpcode() == ISD::CTLZ_ZERO_UNDEF) &&
17885 N0 == Count.getOperand(0) &&
17886 (!LegalOperations || TLI.isOperationLegal(ISD::CTLZ, VT)))
17887 return DAG.getNode(ISD::CTLZ, DL, VT, N0);
17888 }
17889 }
17890 }
17891
17892 return SDValue();
17893}
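The integer abs pattern matched above corresponds to this scalar computation (absViaShift is an invented name; same arithmetic-shift assumption as the earlier select sketch):

#include <cassert>
#include <cstdint>

// Branch-free abs matching the pattern above: Y = sra(X, 31);
// abs(X) = xor(add(X, Y), Y).
static int32_t absViaShift(int32_t X) {
  int32_t Y = X >> 31;
  return (X + Y) ^ Y;
}

int main() {
  assert(absViaShift(-7) == 7);
  assert(absViaShift(12) == 12);
  assert(absViaShift(0) == 0);
  return 0;
}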
17894
17895/// This is a stub for TargetLowering::SimplifySetCC.
17896SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
17897 ISD::CondCode Cond, const SDLoc &DL,
17898 bool foldBooleans) {
17899 TargetLowering::DAGCombinerInfo
17900 DagCombineInfo(DAG, Level, false, this);
17901 return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL);
17902}
17903
17904/// Given an ISD::SDIV node expressing a divide by constant, return
17905/// a DAG expression to select that will generate the same value by multiplying
17906/// by a magic number.
17907/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
17908SDValue DAGCombiner::BuildSDIV(SDNode *N) {
17909 // When optimizing for minimum size, we don't want to expand a div to a mul
17910 // and a shift.
17911 if (DAG.getMachineFunction().getFunction().optForMinSize())
17912 return SDValue();
17913
17914 ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
17915 if (!C)
17916 return SDValue();
17917
17918 // Avoid division by zero.
17919 if (C->isNullValue())
17920 return SDValue();
17921
17922 std::vector<SDNode *> Built;
17923 SDValue S =
17924 TLI.BuildSDIV(N, C->getAPIntValue(), DAG, LegalOperations, Built);
17925
17926 for (SDNode *N : Built)
17927 AddToWorklist(N);
17928 return S;
17929}
17930
17931/// Given an ISD::SDIV node expressing a divide by constant power of 2, return a
17932/// DAG expression that will generate the same value by right shifting.
17933SDValue DAGCombiner::BuildSDIVPow2(SDNode *N) {
17934 ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
17935 if (!C)
17936 return SDValue();
17937
17938 // Avoid division by zero.
17939 if (C->isNullValue())
17940 return SDValue();
17941
17942 std::vector<SDNode *> Built;
17943 SDValue S = TLI.BuildSDIVPow2(N, C->getAPIntValue(), DAG, &Built);
17944
17945 for (SDNode *N : Built)
17946 AddToWorklist(N);
17947 return S;
17948}
17949
17950/// Given an ISD::UDIV node expressing a divide by constant, return a DAG
17951/// expression that will generate the same value by multiplying by a magic
17952/// number.
17953/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
17954SDValue DAGCombiner::BuildUDIV(SDNode *N) {
17955 // When optimizing for minimum size, we don't want to expand a div to a mul
17956 // and a shift.
17957 if (DAG.getMachineFunction().getFunction().optForMinSize())
17958 return SDValue();
17959
17960 ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
17961 if (!C)
17962 return SDValue();
17963
17964 // Avoid division by zero.
17965 if (C->isNullValue())
17966 return SDValue();
17967
17968 std::vector<SDNode *> Built;
17969 SDValue S =
17970 TLI.BuildUDIV(N, C->getAPIntValue(), DAG, LegalOperations, Built);
17971
17972 for (SDNode *N : Built)
17973 AddToWorklist(N);
17974 return S;
17975}
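As one concrete instance of the multiply-by-magic transform, unsigned 32-bit division by 3 can be done with a 64-bit multiply and a shift; the constant 0xAAAAAAAB is (2^33 + 1)/3 and is specific to this divisor (cf. Hacker's Delight). A standalone sketch, with udiv3 as an invented name:

#include <cassert>
#include <cstdint>

// Unsigned 32-bit division by 3 via multiply-and-shift; because the magic
// value M is (2^33 + 1) / 3, (X * M) >> 33 == X / 3 for all 32-bit X.
static uint32_t udiv3(uint32_t X) {
  return (uint32_t)(((uint64_t)X * 0xAAAAAAABull) >> 33);
}

int main() {
  assert(udiv3(0) == 0);
  assert(udiv3(7) == 2);
  assert(udiv3(0xFFFFFFFFu) == 0xFFFFFFFFu / 3u);
  return 0;
}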
17976
17977/// Determines the LogBase2 value for a non-zero input value using the
17978/// transform: LogBase2(V) = (EltBits - 1) - ctlz(V).
17979SDValue DAGCombiner::BuildLogBase2(SDValue V, const SDLoc &DL) {
17980 EVT VT = V.getValueType();
17981 unsigned EltBits = VT.getScalarSizeInBits();
17982 SDValue Ctlz = DAG.getNode(ISD::CTLZ, DL, VT, V);
17983 SDValue Base = DAG.getConstant(EltBits - 1, DL, VT);
17984 SDValue LogBase2 = DAG.getNode(ISD::SUB, DL, VT, Base, Ctlz);
17985 return LogBase2;
17986}
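The identity is easy to check in scalar code; this sketch uses the GCC/Clang __builtin_clz intrinsic in place of ISD::CTLZ (logBase2 is an invented name) and assumes a non-zero input:

#include <cassert>

// LogBase2(V) = (EltBits - 1) - ctlz(V); valid only for non-zero V, and
// exact when V is a power of two.
static unsigned logBase2(unsigned V) {
  return 31u - (unsigned)__builtin_clz(V);
}

int main() {
  assert(logBase2(1) == 0);
  assert(logBase2(16) == 4);
  assert(logBase2(0x80000000u) == 31);
  return 0;
}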
17987
17988/// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
17989/// For the reciprocal, we need to find the zero of the function:
17990/// F(X) = A X - 1 [which has a zero at X = 1/A]
17991/// =>
17992/// X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form
17993/// does not require additional intermediate precision]
17994SDValue DAGCombiner::BuildReciprocalEstimate(SDValue Op, SDNodeFlags Flags) {
17995 if (Level >= AfterLegalizeDAG)
17996 return SDValue();
17997
17998 // TODO: Handle half and/or extended types?
17999 EVT VT = Op.getValueType();
18000 if (VT.getScalarType() != MVT::f32 && VT.getScalarType() != MVT::f64)
18001 return SDValue();
18002
18003 // If estimates are explicitly disabled for this function, we're done.
18004 MachineFunction &MF = DAG.getMachineFunction();
18005 int Enabled = TLI.getRecipEstimateDivEnabled(VT, MF);
18006 if (Enabled == TLI.ReciprocalEstimate::Disabled)
18007 return SDValue();
18008
18009 // Estimates may be explicitly enabled for this type with a custom number of
18010 // refinement steps.
18011 int Iterations = TLI.getDivRefinementSteps(VT, MF);
18012 if (SDValue Est = TLI.getRecipEstimate(Op, DAG, Enabled, Iterations)) {
18013 AddToWorklist(Est.getNode());
18014
18015 if (Iterations) {
18016 EVT VT = Op.getValueType();
18017 SDLoc DL(Op);
18018 SDValue FPOne = DAG.getConstantFP(1.0, DL, VT);
18019
18020 // Newton iterations: Est = Est + Est (1 - Arg * Est)
18021 for (int i = 0; i < Iterations; ++i) {
18022 SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Op, Est, Flags);
18023 AddToWorklist(NewEst.getNode());
18024
18025 NewEst = DAG.getNode(ISD::FSUB, DL, VT, FPOne, NewEst, Flags);
18026 AddToWorklist(NewEst.getNode());
18027
18028 NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst, Flags);
18029 AddToWorklist(NewEst.getNode());
18030
18031 Est = DAG.getNode(ISD::FADD, DL, VT, Est, NewEst, Flags);
18032 AddToWorklist(Est.getNode());
18033 }
18034 }
18035 return Est;
18036 }
18037
18038 return SDValue();
18039}
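A scalar sketch of the same node sequence (one FMUL, FSUB, FMUL, FADD per step); recipNR is an invented name, and the starting estimate and iteration count are arbitrary choices for the example:

#include <cassert>
#include <cmath>

// X_{i+1} = X_i + X_i * (1 - A * X_i), matching the loop above.
static float recipNR(float A, float Est, int Iterations) {
  for (int i = 0; i < Iterations; ++i) {
    float NewEst = A * Est;   // FMUL
    NewEst = 1.0f - NewEst;   // FSUB
    NewEst = Est * NewEst;    // FMUL
    Est = Est + NewEst;       // FADD
  }
  return Est;
}

int main() {
  // From a crude estimate of 1/3, three steps converge to float precision.
  assert(std::fabs(recipNR(3.0f, 0.3f, 3) - 1.0f / 3.0f) < 1e-6f);
  return 0;
}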
18040
18041/// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
18042/// For the reciprocal sqrt, we need to find the zero of the function:
18043/// F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
18044/// =>
18045/// X_{i+1} = X_i (1.5 - A X_i^2 / 2)
18046/// As a result, we precompute A/2 prior to the iteration loop.
18047SDValue DAGCombiner::buildSqrtNROneConst(SDValue Arg, SDValue Est,
18048 unsigned Iterations,
18049 SDNodeFlags Flags, bool Reciprocal) {
18050 EVT VT = Arg.getValueType();
18051 SDLoc DL(Arg);
18052 SDValue ThreeHalves = DAG.getConstantFP(1.5, DL, VT);
18053
18054 // We now need 0.5 * Arg which we can write as (1.5 * Arg - Arg) so that
18055 // this entire sequence requires only one FP constant.
18056 SDValue HalfArg = DAG.getNode(ISD::FMUL, DL, VT, ThreeHalves, Arg, Flags);
18057 AddToWorklist(HalfArg.getNode());
18058
18059 HalfArg = DAG.getNode(ISD::FSUB, DL, VT, HalfArg, Arg, Flags);
18060 AddToWorklist(HalfArg.getNode());
18061
18062 // Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est)
18063 for (unsigned i = 0; i < Iterations; ++i) {
18064 SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, Est, Flags);
18065 AddToWorklist(NewEst.getNode());
18066
18067 NewEst = DAG.getNode(ISD::FMUL, DL, VT, HalfArg, NewEst, Flags);
18068 AddToWorklist(NewEst.getNode());
18069
18070 NewEst = DAG.getNode(ISD::FSUB, DL, VT, ThreeHalves, NewEst, Flags);
18071 AddToWorklist(NewEst.getNode());
18072
18073 Est = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst, Flags);
18074 AddToWorklist(Est.getNode());
18075 }
18076
18077 // If non-reciprocal square root is requested, multiply the result by Arg.
18078 if (!Reciprocal) {
18079 Est = DAG.getNode(ISD::FMUL, DL, VT, Est, Arg, Flags);
18080 AddToWorklist(Est.getNode());
18081 }
18082
18083 return Est;
18084}
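// --- Illustrative sketch (editorial; not part of DAGCombiner.cpp) ---
// A scalar model of the one-constant loop above (rsqrtOneConst is a
// hypothetical name; the seed stands in for a hardware estimate). Writing
// HalfArg as 1.5 * A - A leaves 1.5 as the only FP literal in the sequence.
#include <cassert>
#include <cmath>

static float rsqrtOneConst(float A, float Est, unsigned Iterations) {
  float HalfArg = 1.5f * A - A;                // == 0.5f * A, no new constant
  for (unsigned i = 0; i < Iterations; ++i)
    Est = Est * (1.5f - HalfArg * Est * Est);  // X_i (1.5 - A/2 * X_i^2)
  return Est;
}

int main() {
  float R = rsqrtOneConst(4.0f, 0.4f, 4);      // converges to 1/sqrt(4) == 0.5
  assert(std::fabs(R - 0.5f) < 1e-6f);
}
// ---------------------------------------------------------------------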
18085
18086/// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
18087/// For the reciprocal sqrt, we need to find the zero of the function:
18088/// F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
18089/// =>
18090/// X_{i+1} = (-0.5 * X_i) * (A * X_i * X_i + (-3.0))
18091SDValue DAGCombiner::buildSqrtNRTwoConst(SDValue Arg, SDValue Est,
18092 unsigned Iterations,
18093 SDNodeFlags Flags, bool Reciprocal) {
18094 EVT VT = Arg.getValueType();
18095 SDLoc DL(Arg);
18096 SDValue MinusThree = DAG.getConstantFP(-3.0, DL, VT);
18097 SDValue MinusHalf = DAG.getConstantFP(-0.5, DL, VT);
18098
18099 // This routine must enter the loop below to work correctly
18100 // when (Reciprocal == false).
18101 assert(Iterations > 0);
18102
18103 // Newton iterations for reciprocal square root:
18104 // E = (E * -0.5) * ((A * E) * E + -3.0)
18105 for (unsigned i = 0; i < Iterations; ++i) {
18106 SDValue AE = DAG.getNode(ISD::FMUL, DL, VT, Arg, Est, Flags);
18107 AddToWorklist(AE.getNode());
18108
18109 SDValue AEE = DAG.getNode(ISD::FMUL, DL, VT, AE, Est, Flags);
18110 AddToWorklist(AEE.getNode());
18111
18112 SDValue RHS = DAG.getNode(ISD::FADD, DL, VT, AEE, MinusThree, Flags);
18113 AddToWorklist(RHS.getNode());
18114
18115 // When calculating a square root, on the last iteration build:
18116 // S = ((A * E) * -0.5) * ((A * E) * E + -3.0)
18117 // (note the common subexpression A * E)
18118 SDValue LHS;
18119 if (Reciprocal || (i + 1) < Iterations) {
18120 // RSQRT: LHS = (E * -0.5)
18121 LHS = DAG.getNode(ISD::FMUL, DL, VT, Est, MinusHalf, Flags);
18122 } else {
18123 // SQRT: LHS = (A * E) * -0.5
18124 LHS = DAG.getNode(ISD::FMUL, DL, VT, AE, MinusHalf, Flags);
18125 }
18126 AddToWorklist(LHS.getNode());
18127
18128 Est = DAG.getNode(ISD::FMUL, DL, VT, LHS, RHS, Flags);
18129 AddToWorklist(Est.getNode());
18130 }
18131
18132 return Est;
18133}
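// --- Illustrative sketch (editorial; not part of DAGCombiner.cpp) ---
// A scalar model of the two-constant variant (sqrtTwoConst is a hypothetical
// name), including the last-iteration trick: the sqrt case reuses the A * E
// product, so sqrt(A) = A * rsqrt(A) costs no extra multiply.
#include <cassert>
#include <cmath>

static float sqrtTwoConst(float A, float Est, unsigned Iterations) {
  for (unsigned i = 0; i < Iterations; ++i) {
    float AE = A * Est;
    float RHS = AE * Est + -3.0f;           // (A * E) * E + (-3.0)
    bool Last = (i + 1 == Iterations);
    float LHS = (Last ? AE : Est) * -0.5f;  // fold the final multiply by A
    Est = LHS * RHS;
  }
  return Est;                               // sqrt(A) after the last step
}

int main() {
  float R = sqrtTwoConst(9.0f, 0.3f, 4);
  assert(std::fabs(R - 3.0f) < 1e-5f);
}
// ---------------------------------------------------------------------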
18134
18135/// Build code to calculate either rsqrt(Op) or sqrt(Op). In the latter case,
18136/// Op*rsqrt(Op) is actually computed, so additional postprocessing is needed if
18137/// Op can be zero.
18138SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, SDNodeFlags Flags,
18139 bool Reciprocal) {
18140 if (Level >= AfterLegalizeDAG)
18141 return SDValue();
18142
18143 // TODO: Handle half and/or extended types?
18144 EVT VT = Op.getValueType();
18145 if (VT.getScalarType() != MVT::f32 && VT.getScalarType() != MVT::f64)
18146 return SDValue();
18147
18148 // If estimates are explicitly disabled for this function, we're done.
18149 MachineFunction &MF = DAG.getMachineFunction();
18150 int Enabled = TLI.getRecipEstimateSqrtEnabled(VT, MF);
18151 if (Enabled == TLI.ReciprocalEstimate::Disabled)
18152 return SDValue();
18153
18154 // Estimates may be explicitly enabled for this type with a custom number of
18155 // refinement steps.
18156 int Iterations = TLI.getSqrtRefinementSteps(VT, MF);
18157
18158 bool UseOneConstNR = false;
18159 if (SDValue Est =
18160 TLI.getSqrtEstimate(Op, DAG, Enabled, Iterations, UseOneConstNR,
18161 Reciprocal)) {
18162 AddToWorklist(Est.getNode());
18163
18164 if (Iterations) {
18165 Est = UseOneConstNR
18166 ? buildSqrtNROneConst(Op, Est, Iterations, Flags, Reciprocal)
18167 : buildSqrtNRTwoConst(Op, Est, Iterations, Flags, Reciprocal);
18168
18169 if (!Reciprocal) {
18170 // The estimate is now completely wrong if the input was exactly 0.0 or
18171 // possibly a denormal. Force the answer to 0.0 for those cases.
18172 EVT VT = Op.getValueType();
18173 SDLoc DL(Op);
18174 EVT CCVT = getSetCCResultType(VT);
18175 ISD::NodeType SelOpcode = VT.isVector() ? ISD::VSELECT : ISD::SELECT;
18176 const Function &F = DAG.getMachineFunction().getFunction();
18177 Attribute Denorms = F.getFnAttribute("denormal-fp-math");
18178 if (Denorms.getValueAsString().equals("ieee")) {
18179 // fabs(X) < SmallestNormal ? 0.0 : Est
18180 const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
18181 APFloat SmallestNorm = APFloat::getSmallestNormalized(FltSem);
18182 SDValue NormC = DAG.getConstantFP(SmallestNorm, DL, VT);
18183 SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
18184 SDValue Fabs = DAG.getNode(ISD::FABS, DL, VT, Op);
18185 SDValue IsDenorm = DAG.getSetCC(DL, CCVT, Fabs, NormC, ISD::SETLT);
18186 Est = DAG.getNode(SelOpcode, DL, VT, IsDenorm, FPZero, Est);
18187 AddToWorklist(Fabs.getNode());
18188 AddToWorklist(IsDenorm.getNode());
18189 AddToWorklist(Est.getNode());
18190 } else {
18191 // X == 0.0 ? 0.0 : Est
18192 SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
18193 SDValue IsZero = DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
18194 Est = DAG.getNode(SelOpcode, DL, VT, IsZero, FPZero, Est);
18195 AddToWorklist(IsZero.getNode());
18196 AddToWorklist(Est.getNode());
18197 }
18198 }
18199 }
18200 return Est;
18201 }
18202
18203 return SDValue();
18204}
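// --- Illustrative sketch (editorial; not part of DAGCombiner.cpp) ---
// A scalar model of the zero fixup above on the non-denormal path:
// Op * rsqrt(Op) evaluates to 0 * inf = NaN at Op == 0, so the select forces
// the IEEE answer sqrt(0) == 0. sqrtViaRsqrt is a hypothetical name, and
// 1.0f / std::sqrt(Op) stands in for the estimate-plus-refinement sequence.
#include <cassert>
#include <cmath>

static float sqrtViaRsqrt(float Op) {
  float Est = Op * (1.0f / std::sqrt(Op));  // Op * rsqrt(Op)
  return Op == 0.0f ? 0.0f : Est;           // X == 0.0 ? 0.0 : Est
}

int main() {
  assert(sqrtViaRsqrt(0.0f) == 0.0f);       // without the select this is NaN
  assert(std::fabs(sqrtViaRsqrt(16.0f) - 4.0f) < 1e-6f);
}
// ---------------------------------------------------------------------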
18205
18206SDValue DAGCombiner::buildRsqrtEstimate(SDValue Op, SDNodeFlags Flags) {
18207 return buildSqrtEstimateImpl(Op, Flags, true);
18208}
18209
18210SDValue DAGCombiner::buildSqrtEstimate(SDValue Op, SDNodeFlags Flags) {
18211 return buildSqrtEstimateImpl(Op, Flags, false);
18212}
18213
18214/// Return true if there is any possibility that the two addresses overlap.
18215bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
18216 // If they are the same then they must be aliases.
18217 if (Op0->getBasePtr() == Op1->getBasePtr()) return true;
18218
18219 // If they are both volatile then they cannot be reordered.
18220 if (Op0->isVolatile() && Op1->isVolatile()) return true;
18221
18222 // If one operation reads from invariant memory and the other may store, they
18223 // cannot alias. These checks should really use the equivalent of mayWrite,
18224 // but that only matters for memory nodes other than load/store.
18225 if (Op0->isInvariant() && Op1->writeMem())
18226 return false;
18227
18228 if (Op1->isInvariant() && Op0->writeMem())
18229 return false;
18230
18231 unsigned NumBytes0 = Op0->getMemoryVT().getStoreSize();
18232 unsigned NumBytes1 = Op1->getMemoryVT().getStoreSize();
18233
18234 // Check for BaseIndexOffset matching.
18235 BaseIndexOffset BasePtr0 = BaseIndexOffset::match(Op0, DAG);
18236 BaseIndexOffset BasePtr1 = BaseIndexOffset::match(Op1, DAG);
18237 int64_t PtrDiff;
18238 if (BasePtr0.getBase().getNode() && BasePtr1.getBase().getNode()) {
18239 if (BasePtr0.equalBaseIndex(BasePtr1, DAG, PtrDiff))
18240 return !((NumBytes0 <= PtrDiff) || (PtrDiff + NumBytes1 <= 0));
18241
18242 // If both BasePtr0 and BasePtr1 are FrameIndexes, we will not be
18243 // able to calculate their relative offset if at least one arises
18244 // from an alloca. However, these allocas cannot overlap and we
18245 // can infer there is no alias.
18246 if (auto *A = dyn_cast<FrameIndexSDNode>(BasePtr0.getBase()))
18247 if (auto *B = dyn_cast<FrameIndexSDNode>(BasePtr1.getBase())) {
18248 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18249 // If the bases are the same frame index but we couldn't find a
18250 // constant offset (the indices are different), be conservative.
18251 if (A != B && (!MFI.isFixedObjectIndex(A->getIndex()) ||
18252 !MFI.isFixedObjectIndex(B->getIndex())))
18253 return false;
18254 }
18255
18256 bool IsFI0 = isa<FrameIndexSDNode>(BasePtr0.getBase());
18257 bool IsFI1 = isa<FrameIndexSDNode>(BasePtr1.getBase());
18258 bool IsGV0 = isa<GlobalAddressSDNode>(BasePtr0.getBase());
18259 bool IsGV1 = isa<GlobalAddressSDNode>(BasePtr1.getBase());
18260 bool IsCV0 = isa<ConstantPoolSDNode>(BasePtr0.getBase());
18261 bool IsCV1 = isa<ConstantPoolSDNode>(BasePtr1.getBase());
18262
18263 // If the bases are of mismatched kinds, or the indices are comparable, we
18264 // can prove the accesses do not alias.
18265 if ((BasePtr0.getIndex() == BasePtr1.getIndex() || (IsFI0 != IsFI1) ||
18266 (IsGV0 != IsGV1) || (IsCV0 != IsCV1)) &&
18267 (IsFI0 || IsGV0 || IsCV0) && (IsFI1 || IsGV1 || IsCV1))
18268 return false;
18269 }
18270
18271 // If we know the two accesses have relatively large alignment compared to
18272 // their size and offset, we may be able to prove they do not alias. This
18273 // check is conservative for now, to catch cases created by splitting
18274 // vector types.
18275 int64_t SrcValOffset0 = Op0->getSrcValueOffset();
18276 int64_t SrcValOffset1 = Op1->getSrcValueOffset();
18277 unsigned OrigAlignment0 = Op0->getOriginalAlignment();
18278 unsigned OrigAlignment1 = Op1->getOriginalAlignment();
18279 if (OrigAlignment0 == OrigAlignment1 && SrcValOffset0 != SrcValOffset1 &&
18280 NumBytes0 == NumBytes1 && OrigAlignment0 > NumBytes0) {
18281 int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0;
18282 int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1;
18283
18284 // There is no overlap between these relatively aligned accesses of
18285 // similar size. Return no alias.
18286 if ((OffAlign0 + NumBytes0) <= OffAlign1 ||
18287 (OffAlign1 + NumBytes1) <= OffAlign0)
18288 return false;
18289 }
18290
18291 bool UseAA = CombinerGlobalAA.getNumOccurrences() > 0
18292 ? CombinerGlobalAA
18293 : DAG.getSubtarget().useAA();
18294#ifndef NDEBUG
18295 if (CombinerAAOnlyFunc.getNumOccurrences() &&
18296 CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
18297 UseAA = false;
18298#endif
18299
18300 if (UseAA && AA &&
18301 Op0->getMemOperand()->getValue() && Op1->getMemOperand()->getValue()) {
18302 // Use alias analysis information.
18303 int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1);
18304 int64_t Overlap0 = NumBytes0 + SrcValOffset0 - MinOffset;
18305 int64_t Overlap1 = NumBytes1 + SrcValOffset1 - MinOffset;
18306 AliasResult AAResult =
18307 AA->alias(MemoryLocation(Op0->getMemOperand()->getValue(), Overlap0,
18308 UseTBAA ? Op0->getAAInfo() : AAMDNodes()),
18309 MemoryLocation(Op1->getMemOperand()->getValue(), Overlap1,
18310 UseTBAA ? Op1->getAAInfo() : AAMDNodes()));
18311 if (AAResult == NoAlias)
18312 return false;
18313 }
18314
18315 // Otherwise we have to assume they alias.
18316 return true;
18317}
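// --- Illustrative sketch (editorial; not part of DAGCombiner.cpp) ---
// The core disjointness test above is plain interval arithmetic. With
// PtrDiff = Base1 - Base0, the accesses [0, NumBytes0) and
// [PtrDiff, PtrDiff + NumBytes1) can only overlap when neither ends at or
// before the point where the other begins. mayOverlap is a hypothetical name.
#include <cassert>
#include <cstdint>

static bool mayOverlap(int64_t PtrDiff, int64_t NumBytes0, int64_t NumBytes1) {
  return !((NumBytes0 <= PtrDiff) || (PtrDiff + NumBytes1 <= 0));
}

int main() {
  assert(!mayOverlap(4, 4, 4));   // [0,4) vs [4,8): adjacent, no alias
  assert(mayOverlap(3, 4, 4));    // [0,4) vs [3,7): one byte overlaps
  assert(!mayOverlap(-8, 8, 8));  // [0,8) vs [-8,0): no alias
}
// ---------------------------------------------------------------------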
18318
18319/// Walk up chain skipping non-aliasing memory nodes,
18320/// looking for aliasing nodes and adding them to the Aliases vector.
18321void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
18322 SmallVectorImpl<SDValue> &Aliases) {
18323 SmallVector<SDValue, 8> Chains; // List of chains to visit.
18324 SmallPtrSet<SDNode *, 16> Visited; // Visited node set.
18325
18326 // Get alias information for node.
18327 bool IsLoad = isa<LoadSDNode>(N) && !cast<LSBaseSDNode>(N)->isVolatile();
18328
18329 // Starting off.
18330 Chains.push_back(OriginalChain);
18331 unsigned Depth = 0;
18332
18333 // Look at each chain and determine if it is an alias. If so, add it to the
18334 // aliases list. If not, then continue up the chain looking for the next
18335 // candidate.
18336 while (!Chains.empty()) {
18337 SDValue Chain = Chains.pop_back_val();
18338
18339 // For TokenFactor nodes, look at each operand and only continue up the
18340 // chain until we reach the depth limit.
18341 //
18342 // FIXME: The depth check could be made to return the last non-aliasing
18343 // chain we found before we hit a tokenfactor rather than the original
18344 // chain.
18345 if (Depth > TLI.getGatherAllAliasesMaxDepth()) {
18346 Aliases.clear();
18347 Aliases.push_back(OriginalChain);
18348 return;
18349 }
18350
18351 // Don't bother if we've been here before.
18352 if (!Visited.insert(Chain.getNode()).second)
18353 continue;
18354
18355 switch (Chain.getOpcode()) {
18356 case ISD::EntryToken:
18357 // Entry token is ideal chain operand, but handled in FindBetterChain.
18358 break;
18359
18360 case ISD::LOAD:
18361 case ISD::STORE: {
18362 // Get alias information for Chain.
18363 bool IsOpLoad = isa<LoadSDNode>(Chain.getNode()) &&
18364 !cast<LSBaseSDNode>(Chain.getNode())->isVolatile();
18365
18366 // If the chain is an alias, then stop here.
18367 if (!(IsLoad && IsOpLoad) &&
18368 isAlias(cast<LSBaseSDNode>(N), cast<LSBaseSDNode>(Chain.getNode()))) {
18369 Aliases.push_back(Chain);
18370 } else {
18371 // Look further up the chain.
18372 Chains.push_back(Chain.getOperand(0));
18373 ++Depth;
18374 }
18375 break;
18376 }
18377
18378 case ISD::TokenFactor:
18379 // We have to check each of the operands of the token factor for "small"
18380 // token factors, so we queue them up. Adding the operands to the queue
18381 // (stack) in reverse order maintains the original order and increases the
18382 // likelihood that getNode will find a matching token factor (CSE).
18383 if (Chain.getNumOperands() > 16) {
18384 Aliases.push_back(Chain);
18385 break;
18386 }
18387 for (unsigned n = Chain.getNumOperands(); n;)
18388 Chains.push_back(Chain.getOperand(--n));
18389 ++Depth;
18390 break;
18391
18392 case ISD::CopyFromReg:
18393 // Forward past CopyFromReg.
18394 Chains.push_back(Chain.getOperand(0));
18395 ++Depth;
18396 break;
18397
18398 default:
18399 // For all other instructions we will just have to take what we can get.
18400 Aliases.push_back(Chain);
18401 break;
18402 }
18403 }
18404}
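// --- Illustrative sketch (editorial; not part of DAGCombiner.cpp) ---
// A toy model of the TokenFactor expansion above: pushing operands onto the
// worklist in reverse makes the pop order equal the original operand order,
// which is what keeps the rebuilt token factor CSE-friendly. The container
// names are illustrative.
#include <cassert>
#include <vector>

int main() {
  std::vector<int> Ops = {1, 2, 3};
  std::vector<int> Stack, Visit;
  for (size_t n = Ops.size(); n;)
    Stack.push_back(Ops[--n]);      // push 3, 2, 1
  while (!Stack.empty()) {
    Visit.push_back(Stack.back());  // pop 1, 2, 3: original order preserved
    Stack.pop_back();
  }
  assert((Visit == std::vector<int>{1, 2, 3}));
}
// ---------------------------------------------------------------------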
18405
18406/// Walk up chain skipping non-aliasing memory nodes, looking for a better chain
18407/// (aliasing node.)
18408SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
18409 if (OptLevel == CodeGenOpt::None)
18410 return OldChain;
18411
18412 // Ops for replacing token factor.
18413 SmallVector<SDValue, 8> Aliases;
18414
18415 // Accumulate all the aliases to this node.
18416 GatherAllAliases(N, OldChain, Aliases);
18417
18418 // If no operands then chain to entry token.
18419 if (Aliases.size() == 0)
18420 return DAG.getEntryNode();
18421
18422 // If a single operand then chain to it. We don't need to revisit it.
18423 if (Aliases.size() == 1)
18424 return Aliases[0];
18425
18426 // Construct a custom tailored token factor.
18427 return DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Aliases);
18428}
18429
18430// This function tries to collect a bunch of potentially interesting
18431// nodes whose chains can be improved, all at once. This might seem
18432// redundant, as this function gets called when visiting every store
18433// node, so why not let the work be done on each store as it's visited?
18434//
18435// I believe this is mainly important because MergeConsecutiveStores
18436// is unable to deal with merging stores of different sizes, so unless
18437// we improve the chains of all the potential candidates up-front
18438// before running MergeConsecutiveStores, it might only see some of
18439// the nodes that will eventually be candidates, and then not be able
18440// to go from a partially-merged state to the desired final
18441// fully-merged state.
18442bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) {
18443 if (OptLevel == CodeGenOpt::None)
18444 return false;
18445
18446 // This holds the base pointer, index, and the offset in bytes from the base
18447 // pointer.
18448 BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
18449
18450 // We must have a base and an offset.
18451 if (!BasePtr.getBase().getNode())
18452 return false;
18453
18454 // Do not handle stores to undef base pointers.
18455 if (BasePtr.getBase().isUndef())
18456 return false;
18457
18458 SmallVector<StoreSDNode *, 8> ChainedStores;
18459 ChainedStores.push_back(St);
18460
18461 // Walk up the chain and look for nodes with offsets from the same
18462 // base pointer. Stop when reaching an instruction of a different kind,
18463 // or one which has a different base pointer.
18464 StoreSDNode *Index = St;
18465 while (Index) {
18466 // If the chain has more than one use, then we can't reorder the mem ops.
18467 if (Index != St && !SDValue(Index, 0)->hasOneUse())
18468 break;
18469
18470 if (Index->isVolatile() || Index->isIndexed())
18471 break;
18472
18473 // Find the base pointer and offset for this memory node.
18474 BaseIndexOffset Ptr = BaseIndexOffset::match(Index, DAG);
18475
18476 // Check that the base pointer is the same as the original one.
18477 if (!BasePtr.equalBaseIndex(Ptr, DAG))
18478 break;
18479
18480 // Walk up the chain to find the next store node, ignoring any
18481 // intermediate loads. Any other kind of node will halt the loop.
18482 SDNode *NextInChain = Index->getChain().getNode();
18483 while (true) {
18484 if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) {
18485 // We found a store node. Use it for the next iteration.
18486 if (STn->isVolatile() || STn->isIndexed()) {
18487 Index = nullptr;
18488 break;
18489 }
18490 ChainedStores.push_back(STn);
18491 Index = STn;
18492 break;
18493 } else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) {
18494 NextInChain = Ldn->getChain().getNode();
18495 continue;
18496 } else {
18497 Index = nullptr;
18498 break;
18499 }
18500 } // end while
18501 }
18502
18503 // At this point, ChainedStores lists all of the Store nodes
18504 // reachable by iterating up through chain nodes matching the above
18505 // conditions. For each such store identified, try to find an
18506 // earlier chain to attach the store to which won't violate the
18507 // required ordering.
18508 bool MadeChangeToSt = false;
18509 SmallVector<std::pair<StoreSDNode *, SDValue>, 8> BetterChains;
18510
18511 for (StoreSDNode *ChainedStore : ChainedStores) {
18512 SDValue Chain = ChainedStore->getChain();
18513 SDValue BetterChain = FindBetterChain(ChainedStore, Chain);
18514
18515 if (Chain != BetterChain) {
18516 if (ChainedStore == St)
18517 MadeChangeToSt = true;
18518 BetterChains.push_back(std::make_pair(ChainedStore, BetterChain));
18519 }
18520 }
18521
18522 // Do all replacements after finding the replacements to make to avoid making
18523 // the chains more complicated by introducing new TokenFactors.
18524 for (auto Replacement : BetterChains)
18525 replaceStoreChain(Replacement.first, Replacement.second);
18526
18527 return MadeChangeToSt;
18528}
18529
18530/// This is the entry point for the file.
18531void SelectionDAG::Combine(CombineLevel Level, AliasAnalysis *AA,
18532 CodeGenOpt::Level OptLevel) {
18533 /// This is the main entry point to this class.
18534 DAGCombiner(*this, AA, OptLevel).Run(Level);
18535}