1 //===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines a DAG pattern matching instruction selector for X86,
10 // converting from a legalized dag to a X86 dag.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "X86.h"
15 #include "X86MachineFunctionInfo.h"
16 #include "X86RegisterInfo.h"
17 #include "X86Subtarget.h"
18 #include "X86TargetMachine.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/CodeGen/MachineModuleInfo.h"
21 #include "llvm/CodeGen/SelectionDAGISel.h"
22 #include "llvm/Config/llvm-config.h"
23 #include "llvm/IR/ConstantRange.h"
24 #include "llvm/IR/Function.h"
25 #include "llvm/IR/Instructions.h"
26 #include "llvm/IR/Intrinsics.h"
27 #include "llvm/IR/IntrinsicsX86.h"
28 #include "llvm/IR/Type.h"
29 #include "llvm/Support/Debug.h"
30 #include "llvm/Support/ErrorHandling.h"
31 #include "llvm/Support/KnownBits.h"
32 #include "llvm/Support/MathExtras.h"
33 #include <cstdint>
34 
35 using namespace llvm;
36 
37 #define DEBUG_TYPE "x86-isel"
38 
39 STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
40 
41 static cl::opt<bool> AndImmShrink("x86-and-imm-shrink", cl::init(true),
42  cl::desc("Enable setting constant bits to reduce size of mask immediates"),
43  cl::Hidden);
44 
46  "x86-promote-anyext-load", cl::init(true),
47  cl::desc("Enable promoting aligned anyext load to wider load"), cl::Hidden);
48 
49 extern cl::opt<bool> IndirectBranchTracking;
50 
51 //===----------------------------------------------------------------------===//
52 // Pattern Matcher Implementation
53 //===----------------------------------------------------------------------===//
54 
55 namespace {
56  /// This corresponds to X86AddressMode, but uses SDValue's instead of register
57  /// numbers for the leaves of the matched tree.
58  struct X86ISelAddressMode {
59  enum {
60  RegBase,
61  FrameIndexBase
62  } BaseType;
63 
64  // This is really a union, discriminated by BaseType!
65  SDValue Base_Reg;
66  int Base_FrameIndex;
67 
68  unsigned Scale;
69  SDValue IndexReg;
70  int32_t Disp;
71  SDValue Segment;
72  const GlobalValue *GV;
73  const Constant *CP;
74  const BlockAddress *BlockAddr;
75  const char *ES;
76  MCSymbol *MCSym;
77  int JT;
78  Align Alignment; // CP alignment.
79  unsigned char SymbolFlags; // X86II::MO_*
80  bool NegateIndex = false;
81 
82  X86ISelAddressMode()
83  : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
84  Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
85  MCSym(nullptr), JT(-1), SymbolFlags(X86II::MO_NO_FLAG) {}
86 
87  bool hasSymbolicDisplacement() const {
88  return GV != nullptr || CP != nullptr || ES != nullptr ||
89  MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
90  }
91 
92  bool hasBaseOrIndexReg() const {
93  return BaseType == FrameIndexBase ||
94  IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
95  }
96 
97  /// Return true if this addressing mode is already RIP-relative.
98  bool isRIPRelative() const {
99  if (BaseType != RegBase) return false;
100  if (RegisterSDNode *RegNode =
101  dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
102  return RegNode->getReg() == X86::RIP;
103  return false;
104  }
105 
106  void setBaseReg(SDValue Reg) {
107  BaseType = RegBase;
108  Base_Reg = Reg;
109  }
110 
111 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
112  void dump(SelectionDAG *DAG = nullptr) {
113  dbgs() << "X86ISelAddressMode " << this << '\n';
114  dbgs() << "Base_Reg ";
115  if (Base_Reg.getNode())
116  Base_Reg.getNode()->dump(DAG);
117  else
118  dbgs() << "nul\n";
119  if (BaseType == FrameIndexBase)
120  dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n';
121  dbgs() << " Scale " << Scale << '\n'
122  << "IndexReg ";
123  if (NegateIndex)
124  dbgs() << "negate ";
125  if (IndexReg.getNode())
126  IndexReg.getNode()->dump(DAG);
127  else
128  dbgs() << "nul\n";
129  dbgs() << " Disp " << Disp << '\n'
130  << "GV ";
131  if (GV)
132  GV->dump();
133  else
134  dbgs() << "nul";
135  dbgs() << " CP ";
136  if (CP)
137  CP->dump();
138  else
139  dbgs() << "nul";
140  dbgs() << '\n'
141  << "ES ";
142  if (ES)
143  dbgs() << ES;
144  else
145  dbgs() << "nul";
146  dbgs() << " MCSym ";
147  if (MCSym)
148  dbgs() << MCSym;
149  else
150  dbgs() << "nul";
151  dbgs() << " JT" << JT << " Align" << Alignment.value() << '\n';
152  }
153 #endif
154  };
155 }
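// Illustrative sketch (an assumption for exposition, not part of the original
// source): how the matcher populates the struct above for a typical scaled
// address. For (add (shl %idx, 2), %base) with a constant offset of 8:
//
//   X86ISelAddressMode AM;
//   AM.setBaseReg(Base);   // Base_Reg  = %base
//   AM.IndexReg = Idx;     // IndexReg  = %idx
//   AM.Scale    = 4;       // shl by 2 becomes a scale of 4
//   AM.Disp     = 8;       // constant offset folded into Disp
//
// which corresponds to the x86 memory operand 8(%base,%idx,4).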
156 
157 namespace {
158  //===--------------------------------------------------------------------===//
159  /// ISel - X86-specific code to select X86 machine instructions for
160  /// SelectionDAG operations.
161  ///
162  class X86DAGToDAGISel final : public SelectionDAGISel {
163  /// Keep a pointer to the X86Subtarget around so that we can
164  /// make the right decision when generating code for different targets.
165  const X86Subtarget *Subtarget;
166 
167  /// If true, selector should try to optimize for minimum code size.
168  bool OptForMinSize;
169 
170  /// Disable direct TLS access through segment registers.
171  bool IndirectTlsSegRefs;
172 
173  public:
174  explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
175  : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr),
176  OptForMinSize(false), IndirectTlsSegRefs(false) {}
177 
178  StringRef getPassName() const override {
179  return "X86 DAG->DAG Instruction Selection";
180  }
181 
182  bool runOnMachineFunction(MachineFunction &MF) override {
183  // Reset the subtarget each time through.
184  Subtarget = &MF.getSubtarget<X86Subtarget>();
185  IndirectTlsSegRefs = MF.getFunction().hasFnAttribute(
186  "indirect-tls-seg-refs");
187 
188  // OptFor[Min]Size are used in pattern predicates that isel is matching.
189  OptForMinSize = MF.getFunction().hasMinSize();
190  assert((!OptForMinSize || MF.getFunction().hasOptSize()) &&
191  "OptForMinSize implies OptForSize");
192 
193  SelectionDAGISel::runOnMachineFunction(MF);
194  return true;
195  }
196 
197  void emitFunctionEntryCode() override;
198 
199  bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;
200 
201  void PreprocessISelDAG() override;
202  void PostprocessISelDAG() override;
203 
204 // Include the pieces autogenerated from the target description.
205 #include "X86GenDAGISel.inc"
206 
207  private:
208  void Select(SDNode *N) override;
209 
210  bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
211  bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM,
212  bool AllowSegmentRegForX32 = false);
213  bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
214  bool matchAddress(SDValue N, X86ISelAddressMode &AM);
215  bool matchVectorAddress(SDValue N, X86ISelAddressMode &AM);
216  bool matchAdd(SDValue &N, X86ISelAddressMode &AM, unsigned Depth);
217  bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
218  unsigned Depth);
219  bool matchVectorAddressRecursively(SDValue N, X86ISelAddressMode &AM,
220  unsigned Depth);
221  bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
222  bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
223  SDValue &Scale, SDValue &Index, SDValue &Disp,
224  SDValue &Segment);
225  bool selectVectorAddr(MemSDNode *Parent, SDValue BasePtr, SDValue IndexOp,
226  SDValue ScaleOp, SDValue &Base, SDValue &Scale,
227  SDValue &Index, SDValue &Disp, SDValue &Segment);
228  bool selectMOV64Imm32(SDValue N, SDValue &Imm);
229  bool selectLEAAddr(SDValue N, SDValue &Base,
230  SDValue &Scale, SDValue &Index, SDValue &Disp,
231  SDValue &Segment);
232  bool selectLEA64_32Addr(SDValue N, SDValue &Base,
233  SDValue &Scale, SDValue &Index, SDValue &Disp,
234  SDValue &Segment);
235  bool selectTLSADDRAddr(SDValue N, SDValue &Base,
236  SDValue &Scale, SDValue &Index, SDValue &Disp,
237  SDValue &Segment);
238  bool selectRelocImm(SDValue N, SDValue &Op);
239 
240  bool tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
241  SDValue &Base, SDValue &Scale,
242  SDValue &Index, SDValue &Disp,
243  SDValue &Segment);
244 
245  // Convenience method where P is also root.
246  bool tryFoldLoad(SDNode *P, SDValue N,
247  SDValue &Base, SDValue &Scale,
248  SDValue &Index, SDValue &Disp,
249  SDValue &Segment) {
250  return tryFoldLoad(P, P, N, Base, Scale, Index, Disp, Segment);
251  }
252 
253  bool tryFoldBroadcast(SDNode *Root, SDNode *P, SDValue N,
254  SDValue &Base, SDValue &Scale,
255  SDValue &Index, SDValue &Disp,
256  SDValue &Segment);
257 
258  bool isProfitableToFormMaskedOp(SDNode *N) const;
259 
260  /// Implement addressing mode selection for inline asm expressions.
261  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
262  unsigned ConstraintID,
263  std::vector<SDValue> &OutOps) override;
264 
265  void emitSpecialCodeForMain();
266 
267  inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
268  MVT VT, SDValue &Base, SDValue &Scale,
269  SDValue &Index, SDValue &Disp,
270  SDValue &Segment) {
271  if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
272  Base = CurDAG->getTargetFrameIndex(
273  AM.Base_FrameIndex, TLI->getPointerTy(CurDAG->getDataLayout()));
274  else if (AM.Base_Reg.getNode())
275  Base = AM.Base_Reg;
276  else
277  Base = CurDAG->getRegister(0, VT);
278 
279  Scale = getI8Imm(AM.Scale, DL);
280 
281  // Negate the index if needed.
282  if (AM.NegateIndex) {
283  unsigned NegOpc = VT == MVT::i64 ? X86::NEG64r : X86::NEG32r;
284  SDValue Neg = SDValue(CurDAG->getMachineNode(NegOpc, DL, VT, MVT::i32,
285  AM.IndexReg), 0);
286  AM.IndexReg = Neg;
287  }
288 
289  if (AM.IndexReg.getNode())
290  Index = AM.IndexReg;
291  else
292  Index = CurDAG->getRegister(0, VT);
293 
294  // These are 32-bit even in 64-bit mode since RIP-relative offset
295  // is 32-bit.
296  if (AM.GV)
297  Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
298  MVT::i32, AM.Disp,
299  AM.SymbolFlags);
300  else if (AM.CP)
301  Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Alignment,
302  AM.Disp, AM.SymbolFlags);
303  else if (AM.ES) {
304  assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
305  Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
306  } else if (AM.MCSym) {
307  assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
308  assert(AM.SymbolFlags == 0 && "oo");
309  Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
310  } else if (AM.JT != -1) {
311  assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
312  Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
313  } else if (AM.BlockAddr)
314  Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
315  AM.SymbolFlags);
316  else
317  Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);
318 
319  if (AM.Segment.getNode())
320  Segment = AM.Segment;
321  else
322  Segment = CurDAG->getRegister(0, MVT::i16);
323  }
324 
325  // Utility function to determine whether we should avoid selecting
326  // immediate forms of instructions for better code size.
327  // At a high level, we'd like to avoid such instructions when
328  // we have similar constants used within the same basic block
329  // that can be kept in a register.
330  //
331  bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
332  uint32_t UseCount = 0;
333 
334  // Do not want to hoist if we're not optimizing for size.
335  // TODO: We'd like to remove this restriction.
336  // See the comment in X86InstrInfo.td for more info.
337  if (!CurDAG->shouldOptForSize())
338  return false;
339 
340  // Walk all the users of the immediate.
341  for (const SDNode *User : N->uses()) {
342  if (UseCount >= 2)
343  break;
344 
345  // This user is already selected. Count it as a legitimate use and
346  // move on.
347  if (User->isMachineOpcode()) {
348  UseCount++;
349  continue;
350  }
351 
352  // We want to count stores of immediates as real uses.
353  if (User->getOpcode() == ISD::STORE &&
354  User->getOperand(1).getNode() == N) {
355  UseCount++;
356  continue;
357  }
358 
359  // We don't currently match users that have > 2 operands (except
360  // for stores, which are handled above).
361  // Those instructions won't match in ISel, for now, and would
362  // be counted incorrectly.
363  // This may change in the future as we add additional instruction
364  // types.
365  if (User->getNumOperands() != 2)
366  continue;
367 
368  // If this is a sign-extended 8-bit integer immediate used in an ALU
369  // instruction, there is probably an opcode encoding to save space.
370  auto *C = dyn_cast<ConstantSDNode>(N);
371  if (C && isInt<8>(C->getSExtValue()))
372  continue;
373 
374  // Immediates that are used for offsets as part of stack
375  // manipulation should be left alone. These are typically
376  // used to indicate SP offsets for argument passing and
377  // will get pulled into stores/pushes (implicitly).
378  if (User->getOpcode() == X86ISD::ADD ||
379  User->getOpcode() == ISD::ADD ||
380  User->getOpcode() == X86ISD::SUB ||
381  User->getOpcode() == ISD::SUB) {
382 
383  // Find the other operand of the add/sub.
384  SDValue OtherOp = User->getOperand(0);
385  if (OtherOp.getNode() == N)
386  OtherOp = User->getOperand(1);
387 
388  // Don't count if the other operand is SP.
389  RegisterSDNode *RegNode;
390  if (OtherOp->getOpcode() == ISD::CopyFromReg &&
391  (RegNode = dyn_cast_or_null<RegisterSDNode>(
392  OtherOp->getOperand(1).getNode())))
393  if ((RegNode->getReg() == X86::ESP) ||
394  (RegNode->getReg() == X86::RSP))
395  continue;
396  }
397 
398  // ... otherwise, count this and move on.
399  UseCount++;
400  }
401 
402  // If we have more than 1 use, then recommend hoisting.
403  return (UseCount > 1);
404  }
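// Example of the heuristic above (a sketch, not exhaustive): when optimizing
// for size, IR like
//   store i32 12345678, i32* %p
//   %a = add i32 %x, 12345678
// gives the constant two countable uses (the stored value and a non-int8
// ALU operand), so the routine returns true and selection prefers
// materializing 12345678 in a register once over encoding the 4-byte
// immediate in both instructions.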
405 
406  /// Return a target constant with the specified value of type i8.
407  inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
408  return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
409  }
410 
411  /// Return a target constant with the specified value, of type i32.
412  inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
413  return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
414  }
415 
416  /// Return a target constant with the specified value, of type i64.
417  inline SDValue getI64Imm(uint64_t Imm, const SDLoc &DL) {
418  return CurDAG->getTargetConstant(Imm, DL, MVT::i64);
419  }
420 
421  SDValue getExtractVEXTRACTImmediate(SDNode *N, unsigned VecWidth,
422  const SDLoc &DL) {
423  assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
424  uint64_t Index = N->getConstantOperandVal(1);
425  MVT VecVT = N->getOperand(0).getSimpleValueType();
426  return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
427  }
428 
429  SDValue getInsertVINSERTImmediate(SDNode *N, unsigned VecWidth,
430  const SDLoc &DL) {
431  assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
432  uint64_t Index = N->getConstantOperandVal(2);
433  MVT VecVT = N->getSimpleValueType(0);
434  return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
435  }
436 
437  SDValue getPermuteVINSERTCommutedImmediate(SDNode *N, unsigned VecWidth,
438  const SDLoc &DL) {
439  assert(VecWidth == 128 && "Unexpected vector width");
440  uint64_t Index = N->getConstantOperandVal(2);
441  MVT VecVT = N->getSimpleValueType(0);
442  uint64_t InsertIdx = (Index * VecVT.getScalarSizeInBits()) / VecWidth;
443  assert((InsertIdx == 0 || InsertIdx == 1) && "Bad insertf128 index");
444  // vinsert(0,sub,vec) -> [sub0][vec1] -> vperm2x128(0x30,vec,sub)
445  // vinsert(1,sub,vec) -> [vec0][sub0] -> vperm2x128(0x02,vec,sub)
446  return getI8Imm(InsertIdx ? 0x02 : 0x30, DL);
447  }
448 
449  // Helper to detect unneeded 'and' instructions on shift amounts. Called
450  // from PatFrags in tablegen.
451  bool isUnneededShiftMask(SDNode *N, unsigned Width) const {
452  assert(N->getOpcode() == ISD::AND && "Unexpected opcode");
453  const APInt &Val = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
454 
455  if (Val.countTrailingOnes() >= Width)
456  return true;
457 
458  APInt Mask = Val | CurDAG->computeKnownBits(N->getOperand(0)).Zero;
459  return Mask.countTrailingOnes() >= Width;
460  }
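// Example (sketch): for a 32-bit shift, Width is 5 because the hardware
// masks the shift amount to 5 bits. In (shl %x, (and %amt, 31)) the mask
// value 31 has countTrailingOnes() == 5 >= Width, so the AND is unneeded
// and the shift can use %amt directly; a mask of 15 would keep the AND
// unless known-zero bits of %amt make up the difference.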
461 
462  /// Return an SDNode that returns the value of the global base register.
463  /// Output instructions required to initialize the global base register,
464  /// if necessary.
465  SDNode *getGlobalBaseReg();
466 
467  /// Return a reference to the TargetMachine, casted to the target-specific
468  /// type.
469  const X86TargetMachine &getTargetMachine() const {
470  return static_cast<const X86TargetMachine &>(TM);
471  }
472 
473  /// Return a reference to the TargetInstrInfo, casted to the target-specific
474  /// type.
475  const X86InstrInfo *getInstrInfo() const {
476  return Subtarget->getInstrInfo();
477  }
478 
479  /// Address-mode matching performs shift-of-and to and-of-shift
480  /// reassociation in order to expose more scaled addressing
481  /// opportunities.
482  bool ComplexPatternFuncMutatesDAG() const override {
483  return true;
484  }
485 
486  bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;
487 
488  // Indicates we should prefer to use a non-temporal load for this load.
489  bool useNonTemporalLoad(LoadSDNode *N) const {
490  if (!N->isNonTemporal())
491  return false;
492 
493  unsigned StoreSize = N->getMemoryVT().getStoreSize();
494 
495  if (N->getAlignment() < StoreSize)
496  return false;
497 
498  switch (StoreSize) {
499  default: llvm_unreachable("Unsupported store size");
500  case 4:
501  case 8:
502  return false;
503  case 16:
504  return Subtarget->hasSSE41();
505  case 32:
506  return Subtarget->hasAVX2();
507  case 64:
508  return Subtarget->hasAVX512();
509  }
510  }
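// Example (sketch): a 32-byte non-temporal load stays non-temporal only when
// AVX2 provides VMOVNTDQA ymm; without that feature, or when the load is
// under-aligned, this returns false and an ordinary load is selected.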
511 
512  bool foldLoadStoreIntoMemOperand(SDNode *Node);
513  MachineSDNode *matchBEXTRFromAndImm(SDNode *Node);
514  bool matchBitExtract(SDNode *Node);
515  bool shrinkAndImmediate(SDNode *N);
516  bool isMaskZeroExtended(SDNode *N) const;
517  bool tryShiftAmountMod(SDNode *N);
518  bool tryShrinkShlLogicImm(SDNode *N);
519  bool tryVPTERNLOG(SDNode *N);
520  bool matchVPTERNLOG(SDNode *Root, SDNode *ParentA, SDNode *ParentB,
521  SDNode *ParentC, SDValue A, SDValue B, SDValue C,
522  uint8_t Imm);
523  bool tryVPTESTM(SDNode *Root, SDValue Setcc, SDValue Mask);
524  bool tryMatchBitSelect(SDNode *N);
525 
526  MachineSDNode *emitPCMPISTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
527  const SDLoc &dl, MVT VT, SDNode *Node);
528  MachineSDNode *emitPCMPESTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
529  const SDLoc &dl, MVT VT, SDNode *Node,
530  SDValue &InFlag);
531 
532  bool tryOptimizeRem8Extend(SDNode *N);
533 
534  bool onlyUsesZeroFlag(SDValue Flags) const;
535  bool hasNoSignFlagUses(SDValue Flags) const;
536  bool hasNoCarryFlagUses(SDValue Flags) const;
537  };
538 }
539 
540 
541 // Returns true if this masked compare can be implemented legally with this
542 // type.
543 static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
544  unsigned Opcode = N->getOpcode();
545  if (Opcode == X86ISD::CMPM || Opcode == X86ISD::CMPMM ||
546  Opcode == X86ISD::STRICT_CMPM || Opcode == ISD::SETCC ||
547  Opcode == X86ISD::CMPMM_SAE || Opcode == X86ISD::VFPCLASS) {
548  // We can get 256-bit 8 element types here without VLX being enabled. When
549  // this happens we will use 512-bit operations and the mask will not be
550  // zero extended.
551  EVT OpVT = N->getOperand(0).getValueType();
552  // The first operand of X86ISD::STRICT_CMPM is chain, so we need to get the
553  // second operand.
554  if (Opcode == X86ISD::STRICT_CMPM)
555  OpVT = N->getOperand(1).getValueType();
556  if (OpVT.is256BitVector() || OpVT.is128BitVector())
557  return Subtarget->hasVLX();
558 
559  return true;
560  }
561  // Scalar opcodes use 128 bit registers, but aren't subject to the VLX check.
562  if (Opcode == X86ISD::VFPCLASSS || Opcode == X86ISD::FSETCCM ||
563  Opcode == X86ISD::FSETCCM_SAE)
564  return true;
565 
566  return false;
567 }
568 
569 // Returns true if we can assume the writer of the mask has zero extended it
570 // for us.
571 bool X86DAGToDAGISel::isMaskZeroExtended(SDNode *N) const {
572  // If this is an AND, check if we have a compare on either side. As long as
573  // one side guarantees the mask is zero extended, the AND will preserve those
574  // zeros.
575  if (N->getOpcode() == ISD::AND)
576  return isLegalMaskCompare(N->getOperand(0).getNode(), Subtarget) ||
577  isLegalMaskCompare(N->getOperand(1).getNode(), Subtarget);
578 
579  return isLegalMaskCompare(N, Subtarget);
580 }
581 
582 bool
583 X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
584  if (OptLevel == CodeGenOpt::None) return false;
585 
586  if (!N.hasOneUse())
587  return false;
588 
589  if (N.getOpcode() != ISD::LOAD)
590  return true;
591 
592  // Don't fold non-temporal loads if we have an instruction for them.
593  if (useNonTemporalLoad(cast<LoadSDNode>(N)))
594  return false;
595 
596  // If N is a load, do additional profitability checks.
597  if (U == Root) {
598  switch (U->getOpcode()) {
599  default: break;
600  case X86ISD::ADD:
601  case X86ISD::ADC:
602  case X86ISD::SUB:
603  case X86ISD::SBB:
604  case X86ISD::AND:
605  case X86ISD::XOR:
606  case X86ISD::OR:
607  case ISD::ADD:
608  case ISD::ADDCARRY:
609  case ISD::AND:
610  case ISD::OR:
611  case ISD::XOR: {
612  SDValue Op1 = U->getOperand(1);
613 
614  // If the other operand is an 8-bit immediate, we should fold the immediate
615  // instead. This reduces code size.
616  // e.g.
617  // movl 4(%esp), %eax
618  // addl $4, %eax
619  // vs.
620  // movl $4, %eax
621  // addl 4(%esp), %eax
622  // The former is 2 bytes shorter. In the case where the increment is 1,
623  // the saving can be 4 bytes (by using incl %eax).
624  if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1)) {
625  if (Imm->getAPIntValue().isSignedIntN(8))
626  return false;
627 
628  // If this is a 64-bit AND with an immediate that fits in 32-bits,
629  // prefer using the smaller and over folding the load. This is needed to
630  // make sure immediates created by shrinkAndImmediate are always folded.
631  // Ideally we would narrow the load during DAG combine and get the
632  // best of both worlds.
633  if (U->getOpcode() == ISD::AND &&
634  Imm->getAPIntValue().getBitWidth() == 64 &&
635  Imm->getAPIntValue().isIntN(32))
636  return false;
637 
638  // If this is really a zext_inreg that can be represented with a movzx
639  // instruction, prefer that.
640  // TODO: We could shrink the load and fold if it is non-volatile.
641  if (U->getOpcode() == ISD::AND &&
642  (Imm->getAPIntValue() == UINT8_MAX ||
643  Imm->getAPIntValue() == UINT16_MAX ||
644  Imm->getAPIntValue() == UINT32_MAX))
645  return false;
646 
647  // ADD/SUB can negate the immediate and use the opposite operation
648  // to fit 128 into a sign-extended 8-bit immediate.
649  if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB) &&
650  (-Imm->getAPIntValue()).isSignedIntN(8))
651  return false;
652 
653  if ((U->getOpcode() == X86ISD::ADD || U->getOpcode() == X86ISD::SUB) &&
654  (-Imm->getAPIntValue()).isSignedIntN(8) &&
655  hasNoCarryFlagUses(SDValue(U, 1)))
656  return false;
657  }
658 
659  // If the other operand is a TLS address, we should fold it instead.
660  // This produces
661  // movl %gs:0, %eax
662  // leal i@NTPOFF(%eax), %eax
663  // instead of
664  // movl $i@NTPOFF, %eax
665  // addl %gs:0, %eax
666  // if the block also has an access to a second TLS address this will save
667  // a load.
668  // FIXME: This is probably also true for non-TLS addresses.
669  if (Op1.getOpcode() == X86ISD::Wrapper) {
670  SDValue Val = Op1.getOperand(0);
671  if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
672  return false;
673  }
674 
675  // Don't fold load if this matches the BTS/BTR/BTC patterns.
676  // BTS: (or X, (shl 1, n))
677  // BTR: (and X, (rotl -2, n))
678  // BTC: (xor X, (shl 1, n))
679  if (U->getOpcode() == ISD::OR || U->getOpcode() == ISD::XOR) {
680  if (U->getOperand(0).getOpcode() == ISD::SHL &&
681  isOneConstant(U->getOperand(0).getOperand(0)))
682  return false;
683 
684  if (U->getOperand(1).getOpcode() == ISD::SHL &&
685  isOneConstant(U->getOperand(1).getOperand(0)))
686  return false;
687  }
688  if (U->getOpcode() == ISD::AND) {
689  SDValue U0 = U->getOperand(0);
690  SDValue U1 = U->getOperand(1);
691  if (U0.getOpcode() == ISD::ROTL) {
692  auto *C = dyn_cast<ConstantSDNode>(U0.getOperand(0));
693  if (C && C->getSExtValue() == -2)
694  return false;
695  }
696 
697  if (U1.getOpcode() == ISD::ROTL) {
698  auto *C = dyn_cast<ConstantSDNode>(U1.getOperand(0));
699  if (C && C->getSExtValue() == -2)
700  return false;
701  }
702  }
703 
704  break;
705  }
706  case ISD::SHL:
707  case ISD::SRA:
708  case ISD::SRL:
709  // Don't fold a load into a shift by immediate. The BMI2 instructions
710  // support folding a load, but not an immediate. The legacy instructions
711  // support folding an immediate, but can't fold a load. Folding an
712  // immediate is preferable to folding a load.
713  if (isa<ConstantSDNode>(U->getOperand(1)))
714  return false;
715 
716  break;
717  }
718  }
719 
720  // Prevent folding a load if this can be implemented with an insert_subreg or
721  // a move that implicitly zeroes.
722  if (Root->getOpcode() == ISD::INSERT_SUBVECTOR &&
723  isNullConstant(Root->getOperand(2)) &&
724  (Root->getOperand(0).isUndef() ||
725  ISD::isBuildVectorAllZeros(Root->getOperand(0).getNode())))
726  return false;
727 
728  return true;
729 }
730 
731 // Indicates it is profitable to form an AVX512 masked operation. Returning
732 // false will favor a masked register-to-register move or vblendm, and the
733 // operation will be selected separately.
734 bool X86DAGToDAGISel::isProfitableToFormMaskedOp(SDNode *N) const {
735  assert(
736  (N->getOpcode() == ISD::VSELECT || N->getOpcode() == X86ISD::SELECTS) &&
737  "Unexpected opcode!");
738 
739  // If the operation has additional users, the operation will be duplicated.
740  // Check the use count to prevent that.
741  // FIXME: Are there cheap opcodes we might want to duplicate?
742  return N->getOperand(1).hasOneUse();
743 }
744 
745 /// Replace the original chain operand of the call with
746 /// load's chain operand and move load below the call's chain operand.
747 static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
748  SDValue Call, SDValue OrigChain) {
749  SmallVector<SDValue, 8> Ops;
750  SDValue Chain = OrigChain.getOperand(0);
751  if (Chain.getNode() == Load.getNode())
752  Ops.push_back(Load.getOperand(0));
753  else {
754  assert(Chain.getOpcode() == ISD::TokenFactor &&
755  "Unexpected chain operand");
756  for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
757  if (Chain.getOperand(i).getNode() == Load.getNode())
758  Ops.push_back(Load.getOperand(0));
759  else
760  Ops.push_back(Chain.getOperand(i));
761  SDValue NewChain =
762  CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
763  Ops.clear();
764  Ops.push_back(NewChain);
765  }
766  Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
767  CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
768  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
769  Load.getOperand(1), Load.getOperand(2));
770 
771  Ops.clear();
772  Ops.push_back(SDValue(Load.getNode(), 1));
773  Ops.append(Call->op_begin() + 1, Call->op_end());
774  CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
775 }
776 
777 /// Return true if call address is a load and it can be
778 /// moved below CALLSEQ_START and the chains leading up to the call.
779 /// Return the CALLSEQ_START by reference as a second output.
780 /// In the case of a tail call, there isn't a callseq node between the call
781 /// chain and the load.
782 static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
783  // The transformation is somewhat dangerous if the call's chain was glued to
784  // the call. After MoveBelowOrigChain the load is moved between the call and
785  // the chain, this can create a cycle if the load is not folded. So it is
786  // *really* important that we are sure the load will be folded.
787  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
788  return false;
789  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
790  if (!LD ||
791  !LD->isSimple() ||
792  LD->getAddressingMode() != ISD::UNINDEXED ||
793  LD->getExtensionType() != ISD::NON_EXTLOAD)
794  return false;
795 
796  // Now let's find the callseq_start.
797  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
798  if (!Chain.hasOneUse())
799  return false;
800  Chain = Chain.getOperand(0);
801  }
802 
803  if (!Chain.getNumOperands())
804  return false;
805  // Since we are not checking for AA here, conservatively abort if the chain
806  // writes to memory. It's not safe to move the callee (a load) across a store.
807  if (isa<MemSDNode>(Chain.getNode()) &&
808  cast<MemSDNode>(Chain.getNode())->writeMem())
809  return false;
810  if (Chain.getOperand(0).getNode() == Callee.getNode())
811  return true;
812  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
813  Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
814  Callee.getValue(1).hasOneUse())
815  return true;
816  return false;
817 }
818 
819 static bool isEndbrImm64(uint64_t Imm) {
820 // There may be some other prefix bytes between 0xF3 and 0x0F1EFA.
821 // e.g. 0xF3660F1EFA, 0xF3670F1EFA
822  if ((Imm & 0x00FFFFFF) != 0x0F1EFA)
823  return false;
824 
825  uint8_t OptionalPrefixBytes [] = {0x26, 0x2e, 0x36, 0x3e, 0x64,
826  0x65, 0x66, 0x67, 0xf0, 0xf2};
827  int i = 24; // The low 24 bits (0x0F1EFA) have already matched.
828  while (i < 64) {
829  uint8_t Byte = (Imm >> i) & 0xFF;
830  if (Byte == 0xF3)
831  return true;
832  if (!llvm::is_contained(OptionalPrefixBytes, Byte))
833  return false;
834  i += 8;
835  }
836 
837  return false;
838 }
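// Example (sketch): isEndbrImm64(0xF3660F1EFA) is true - the low 24 bits
// match 0x0F1EFA, 0x66 is an allowed optional prefix byte, and the 0xF3
// that follows completes the ENDBR64 pattern. isEndbrImm64(0xF3990F1EFA)
// is false because 0x99 is not in OptionalPrefixBytes.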
839 
840 void X86DAGToDAGISel::PreprocessISelDAG() {
841  bool MadeChange = false;
842  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
843  E = CurDAG->allnodes_end(); I != E; ) {
844  SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.
845 
846  // This is for CET enhancement.
847  //
848  // ENDBR32 and ENDBR64 have specific opcodes:
849  // ENDBR32: F3 0F 1E FB
850  // ENDBR64: F3 0F 1E FA
851  // And we want to ensure that attackers won't find unintended ENDBR32/64
852  // opcode matches in the binary.
853  // Here’s an example:
854  // If the compiler had to generate asm for the following code:
855  // a = 0xF30F1EFA
856  // it could, for example, generate:
857  // mov 0xF30F1EFA, dword ptr[a]
858  // In such a case, the binary would include a gadget that starts
859  // with a fake ENDBR64 opcode. Therefore, we split such generation
860  // into multiple operations so that it does not show up in the binary.
861  if (N->getOpcode() == ISD::Constant) {
862  MVT VT = N->getSimpleValueType(0);
863  int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();
864  int32_t EndbrImm = Subtarget->is64Bit() ? 0xF30F1EFA : 0xF30F1EFB;
865  if (Imm == EndbrImm || isEndbrImm64(Imm)) {
866  // Check that the cf-protection-branch is enabled.
867  Metadata *CFProtectionBranch =
868  MF->getMMI().getModule()->getModuleFlag("cf-protection-branch");
869  if (CFProtectionBranch || IndirectBranchTracking) {
870  SDLoc dl(N);
871  SDValue Complement = CurDAG->getConstant(~Imm, dl, VT, false, true);
872  Complement = CurDAG->getNOT(dl, Complement, VT);
873  --I;
874  CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Complement);
875  ++I;
876  MadeChange = true;
877  continue;
878  }
879  }
880  }
881 
882  // If this is a target specific AND node with no flag usages, turn it back
883  // into ISD::AND to enable test instruction matching.
884  if (N->getOpcode() == X86ISD::AND && !N->hasAnyUseOfValue(1)) {
885  SDValue Res = CurDAG->getNode(ISD::AND, SDLoc(N), N->getValueType(0),
886  N->getOperand(0), N->getOperand(1));
887  --I;
888  CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
889  ++I;
890  MadeChange = true;
891  continue;
892  }
893 
894  // Convert vector increment or decrement to sub/add with an all-ones
895  // constant:
896  // add X, <1, 1...> --> sub X, <-1, -1...>
897  // sub X, <1, 1...> --> add X, <-1, -1...>
898  // The all-ones vector constant can be materialized using a pcmpeq
899  // instruction that is commonly recognized as an idiom (has no register
900  // dependency), so that's better/smaller than loading a splat 1 constant.
901  //
902  // But don't do this if it would inhibit a potentially profitable load
903  // folding opportunity for the other operand. That only occurs with the
904  // intersection of:
905  // (1) The other operand (op0) is load foldable.
906  // (2) The op is an add (otherwise, we are *creating* an add and can still
907  // load fold the other op).
908  // (3) The target has AVX (otherwise, we have a destructive add and can't
909  // load fold the other op without killing the constant op).
910  // (4) The constant 1 vector has multiple uses (so it is profitable to load
911  // into a register anyway).
912  auto mayPreventLoadFold = [&]() {
913  return X86::mayFoldLoad(N->getOperand(0), *Subtarget) &&
914  N->getOpcode() == ISD::ADD && Subtarget->hasAVX() &&
915  !N->getOperand(1).hasOneUse();
916  };
917  if ((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
918  N->getSimpleValueType(0).isVector() && !mayPreventLoadFold()) {
919  APInt SplatVal;
920  if (X86::isConstantSplat(N->getOperand(1), SplatVal) &&
921  SplatVal.isOne()) {
922  SDLoc DL(N);
923 
924  MVT VT = N->getSimpleValueType(0);
925  unsigned NumElts = VT.getSizeInBits() / 32;
926  SDValue AllOnes =
927  CurDAG->getAllOnesConstant(DL, MVT::getVectorVT(MVT::i32, NumElts));
928  AllOnes = CurDAG->getBitcast(VT, AllOnes);
929 
930  unsigned NewOpcode = N->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
931  SDValue Res =
932  CurDAG->getNode(NewOpcode, DL, VT, N->getOperand(0), AllOnes);
933  --I;
934  CurDAG->ReplaceAllUsesWith(N, Res.getNode());
935  ++I;
936  MadeChange = true;
937  continue;
938  }
939  }
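// Example of the rewrite above (sketch): a v4i32 (add %x, <1,1,1,1>) becomes
// (sub %x, <-1,-1,-1,-1>); the all-ones constant is then typically
// materialized by pcmpeqd %xmm0, %xmm0, which has no memory access or input
// register dependency, instead of loading a splat-of-1 from the constant
// pool.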
940 
941  switch (N->getOpcode()) {
942  case X86ISD::VBROADCAST: {
943  MVT VT = N->getSimpleValueType(0);
944  // Emulate v32i16/v64i8 broadcast without BWI.
945  if (!Subtarget->hasBWI() && (VT == MVT::v32i16 || VT == MVT::v64i8)) {
946  MVT NarrowVT = VT == MVT::v32i16 ? MVT::v16i16 : MVT::v32i8;
947  SDLoc dl(N);
948  SDValue NarrowBCast =
949  CurDAG->getNode(X86ISD::VBROADCAST, dl, NarrowVT, N->getOperand(0));
950  SDValue Res =
951  CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, CurDAG->getUNDEF(VT),
952  NarrowBCast, CurDAG->getIntPtrConstant(0, dl));
953  unsigned Index = VT == MVT::v32i16 ? 16 : 32;
954  Res = CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, NarrowBCast,
955  CurDAG->getIntPtrConstant(Index, dl));
956 
957  --I;
958  CurDAG->ReplaceAllUsesWith(N, Res.getNode());
959  ++I;
960  MadeChange = true;
961  continue;
962  }
963 
964  break;
965  }
966  case X86ISD::VBROADCAST_LOAD: {
967  MVT VT = N->getSimpleValueType(0);
968  // Emulate v32i16/v64i8 broadcast without BWI.
969  if (!Subtarget->hasBWI() && (VT == MVT::v32i16 || VT == MVT::v64i8)) {
970  MVT NarrowVT = VT == MVT::v32i16 ? MVT::v16i16 : MVT::v32i8;
971  auto *MemNode = cast<MemSDNode>(N);
972  SDLoc dl(N);
973  SDVTList VTs = CurDAG->getVTList(NarrowVT, MVT::Other);
974  SDValue Ops[] = {MemNode->getChain(), MemNode->getBasePtr()};
975  SDValue NarrowBCast = CurDAG->getMemIntrinsicNode(
976  X86ISD::VBROADCAST_LOAD, dl, VTs, Ops, MemNode->getMemoryVT(),
977  MemNode->getMemOperand());
978  SDValue Res =
979  CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, CurDAG->getUNDEF(VT),
980  NarrowBCast, CurDAG->getIntPtrConstant(0, dl));
981  unsigned Index = VT == MVT::v32i16 ? 16 : 32;
982  Res = CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, NarrowBCast,
983  CurDAG->getIntPtrConstant(Index, dl));
984 
985  --I;
986  SDValue To[] = {Res, NarrowBCast.getValue(1)};
987  CurDAG->ReplaceAllUsesWith(N, To);
988  ++I;
989  MadeChange = true;
990  continue;
991  }
992 
993  break;
994  }
995  case ISD::VSELECT: {
996  // Replace VSELECT with non-mask conditions with BLENDV.
997  if (N->getOperand(0).getValueType().getVectorElementType() == MVT::i1)
998  break;
999 
1000  assert(Subtarget->hasSSE41() && "Expected SSE4.1 support!");
1001  SDValue Blendv =
1002  CurDAG->getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0),
1003  N->getOperand(0), N->getOperand(1), N->getOperand(2));
1004  --I;
1005  CurDAG->ReplaceAllUsesWith(N, Blendv.getNode());
1006  ++I;
1007  MadeChange = true;
1008  continue;
1009  }
1010  case ISD::FP_ROUND:
1011  case ISD::STRICT_FP_ROUND:
1012  case ISD::FP_TO_SINT:
1013  case ISD::FP_TO_UINT:
1014  case ISD::STRICT_FP_TO_SINT:
1015  case ISD::STRICT_FP_TO_UINT: {
1016  // Replace vector fp_to_s/uint with their X86 specific equivalent so we
1017  // don't need 2 sets of patterns.
1018  if (!N->getSimpleValueType(0).isVector())
1019  break;
1020 
1021  unsigned NewOpc;
1022  switch (N->getOpcode()) {
1023  default: llvm_unreachable("Unexpected opcode!");
1024  case ISD::FP_ROUND: NewOpc = X86ISD::VFPROUND; break;
1025  case ISD::STRICT_FP_ROUND: NewOpc = X86ISD::STRICT_VFPROUND; break;
1026  case ISD::STRICT_FP_TO_SINT: NewOpc = X86ISD::STRICT_CVTTP2SI; break;
1027  case ISD::FP_TO_SINT: NewOpc = X86ISD::CVTTP2SI; break;
1028  case ISD::STRICT_FP_TO_UINT: NewOpc = X86ISD::STRICT_CVTTP2UI; break;
1029  case ISD::FP_TO_UINT: NewOpc = X86ISD::CVTTP2UI; break;
1030  }
1031  SDValue Res;
1032  if (N->isStrictFPOpcode())
1033  Res =
1034  CurDAG->getNode(NewOpc, SDLoc(N), {N->getValueType(0), MVT::Other},
1035  {N->getOperand(0), N->getOperand(1)});
1036  else
1037  Res =
1038  CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
1039  N->getOperand(0));
1040  --I;
1041  CurDAG->ReplaceAllUsesWith(N, Res.getNode());
1042  ++I;
1043  MadeChange = true;
1044  continue;
1045  }
1046  case ISD::SHL:
1047  case ISD::SRA:
1048  case ISD::SRL: {
1049  // Replace vector shifts with their X86 specific equivalent so we don't
1050  // need 2 sets of patterns.
1051  if (!N->getValueType(0).isVector())
1052  break;
1053 
1054  unsigned NewOpc;
1055  switch (N->getOpcode()) {
1056  default: llvm_unreachable("Unexpected opcode!");
1057  case ISD::SHL: NewOpc = X86ISD::VSHLV; break;
1058  case ISD::SRA: NewOpc = X86ISD::VSRAV; break;
1059  case ISD::SRL: NewOpc = X86ISD::VSRLV; break;
1060  }
1061  SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
1062  N->getOperand(0), N->getOperand(1));
1063  --I;
1064  CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
1065  ++I;
1066  MadeChange = true;
1067  continue;
1068  }
1069  case ISD::ANY_EXTEND:
1070  case ISD::ANY_EXTEND_VECTOR_INREG: {
1071  // Replace vector any extend with the zero extend equivalents so we don't
1072  // need 2 sets of patterns. Ignore vXi1 extensions.
1073  if (!N->getValueType(0).isVector())
1074  break;
1075 
1076  unsigned NewOpc;
1077  if (N->getOperand(0).getScalarValueSizeInBits() == 1) {
1078  assert(N->getOpcode() == ISD::ANY_EXTEND &&
1079  "Unexpected opcode for mask vector!");
1080  NewOpc = ISD::SIGN_EXTEND;
1081  } else {
1082  NewOpc = N->getOpcode() == ISD::ANY_EXTEND
1083  ? ISD::ZERO_EXTEND
1084  : ISD::ZERO_EXTEND_VECTOR_INREG;
1085  }
1086 
1087  SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
1088  N->getOperand(0));
1089  --I;
1090  CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
1091  ++I;
1092  MadeChange = true;
1093  continue;
1094  }
1095  case ISD::FCEIL:
1096  case ISD::STRICT_FCEIL:
1097  case ISD::FFLOOR:
1098  case ISD::STRICT_FFLOOR:
1099  case ISD::FTRUNC:
1100  case ISD::STRICT_FTRUNC:
1101  case ISD::FROUNDEVEN:
1102  case ISD::STRICT_FROUNDEVEN:
1103  case ISD::FNEARBYINT:
1104  case ISD::STRICT_FNEARBYINT:
1105  case ISD::FRINT:
1106  case ISD::STRICT_FRINT: {
1107  // Replace fp rounding with their X86 specific equivalent so we don't
1108  // need 2 sets of patterns.
1109  unsigned Imm;
1110  switch (N->getOpcode()) {
1111  default: llvm_unreachable("Unexpected opcode!");
1112  case ISD::STRICT_FCEIL:
1113  case ISD::FCEIL: Imm = 0xA; break;
1114  case ISD::STRICT_FFLOOR:
1115  case ISD::FFLOOR: Imm = 0x9; break;
1116  case ISD::STRICT_FTRUNC:
1117  case ISD::FTRUNC: Imm = 0xB; break;
1118  case ISD::STRICT_FROUNDEVEN:
1119  case ISD::FROUNDEVEN: Imm = 0x8; break;
1120  case ISD::STRICT_FNEARBYINT:
1121  case ISD::FNEARBYINT: Imm = 0xC; break;
1122  case ISD::STRICT_FRINT:
1123  case ISD::FRINT: Imm = 0x4; break;
1124  }
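// Sketch of the imm8 encoding chosen above (SSE4.1 ROUND* / AVX512
// VRNDSCALE): bits 1:0 select the rounding mode (00 nearest, 01 floor,
// 10 ceil, 11 truncate), bit 2 (0x4) selects the current MXCSR mode
// instead, and bit 3 (0x8) suppresses precision (inexact) exceptions.
// Hence FCEIL is 0x8|0x2 = 0xA, FNEARBYINT is 0x8|0x4 = 0xC, and FRINT
// is 0x4 so inexact is still reported.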
1125  SDLoc dl(N);
1126  bool IsStrict = N->isStrictFPOpcode();
1127  SDValue Res;
1128  if (IsStrict)
1129  Res = CurDAG->getNode(X86ISD::STRICT_VRNDSCALE, dl,
1130  {N->getValueType(0), MVT::Other},
1131  {N->getOperand(0), N->getOperand(1),
1132  CurDAG->getTargetConstant(Imm, dl, MVT::i32)});
1133  else
1134  Res = CurDAG->getNode(X86ISD::VRNDSCALE, dl, N->getValueType(0),
1135  N->getOperand(0),
1136  CurDAG->getTargetConstant(Imm, dl, MVT::i32));
1137  --I;
1138  CurDAG->ReplaceAllUsesWith(N, Res.getNode());
1139  ++I;
1140  MadeChange = true;
1141  continue;
1142  }
1143  case X86ISD::FANDN:
1144  case X86ISD::FAND:
1145  case X86ISD::FOR:
1146  case X86ISD::FXOR: {
1147  // Widen scalar fp logic ops to vector to reduce isel patterns.
1148  // FIXME: Can we do this during lowering/combine?
1149  MVT VT = N->getSimpleValueType(0);
1150  if (VT.isVector() || VT == MVT::f128)
1151  break;
1152 
1153  MVT VecVT = VT == MVT::f64 ? MVT::v2f64
1154  : VT == MVT::f32 ? MVT::v4f32
1155  : MVT::v8f16;
1156 
1157  SDLoc dl(N);
1158  SDValue Op0 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
1159  N->getOperand(0));
1160  SDValue Op1 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
1161  N->getOperand(1));
1162 
1163  SDValue Res;
1164  if (Subtarget->hasSSE2()) {
1165  EVT IntVT = EVT(VecVT).changeVectorElementTypeToInteger();
1166  Op0 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op0);
1167  Op1 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op1);
1168  unsigned Opc;
1169  switch (N->getOpcode()) {
1170  default: llvm_unreachable("Unexpected opcode!");
1171  case X86ISD::FANDN: Opc = X86ISD::ANDNP; break;
1172  case X86ISD::FAND: Opc = ISD::AND; break;
1173  case X86ISD::FOR: Opc = ISD::OR; break;
1174  case X86ISD::FXOR: Opc = ISD::XOR; break;
1175  }
1176  Res = CurDAG->getNode(Opc, dl, IntVT, Op0, Op1);
1177  Res = CurDAG->getNode(ISD::BITCAST, dl, VecVT, Res);
1178  } else {
1179  Res = CurDAG->getNode(N->getOpcode(), dl, VecVT, Op0, Op1);
1180  }
1181  Res = CurDAG->getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res,
1182  CurDAG->getIntPtrConstant(0, dl));
1183  --I;
1184  CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
1185  ++I;
1186  MadeChange = true;
1187  continue;
1188  }
1189  }
1190 
1191  if (OptLevel != CodeGenOpt::None &&
1192  // Only do this when the target can fold the load into the call or
1193  // jmp.
1194  !Subtarget->useIndirectThunkCalls() &&
1195  ((N->getOpcode() == X86ISD::CALL && !Subtarget->slowTwoMemOps()) ||
1196  (N->getOpcode() == X86ISD::TC_RETURN &&
1197  (Subtarget->is64Bit() ||
1198  !getTargetMachine().isPositionIndependent())))) {
1199  /// Also try moving call address load from outside callseq_start to just
1200  /// before the call to allow it to be folded.
1201  ///
1202  /// [Load chain]
1203  /// ^
1204  /// |
1205  /// [Load]
1206  /// ^ ^
1207  /// | |
1208  /// / \--
1209  /// / |
1210  ///[CALLSEQ_START] |
1211  /// ^ |
1212  /// | |
1213  /// [LOAD/C2Reg] |
1214  /// | |
1215  /// \ /
1216  /// \ /
1217  /// [CALL]
1218  bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
1219  SDValue Chain = N->getOperand(0);
1220  SDValue Load = N->getOperand(1);
1221  if (!isCalleeLoad(Load, Chain, HasCallSeq))
1222  continue;
1223  moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
1224  ++NumLoadMoved;
1225  MadeChange = true;
1226  continue;
1227  }
1228 
1229  // Lower fpround and fpextend nodes that target the FP stack to be a store and
1230  // a load through the stack. This is a gross hack. We would like to simply mark
1231  // these as being illegal, but when we do that, legalize produces these when
1232  // it expands calls, then expands these in the same legalize pass. We would
1233  // like dag combine to be able to hack on these between the call expansion
1234  // and the node legalization. As such this pass basically does "really
1235  // late" legalization of these inline with the X86 isel pass.
1236  // FIXME: This should only happen when not compiled with -O0.
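// Concretely (a sketch of the expansion below, assuming an x87 f64 source
// and an SSE f32 destination): the FP_ROUND becomes a truncating store to a
// fresh stack slot followed by a load, roughly
//   fstps -8(%rsp)        ; truncate to f32 in memory
//   movss -8(%rsp), %xmm0 ; reload into the SSE destination
// i.e. the truncstore/extload pair created by the code that follows.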
1237  switch (N->getOpcode()) {
1238  default: continue;
1239  case ISD::FP_ROUND:
1240  case ISD::FP_EXTEND:
1241  {
1242  MVT SrcVT = N->getOperand(0).getSimpleValueType();
1243  MVT DstVT = N->getSimpleValueType(0);
1244 
1245  // If any of the sources are vectors, no fp stack involved.
1246  if (SrcVT.isVector() || DstVT.isVector())
1247  continue;
1248 
1249  // If the source and destination are SSE registers, then this is a legal
1250  // conversion that should not be lowered.
1251  const X86TargetLowering *X86Lowering =
1252  static_cast<const X86TargetLowering *>(TLI);
1253  bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
1254  bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
1255  if (SrcIsSSE && DstIsSSE)
1256  continue;
1257 
1258  if (!SrcIsSSE && !DstIsSSE) {
1259  // If this is an FPStack extension, it is a noop.
1260  if (N->getOpcode() == ISD::FP_EXTEND)
1261  continue;
1262  // If this is a value-preserving FPStack truncation, it is a noop.
1263  if (N->getConstantOperandVal(1))
1264  continue;
1265  }
1266 
1267  // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
1268  // FPStack has extload and truncstore. SSE can fold direct loads into other
1269  // operations. Based on this, decide what we want to do.
1270  MVT MemVT = (N->getOpcode() == ISD::FP_ROUND) ? DstVT : SrcVT;
1271  SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
1272  int SPFI = cast<FrameIndexSDNode>(MemTmp)->getIndex();
1273  MachinePointerInfo MPI =
1274  MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), SPFI);
1275  SDLoc dl(N);
1276 
1277  // FIXME: optimize the case where the src/dest is a load or store?
1278 
1279  SDValue Store = CurDAG->getTruncStore(
1280  CurDAG->getEntryNode(), dl, N->getOperand(0), MemTmp, MPI, MemVT);
1281  SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store,
1282  MemTmp, MPI, MemVT);
1283 
1284  // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
1285  // extload we created. This will cause general havoc on the DAG because
1286  // anything below the conversion could be folded into other existing nodes.
1287  // To avoid invalidating 'I', back it up to the convert node.
1288  --I;
1289  CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
1290  break;
1291  }
1292 
1293  // The sequence of events for lowering STRICT_FP versions of these nodes requires
1294  // dealing with the chain differently, as there is already a preexisting chain.
1295  case ISD::STRICT_FP_ROUND:
1296  case ISD::STRICT_FP_EXTEND:
1297  {
1298  MVT SrcVT = N->getOperand(1).getSimpleValueType();
1299  MVT DstVT = N->getSimpleValueType(0);
1300 
1301  // If any of the sources are vectors, no fp stack involved.
1302  if (SrcVT.isVector() || DstVT.isVector())
1303  continue;
1304 
1305  // If the source and destination are SSE registers, then this is a legal
1306  // conversion that should not be lowered.
1307  const X86TargetLowering *X86Lowering =
1308  static_cast<const X86TargetLowering *>(TLI);
1309  bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
1310  bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
1311  if (SrcIsSSE && DstIsSSE)
1312  continue;
1313 
1314  if (!SrcIsSSE && !DstIsSSE) {
1315  // If this is an FPStack extension, it is a noop.
1316  if (N->getOpcode() == ISD::STRICT_FP_EXTEND)
1317  continue;
1318  // If this is a value-preserving FPStack truncation, it is a noop.
1319  if (N->getConstantOperandVal(2))
1320  continue;
1321  }
1322 
1323  // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
1324  // FPStack has extload and truncstore. SSE can fold direct loads into other
1325  // operations. Based on this, decide what we want to do.
1326  MVT MemVT = (N->getOpcode() == ISD::STRICT_FP_ROUND) ? DstVT : SrcVT;
1327  SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
1328  int SPFI = cast<FrameIndexSDNode>(MemTmp)->getIndex();
1329  MachinePointerInfo MPI =
1330  MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), SPFI);
1331  SDLoc dl(N);
1332 
1333  // FIXME: optimize the case where the src/dest is a load or store?
1334 
1335  // Since the operation is StrictFP, use the preexisting chain.
1336  SDValue Store, Result;
1337  if (!SrcIsSSE) {
1338  SDVTList VTs = CurDAG->getVTList(MVT::Other);
1339  SDValue Ops[] = {N->getOperand(0), N->getOperand(1), MemTmp};
1340  Store = CurDAG->getMemIntrinsicNode(X86ISD::FST, dl, VTs, Ops, MemVT,
1341  MPI, /*Align*/ None,
1342  MachineMemOperand::MOStore);
1343  if (N->getFlags().hasNoFPExcept()) {
1344  SDNodeFlags Flags = Store->getFlags();
1345  Flags.setNoFPExcept(true);
1346  Store->setFlags(Flags);
1347  }
1348  } else {
1349  assert(SrcVT == MemVT && "Unexpected VT!");
1350  Store = CurDAG->getStore(N->getOperand(0), dl, N->getOperand(1), MemTmp,
1351  MPI);
1352  }
1353 
1354  if (!DstIsSSE) {
1355  SDVTList VTs = CurDAG->getVTList(DstVT, MVT::Other);
1356  SDValue Ops[] = {Store, MemTmp};
1357  Result = CurDAG->getMemIntrinsicNode(
1358  X86ISD::FLD, dl, VTs, Ops, MemVT, MPI,
1359  /*Align*/ None, MachineMemOperand::MOLoad);
1360  if (N->getFlags().hasNoFPExcept()) {
1361  SDNodeFlags Flags = Result->getFlags();
1362  Flags.setNoFPExcept(true);
1363  Result->setFlags(Flags);
1364  }
1365  } else {
1366  assert(DstVT == MemVT && "Unexpected VT!");
1367  Result = CurDAG->getLoad(DstVT, dl, Store, MemTmp, MPI);
1368  }
1369 
1370  // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
1371  // extload we created. This will cause general havoc on the DAG because
1372  // anything below the conversion could be folded into other existing nodes.
1373  // To avoid invalidating 'I', back it up to the convert node.
1374  --I;
1375  CurDAG->ReplaceAllUsesWith(N, Result.getNode());
1376  break;
1377  }
1378  }
1379 
1380 
1381  // Now that we did that, the node is dead. Increment the iterator to the
1382  // next node to process, then delete N.
1383  ++I;
1384  MadeChange = true;
1385  }
1386 
1387  // Remove any dead nodes that may have been left behind.
1388  if (MadeChange)
1389  CurDAG->RemoveDeadNodes();
1390 }
1391 
1392 // Look for a redundant movzx/movsx that can occur after an 8-bit divrem.
1393 bool X86DAGToDAGISel::tryOptimizeRem8Extend(SDNode *N) {
1394  unsigned Opc = N->getMachineOpcode();
1395  if (Opc != X86::MOVZX32rr8 && Opc != X86::MOVSX32rr8 &&
1396  Opc != X86::MOVSX64rr8)
1397  return false;
1398 
1399  SDValue N0 = N->getOperand(0);
1400 
1401  // We need to be extracting the low byte of an extend.
1402  if (!N0.isMachineOpcode() ||
1403  N0.getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG ||
1404  N0.getConstantOperandVal(1) != X86::sub_8bit)
1405  return false;
1406 
1407  // We're looking for either a movsx or movzx to match the original opcode.
1408  unsigned ExpectedOpc = Opc == X86::MOVZX32rr8 ? X86::MOVZX32rr8_NOREX
1409  : X86::MOVSX32rr8_NOREX;
1410  SDValue N00 = N0.getOperand(0);
1411  if (!N00.isMachineOpcode() || N00.getMachineOpcode() != ExpectedOpc)
1412  return false;
1413 
1414  if (Opc == X86::MOVSX64rr8) {
1415  // If we had a sign extend from 8 to 64 bits, we still need to go from 32
1416  // to 64.
1417  MachineSDNode *Extend = CurDAG->getMachineNode(X86::MOVSX64rr32, SDLoc(N),
1418  MVT::i64, N00);
1419  ReplaceUses(N, Extend);
1420  } else {
1421  // Ok we can drop this extend and just use the original extend.
1422  ReplaceUses(N, N00.getNode());
1423  }
1424 
1425  return true;
1426 }
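// Example of the peephole above (sketch): after an 8-bit divrem, isel can
// produce
//   %t  = MOVZX32rr8_NOREX %src8
//   %r8 = EXTRACT_SUBREG %t, sub_8bit
//   %z  = MOVZX32rr8 %r8
// The outer movzx re-zeroes bits the inner NOREX movzx already cleared, so
// %z is replaced by %t (with an extra MOVSX64rr32 only for the 8->64 case).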
1427 
1428 void X86DAGToDAGISel::PostprocessISelDAG() {
1429  // Skip peepholes at -O0.
1430  if (TM.getOptLevel() == CodeGenOpt::None)
1431  return;
1432 
1433  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
1434 
1435  bool MadeChange = false;
1436  while (Position != CurDAG->allnodes_begin()) {
1437  SDNode *N = &*--Position;
1438  // Skip dead nodes and any non-machine opcodes.
1439  if (N->use_empty() || !N->isMachineOpcode())
1440  continue;
1441 
1442  if (tryOptimizeRem8Extend(N)) {
1443  MadeChange = true;
1444  continue;
1445  }
1446 
1447  // Look for a TESTrr+ANDrr pattern where both operands of the test are
1448  // the same. Rewrite to remove the AND.
1449  unsigned Opc = N->getMachineOpcode();
1450  if ((Opc == X86::TEST8rr || Opc == X86::TEST16rr ||
1451  Opc == X86::TEST32rr || Opc == X86::TEST64rr) &&
1452  N->getOperand(0) == N->getOperand(1) &&
1453  N->isOnlyUserOf(N->getOperand(0).getNode()) &&
1454  N->getOperand(0).isMachineOpcode()) {
1455  SDValue And = N->getOperand(0);
1456  unsigned N0Opc = And.getMachineOpcode();
1457  if (N0Opc == X86::AND8rr || N0Opc == X86::AND16rr ||
1458  N0Opc == X86::AND32rr || N0Opc == X86::AND64rr) {
1459  MachineSDNode *Test = CurDAG->getMachineNode(Opc, SDLoc(N),
1460  MVT::i32,
1461  And.getOperand(0),
1462  And.getOperand(1));
1463  ReplaceUses(N, Test);
1464  MadeChange = true;
1465  continue;
1466  }
1467  if (N0Opc == X86::AND8rm || N0Opc == X86::AND16rm ||
1468  N0Opc == X86::AND32rm || N0Opc == X86::AND64rm) {
1469  unsigned NewOpc;
1470  switch (N0Opc) {
1471  case X86::AND8rm: NewOpc = X86::TEST8mr; break;
1472  case X86::AND16rm: NewOpc = X86::TEST16mr; break;
1473  case X86::AND32rm: NewOpc = X86::TEST32mr; break;
1474  case X86::AND64rm: NewOpc = X86::TEST64mr; break;
1475  }
1476 
1477  // Need to swap the memory and register operand.
1478  SDValue Ops[] = { And.getOperand(1),
1479  And.getOperand(2),
1480  And.getOperand(3),
1481  And.getOperand(4),
1482  And.getOperand(5),
1483  And.getOperand(0),
1484  And.getOperand(6) /* Chain */ };
1485  MachineSDNode *Test = CurDAG->getMachineNode(NewOpc, SDLoc(N),
1486  MVT::i32, MVT::Other, Ops);
1487  CurDAG->setNodeMemRefs(
1488  Test, cast<MachineSDNode>(And.getNode())->memoperands());
1489  ReplaceUses(N, Test);
1490  MadeChange = true;
1491  continue;
1492  }
1493  }
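// Example of the rewrites above (sketch):
//   %a = AND32rr %x, %y
//   TEST32rr %a, %a        ; %a has no other users
// becomes TEST32rr %x, %y, since test computes the same EFLAGS the and
// would; the memory form AND32rm similarly folds into TEST32mr with the
// register and memory operands swapped.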
1494 
1495  // Look for a KAND+KORTEST and turn it into KTEST if only the zero flag is
1496  // used. We're doing this late so we can prefer to fold the AND into masked
1497  // comparisons. Doing that can be better for the live range of the mask
1498  // register.
1499  if ((Opc == X86::KORTESTBrr || Opc == X86::KORTESTWrr ||
1500  Opc == X86::KORTESTDrr || Opc == X86::KORTESTQrr) &&
1501  N->getOperand(0) == N->getOperand(1) &&
1502  N->isOnlyUserOf(N->getOperand(0).getNode()) &&
1503  N->getOperand(0).isMachineOpcode() &&
1504  onlyUsesZeroFlag(SDValue(N, 0))) {
1505  SDValue And = N->getOperand(0);
1506  unsigned N0Opc = And.getMachineOpcode();
1507  // KANDW is legal with AVX512F, but KTESTW requires AVX512DQ. The other
1508  // KAND instructions and KTEST use the same ISA feature.
1509  if (N0Opc == X86::KANDBrr ||
1510  (N0Opc == X86::KANDWrr && Subtarget->hasDQI()) ||
1511  N0Opc == X86::KANDDrr || N0Opc == X86::KANDQrr) {
1512  unsigned NewOpc;
1513  switch (Opc) {
1514  default: llvm_unreachable("Unexpected opcode!");
1515  case X86::KORTESTBrr: NewOpc = X86::KTESTBrr; break;
1516  case X86::KORTESTWrr: NewOpc = X86::KTESTWrr; break;
1517  case X86::KORTESTDrr: NewOpc = X86::KTESTDrr; break;
1518  case X86::KORTESTQrr: NewOpc = X86::KTESTQrr; break;
1519  }
1520  MachineSDNode *KTest = CurDAG->getMachineNode(NewOpc, SDLoc(N),
1521  MVT::i32,
1522  And.getOperand(0),
1523  And.getOperand(1));
1524  ReplaceUses(N, KTest);
1525  MadeChange = true;
1526  continue;
1527  }
1528  }
1529 
1530  // Attempt to remove vector moves that were inserted to zero upper bits.
1531  if (Opc != TargetOpcode::SUBREG_TO_REG)
1532  continue;
1533 
1534  unsigned SubRegIdx = N->getConstantOperandVal(2);
1535  if (SubRegIdx != X86::sub_xmm && SubRegIdx != X86::sub_ymm)
1536  continue;
1537 
1538  SDValue Move = N->getOperand(1);
1539  if (!Move.isMachineOpcode())
1540  continue;
1541 
1542  // Make sure it's one of the move opcodes we recognize.
1543  switch (Move.getMachineOpcode()) {
1544  default:
1545  continue;
1546  case X86::VMOVAPDrr: case X86::VMOVUPDrr:
1547  case X86::VMOVAPSrr: case X86::VMOVUPSrr:
1548  case X86::VMOVDQArr: case X86::VMOVDQUrr:
1549  case X86::VMOVAPDYrr: case X86::VMOVUPDYrr:
1550  case X86::VMOVAPSYrr: case X86::VMOVUPSYrr:
1551  case X86::VMOVDQAYrr: case X86::VMOVDQUYrr:
1552  case X86::VMOVAPDZ128rr: case X86::VMOVUPDZ128rr:
1553  case X86::VMOVAPSZ128rr: case X86::VMOVUPSZ128rr:
1554  case X86::VMOVDQA32Z128rr: case X86::VMOVDQU32Z128rr:
1555  case X86::VMOVDQA64Z128rr: case X86::VMOVDQU64Z128rr:
1556  case X86::VMOVAPDZ256rr: case X86::VMOVUPDZ256rr:
1557  case X86::VMOVAPSZ256rr: case X86::VMOVUPSZ256rr:
1558  case X86::VMOVDQA32Z256rr: case X86::VMOVDQU32Z256rr:
1559  case X86::VMOVDQA64Z256rr: case X86::VMOVDQU64Z256rr:
1560  break;
1561  }
1562 
1563  SDValue In = Move.getOperand(0);
1564  if (!In.isMachineOpcode() ||
1565  In.getMachineOpcode() <= TargetOpcode::GENERIC_OP_END)
1566  continue;
1567 
1568  // Make sure the instruction has a VEX, XOP, or EVEX prefix. This excludes
1569  // the SHA instructions, which use a legacy encoding.
1570  uint64_t TSFlags = getInstrInfo()->get(In.getMachineOpcode()).TSFlags;
1571  if ((TSFlags & X86II::EncodingMask) != X86II::VEX &&
1572  (TSFlags & X86II::EncodingMask) != X86II::EVEX &&
1573  (TSFlags & X86II::EncodingMask) != X86II::XOP)
1574  continue;
1575 
1576  // The producing instruction is another vector instruction, so we can drop
1577  // the move.
1578  CurDAG->UpdateNodeOperands(N, N->getOperand(0), In, N->getOperand(2));
1579  MadeChange = true;
1580  }
1581 
1582  if (MadeChange)
1583  CurDAG->RemoveDeadNodes();
1584 }
1585 
1586 
1587 /// Emit any code that needs to be executed only in the main function.
1588 void X86DAGToDAGISel::emitSpecialCodeForMain() {
1589  if (Subtarget->isTargetCygMing()) {
1590  TargetLowering::ArgListTy Args;
1591  auto &DL = CurDAG->getDataLayout();
1592 
1593  TargetLowering::CallLoweringInfo CLI(*CurDAG);
1594  CLI.setChain(CurDAG->getRoot())
1595  .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
1596  CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
1597  std::move(Args));
1598  const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
1599  std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
1600  CurDAG->setRoot(Result.second);
1601  }
1602 }
1603 
1604 void X86DAGToDAGISel::emitFunctionEntryCode() {
1605  // If this is main, emit special code for main.
1606  const Function &F = MF->getFunction();
1607  if (F.hasExternalLinkage() && F.getName() == "main")
1608  emitSpecialCodeForMain();
1609 }
1610 
1611 static bool isDispSafeForFrameIndex(int64_t Val) {
1612  // On 64-bit platforms, we can run into an issue where a frame index
1613  // includes a displacement that, when added to the explicit displacement,
1614  // will overflow the displacement field. Assuming that the frame index
1615  // displacement fits into a 31-bit integer (which is only slightly more
1616  // aggressive than the current fundamental assumption that it fits into
1617  // a 32-bit integer), a 31-bit disp should always be safe.
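  // Illustrative values for the isInt<31> check below:
  //   isDispSafeForFrameIndex(0x3fffffff) -> true   (fits in 31 bits)
  //   isDispSafeForFrameIndex(0x40000000) -> false  (needs the 32nd bit)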
1618  return isInt<31>(Val);
1619 }
1620 
1621 bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
1622  X86ISelAddressMode &AM) {
1623  // We may have already matched a displacement and the caller just added the
1624  // symbolic displacement. So we still need to do the checks even if Offset
1625  // is zero.
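  // Illustrative sketch: if AM already holds "gv" with Disp == 16, calling
  // foldOffsetIntoAddress(8, AM) checks the combined Val == 24 against the
  // code model and, on success, commits AM.Disp = 24.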
1626 
1627  int64_t Val = AM.Disp + Offset;
1628 
1629  // Cannot combine ExternalSymbol displacements with integer offsets.
1630  if (Val != 0 && (AM.ES || AM.MCSym))
1631  return true;
1632 
1633  CodeModel::Model M = TM.getCodeModel();
1634  if (Subtarget->is64Bit()) {
1635  if (Val != 0 &&
1636      !X86::isOffsetSuitableForCodeModel(Val, M,
1637                                         AM.hasSymbolicDisplacement()))
1638  return true;
1639  // In addition to the checks required for a register base, check that
1640  // we do not try to use an unsafe Disp with a frame index.
1641  if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
1642      !isDispSafeForFrameIndex(Val))
1643    return true;
1644  }
1645  AM.Disp = Val;
1646  return false;
1647 
1648 }
1649 
1650 bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM,
1651  bool AllowSegmentRegForX32) {
1652  SDValue Address = N->getOperand(1);
1653 
1654  // load gs:0 -> GS segment register.
1655  // load fs:0 -> FS segment register.
1656  //
1657  // This optimization is generally valid because the GNU TLS model defines that
1658  // gs:0 (or fs:0 on X86-64) contains its own address. However, for X86-64 mode
1659  // with 32-bit registers, as we get in ILP32 mode, those registers are first
1660  // zero-extended to 64 bits and then added to the base address, which gives
1661  // unwanted results when the register holds a negative value.
1662  // For more information see http://people.redhat.com/drepper/tls.pdf
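  // Illustrative IR that reaches this fold (assuming the usual x86 address
  // space numbering, 256 == GS and 257 == FS):
  //   %self = load i8*, i8* addrspace(257)* null
  // selects the FS segment register into AM.Segment with no base or index.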
1663  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address)) {
1664  if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
1665  !IndirectTlsSegRefs &&
1666  (Subtarget->isTargetGlibc() || Subtarget->isTargetAndroid() ||
1667  Subtarget->isTargetFuchsia())) {
1668  if (Subtarget->isTarget64BitILP32() && !AllowSegmentRegForX32)
1669  return true;
1670  switch (N->getPointerInfo().getAddrSpace()) {
1671  case X86AS::GS:
1672  AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
1673  return false;
1674  case X86AS::FS:
1675  AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
1676  return false;
1677  // Address space X86AS::SS is not handled here, because it is not used to
1678  // address TLS areas.
1679  }
1680  }
1681  }
1682 
1683  return true;
1684 }
1685 
1686 /// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an addressing
1687 /// mode. These wrap things that will resolve down into a symbol reference.
1688 /// If no match is possible, this returns true, otherwise it returns false.
1689 bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
1690  // If the addressing mode already has a symbol as the displacement, we can
1691  // never match another symbol.
1692  if (AM.hasSymbolicDisplacement())
1693  return true;
1694 
1695  bool IsRIPRelTLS = false;
1696  bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;
1697  if (IsRIPRel) {
1698  SDValue Val = N.getOperand(0);
1699    if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
1700      IsRIPRelTLS = true;
1701  }
1702 
1703  // We can't use an addressing mode in the 64-bit large code model.
1704  // Global TLS addressing is an exception. In the medium code model,
1705  // we can use such a mode when RIP wrappers are present.
1706  // That signifies access to globals that are known to be "near",
1707  // such as the GOT itself.
1708  CodeModel::Model M = TM.getCodeModel();
1709  if (Subtarget->is64Bit() &&
1710  ((M == CodeModel::Large && !IsRIPRelTLS) ||
1711  (M == CodeModel::Medium && !IsRIPRel)))
1712  return true;
1713 
1714  // Base and index reg must be 0 in order to use %rip as base.
1715  if (IsRIPRel && AM.hasBaseOrIndexReg())
1716  return true;
1717 
1718  // Make a local copy in case we can't do this fold.
1719  X86ISelAddressMode Backup = AM;
1720 
1721  int64_t Offset = 0;
1722  SDValue N0 = N.getOperand(0);
1723  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
1724  AM.GV = G->getGlobal();
1725  AM.SymbolFlags = G->getTargetFlags();
1726  Offset = G->getOffset();
1727  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
1728  AM.CP = CP->getConstVal();
1729  AM.Alignment = CP->getAlign();
1730  AM.SymbolFlags = CP->getTargetFlags();
1731  Offset = CP->getOffset();
1732  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
1733  AM.ES = S->getSymbol();
1734  AM.SymbolFlags = S->getTargetFlags();
1735  } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
1736  AM.MCSym = S->getMCSymbol();
1737  } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
1738  AM.JT = J->getIndex();
1739  AM.SymbolFlags = J->getTargetFlags();
1740  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
1741  AM.BlockAddr = BA->getBlockAddress();
1742  AM.SymbolFlags = BA->getTargetFlags();
1743  Offset = BA->getOffset();
1744  } else
1745  llvm_unreachable("Unhandled symbol reference node.");
1746 
1747  if (foldOffsetIntoAddress(Offset, AM)) {
1748  AM = Backup;
1749  return true;
1750  }
1751 
1752  if (IsRIPRel)
1753  AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
1754 
1755  // Commit the changes now that we know this fold is safe.
1756  return false;
1757 }
1758 
1759 /// Add the specified node to the specified addressing mode, returning true if
1760 /// it cannot be done. This just pattern matches for the addressing mode.
1761 bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) {
1762  if (matchAddressRecursively(N, AM, 0))
1763  return true;
1764 
1765  // Post-processing: Make a second attempt to fold a load, if we now know
1766  // that there will not be any other register. This is only performed for
1767  // 64-bit ILP32 mode since 32-bit mode and 64-bit LP64 mode will have folded
1768  // any foldable load the first time.
1769  if (Subtarget->isTarget64BitILP32() &&
1770  AM.BaseType == X86ISelAddressMode::RegBase &&
1771  AM.Base_Reg.getNode() != nullptr && AM.IndexReg.getNode() == nullptr) {
1772  SDValue Save_Base_Reg = AM.Base_Reg;
1773  if (auto *LoadN = dyn_cast<LoadSDNode>(Save_Base_Reg)) {
1774  AM.Base_Reg = SDValue();
1775  if (matchLoadInAddress(LoadN, AM, /*AllowSegmentRegForX32=*/true))
1776  AM.Base_Reg = Save_Base_Reg;
1777  }
1778  }
1779 
1780  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
1781  // a smaller encoding and avoids a scaled index.
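  // Illustrative encodings (assumed asm):
  //   leal (,%ecx,2), %eax   ; index-only form forces a 4-byte disp32
  //   leal (%ecx,%ecx), %eax ; rewritten form, same value, shorter encoding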
1782  if (AM.Scale == 2 &&
1783  AM.BaseType == X86ISelAddressMode::RegBase &&
1784  AM.Base_Reg.getNode() == nullptr) {
1785  AM.Base_Reg = AM.IndexReg;
1786  AM.Scale = 1;
1787  }
1788 
1789  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
1790  // because it has a smaller encoding.
1791  // TODO: Which other code models can use this?
1792  switch (TM.getCodeModel()) {
1793  default: break;
1794  case CodeModel::Small:
1795  case CodeModel::Kernel:
1796  if (Subtarget->is64Bit() &&
1797  AM.Scale == 1 &&
1798  AM.BaseType == X86ISelAddressMode::RegBase &&
1799  AM.Base_Reg.getNode() == nullptr &&
1800  AM.IndexReg.getNode() == nullptr &&
1801  AM.SymbolFlags == X86II::MO_NO_FLAG &&
1802  AM.hasSymbolicDisplacement())
1803  AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
1804  break;
1805  }
1806 
1807  return false;
1808 }
1809 
1810 bool X86DAGToDAGISel::matchAdd(SDValue &N, X86ISelAddressMode &AM,
1811  unsigned Depth) {
1812  // Add an artificial use to this node so that we can keep track of
1813  // it if it gets CSE'd with a different node.
1814  HandleSDNode Handle(N);
1815 
1816  X86ISelAddressMode Backup = AM;
1817  if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
1818  !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
1819  return false;
1820  AM = Backup;
1821 
1822  // Try again after commuting the operands.
1823  if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM,
1824  Depth + 1) &&
1825  !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth + 1))
1826  return false;
1827  AM = Backup;
1828 
1829  // If we couldn't fold both operands into the address at the same time,
1830  // see if we can just put each operand into a register and fold at least
1831  // the add.
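  // Illustrative fallback: for "(add %p, %q)" where neither side folds any
  // further, %p becomes the base and %q the index with Scale == 1, i.e. the
  // operands of "lea (%p,%q), %dst".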
1832  if (AM.BaseType == X86ISelAddressMode::RegBase &&
1833  !AM.Base_Reg.getNode() &&
1834  !AM.IndexReg.getNode()) {
1835  N = Handle.getValue();
1836  AM.Base_Reg = N.getOperand(0);
1837  AM.IndexReg = N.getOperand(1);
1838  AM.Scale = 1;
1839  return false;
1840  }
1841  N = Handle.getValue();
1842  return true;
1843 }
1844 
1845 // Insert a node into the DAG at least before the Pos node's position. This
1846 // will reposition the node as needed, and will assign it a node ID that is <=
1847 // the Pos node's ID. Note that this does *not* preserve the uniqueness of node
1848 // IDs! The selection DAG must no longer depend on their uniqueness when this
1849 // is used.
1850 static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
1851  if (N->getNodeId() == -1 ||
1852      (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
1853       SelectionDAGISel::getUninvalidatedNodeId(Pos.getNode()))) {
1854    DAG.RepositionNode(Pos->getIterator(), N.getNode());
1855    // Mark Node as invalid for pruning, since after this it may be a successor
1856    // to a selected node but otherwise be in the same position as Pos.
1857    // Conservatively mark it with the same -abs(Id) to ensure the node id
1858    // invariant is preserved.
1859    N->setNodeId(Pos->getNodeId());
1860    SelectionDAGISel::InvalidateNodeId(N.getNode());
1861  }
1862 }
1863 
1864 // Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
1865 // safe. This allows us to convert the shift and AND into an h-register
1866 // extract and a scaled index. Returns false if the simplification is
1867 // performed.
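// Worked example (illustrative, C1 == 2): "(x >> 6) & 0x3fc" is rewritten as
// "((x >> 8) & 0xff) << 2"; the AND becomes an h-register extract feeding
// AM.IndexReg, and the << 2 is absorbed as AM.Scale == 4.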
1868 static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
1869                                       uint64_t Mask,
1870                                       SDValue Shift, SDValue X,
1871                                       X86ISelAddressMode &AM) {
1872  if (Shift.getOpcode() != ISD::SRL ||
1873  !isa<ConstantSDNode>(Shift.getOperand(1)) ||
1874  !Shift.hasOneUse())
1875  return true;
1876 
1877  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
1878  if (ScaleLog <= 0 || ScaleLog >= 4 ||
1879  Mask != (0xffu << ScaleLog))
1880  return true;
1881 
1882  MVT VT = N.getSimpleValueType();
1883  SDLoc DL(N);
1884  SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
1885  SDValue NewMask = DAG.getConstant(0xff, DL, VT);
1886  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
1887  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
1888  SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
1889  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);
1890 
1891  // Insert the new nodes into the topological ordering. We must do this in
1892  // a valid topological ordering as nothing is going to go back and re-sort
1893  // these nodes. We continually insert before 'N' in sequence as this is
1894  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1895  // hierarchy left to express.
1896  insertDAGNode(DAG, N, Eight);
1897  insertDAGNode(DAG, N, Srl);
1898  insertDAGNode(DAG, N, NewMask);
1899  insertDAGNode(DAG, N, And);
1900  insertDAGNode(DAG, N, ShlCount);
1901  insertDAGNode(DAG, N, Shl);
1902  DAG.ReplaceAllUsesWith(N, Shl);
1903  DAG.RemoveDeadNode(N.getNode());
1904  AM.IndexReg = And;
1905  AM.Scale = (1 << ScaleLog);
1906  return false;
1907 }
1908 
1909 // Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
1910 // allows us to fold the shift into this addressing mode. Returns false if the
1911 // transform succeeded.
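// Worked example (illustrative): "(x << 2) & 0x3fc" becomes
// "(x & 0xff) << 2", after which the SHL folds into the addressing mode as
// AM.Scale == 4 with the new AND as AM.IndexReg.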
1912 static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
1913                                         X86ISelAddressMode &AM) {
1914  SDValue Shift = N.getOperand(0);
1915 
1916  // Use a signed mask so that shifting right will insert sign bits. These
1917  // bits will be removed when we shift the result left so it doesn't matter
1918  // what we use. This might allow a smaller immediate encoding.
1919  int64_t Mask = cast<ConstantSDNode>(N->getOperand(1))->getSExtValue();
1920 
1921  // If we have an any_extend feeding the AND, look through it to see if there
1922  // is a shift behind it. But only if the AND doesn't use the extended bits.
1923  // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
1924  bool FoundAnyExtend = false;
1925  if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
1926  Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
1927  isUInt<32>(Mask)) {
1928  FoundAnyExtend = true;
1929  Shift = Shift.getOperand(0);
1930  }
1931 
1932  if (Shift.getOpcode() != ISD::SHL ||
1933  !isa<ConstantSDNode>(Shift.getOperand(1)))
1934  return true;
1935 
1936  SDValue X = Shift.getOperand(0);
1937 
1938  // Not likely to be profitable if either the AND or SHIFT node has more
1939  // than one use (unless all uses are for address computation). Besides,
1940  // the isel mechanism requires their node ids to be reused.
1941  if (!N.hasOneUse() || !Shift.hasOneUse())
1942  return true;
1943 
1944  // Verify that the shift amount is something we can fold.
1945  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1946  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
1947  return true;
1948 
1949  MVT VT = N.getSimpleValueType();
1950  SDLoc DL(N);
1951  if (FoundAnyExtend) {
1952  SDValue NewX = DAG.getNode(ISD::ANY_EXTEND, DL, VT, X);
1953  insertDAGNode(DAG, N, NewX);
1954  X = NewX;
1955  }
1956 
1957  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
1958  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
1959  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
1960 
1961  // Insert the new nodes into the topological ordering. We must do this in
1962  // a valid topological ordering as nothing is going to go back and re-sort
1963  // these nodes. We continually insert before 'N' in sequence as this is
1964  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1965  // hierarchy left to express.
1966  insertDAGNode(DAG, N, NewMask);
1967  insertDAGNode(DAG, N, NewAnd);
1968  insertDAGNode(DAG, N, NewShift);
1969  DAG.ReplaceAllUsesWith(N, NewShift);
1970  DAG.RemoveDeadNode(N.getNode());
1971 
1972  AM.Scale = 1 << ShiftAmt;
1973  AM.IndexReg = NewAnd;
1974  return false;
1975 }
1976 
1977 // Implement some heroics to detect shifts of masked values where the mask can
1978 // be replaced by extending the shift and undoing that in the addressing mode
1979 // scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
1980 // (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
1981 // the addressing mode. This results in code such as:
1982 //
1983 // int f(short *y, int *lookup_table) {
1984 // ...
1985 // return *y + lookup_table[*y >> 11];
1986 // }
1987 //
1988 // Turning into:
1989 // movzwl (%rdi), %eax
1990 // movl %eax, %ecx
1991 // shrl $11, %ecx
1992 // addl (%rsi,%rcx,4), %eax
1993 //
1994 // Instead of:
1995 // movzwl (%rdi), %eax
1996 // movl %eax, %ecx
1997 // shrl $9, %ecx
1998 // andl $124, %ecx
1999 // addl (%rsi,%rcx), %eax
2000 //
2001 // Note that this function assumes the mask is provided as a mask *after* the
2002 // value is shifted. The input chain may or may not match that, but computing
2003 // such a mask is trivial.
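// Worked example (illustrative): for "(and (srl x, 9), 124)" the mask has
// two trailing zeros, so AMShiftAmt == 2; the pattern is rebuilt as
// "(shl (srl x, 11), 2)" and the shl disappears into AM.Scale == 4.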
2004 static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
2005                                     uint64_t Mask,
2006                                     SDValue Shift, SDValue X,
2007                                     X86ISelAddressMode &AM) {
2008  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
2009  !isa<ConstantSDNode>(Shift.getOperand(1)))
2010  return true;
2011 
2012  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
2013  unsigned MaskLZ = countLeadingZeros(Mask);
2014  unsigned MaskTZ = countTrailingZeros(Mask);
2015 
2016  // The amount of shift we're trying to fit into the addressing mode is taken
2017  // from the trailing zeros of the mask.
2018  unsigned AMShiftAmt = MaskTZ;
2019 
2020  // There is nothing we can do here unless the mask is removing some bits.
2021  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
2022  if (AMShiftAmt == 0 || AMShiftAmt > 3) return true;
2023 
2024  // We also need to ensure that the mask is a contiguous run of bits.
2025  if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
2026 
2027  // Scale the leading zero count down based on the actual size of the value.
2028  // Also scale it down based on the size of the shift.
2029  unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
2030  if (MaskLZ < ScaleDown)
2031  return true;
2032  MaskLZ -= ScaleDown;
2033 
2034  // The final check is to ensure that any masked out high bits of X are
2035  // already known to be zero. Otherwise, the mask has a semantic impact
2036  // other than masking out a couple of low bits. Unfortunately, because of
2037  // the mask, zero extensions will be removed from operands in some cases.
2038  // This code works extra hard to look through extensions because we can
2039  // replace them with zero extensions cheaply if necessary.
2040  bool ReplacingAnyExtend = false;
2041  if (X.getOpcode() == ISD::ANY_EXTEND) {
2042  unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
2043  X.getOperand(0).getSimpleValueType().getSizeInBits();
2044  // Assume that we'll replace the any-extend with a zero-extend, and
2045  // narrow the search to the extended value.
2046  X = X.getOperand(0);
2047  MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
2048  ReplacingAnyExtend = true;
2049  }
2050  APInt MaskedHighBits =
2051  APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
2052  KnownBits Known = DAG.computeKnownBits(X);
2053  if (MaskedHighBits != Known.Zero) return true;
2054 
2055  // We've identified a pattern that can be transformed into a single shift
2056  // and an addressing mode. Make it so.
2057  MVT VT = N.getSimpleValueType();
2058  if (ReplacingAnyExtend) {
2059  assert(X.getValueType() != VT);
2060  // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
2061  SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
2062  insertDAGNode(DAG, N, NewX);
2063  X = NewX;
2064  }
2065  SDLoc DL(N);
2066  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
2067  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
2068  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
2069  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
2070 
2071  // Insert the new nodes into the topological ordering. We must do this in
2072  // a valid topological ordering as nothing is going to go back and re-sort
2073  // these nodes. We continually insert before 'N' in sequence as this is
2074  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
2075  // hierarchy left to express.
2076  insertDAGNode(DAG, N, NewSRLAmt);
2077  insertDAGNode(DAG, N, NewSRL);
2078  insertDAGNode(DAG, N, NewSHLAmt);
2079  insertDAGNode(DAG, N, NewSHL);
2080  DAG.ReplaceAllUsesWith(N, NewSHL);
2081  DAG.RemoveDeadNode(N.getNode());
2082 
2083  AM.Scale = 1 << AMShiftAmt;
2084  AM.IndexReg = NewSRL;
2085  return false;
2086 }
2087 
2088 // Transform "(X >> SHIFT) & (MASK << C1)" to
2089 // "((X >> (SHIFT + C1)) & (MASK)) << C1". Everything before the SHL will be
2090 // matched to a BEXTR later. Returns false if the simplification is performed.
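// Worked example (illustrative, needs TBM or BMI with fast BEXTR): for
// "(x >> 4) & (0x7f << 1)" this builds "(shl (and (srl x, 5), 0x7f), 1)";
// the SRL+AND pair can later match BEXTR and the shl becomes AM.Scale == 2.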
2091 static bool foldMaskedShiftToBEXTR(SelectionDAG &DAG, SDValue N,
2092                                    uint64_t Mask,
2093                                    SDValue Shift, SDValue X,
2094                                    X86ISelAddressMode &AM,
2095                                    const X86Subtarget &Subtarget) {
2096  if (Shift.getOpcode() != ISD::SRL ||
2097  !isa<ConstantSDNode>(Shift.getOperand(1)) ||
2098  !Shift.hasOneUse() || !N.hasOneUse())
2099  return true;
2100 
2101  // Only do this if BEXTR will be matched by matchBEXTRFromAndImm.
2102  if (!Subtarget.hasTBM() &&
2103  !(Subtarget.hasBMI() && Subtarget.hasFastBEXTR()))
2104  return true;
2105 
2106  // We need to ensure that the mask is a contiguous run of bits.
2107  if (!isShiftedMask_64(Mask)) return true;
2108 
2109  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
2110 
2111  // The amount of shift we're trying to fit into the addressing mode is taken
2112  // from the trailing zeros of the mask.
2113  unsigned AMShiftAmt = countTrailingZeros(Mask);
2114 
2115  // There is nothing we can do here unless the mask is removing some bits.
2116  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
2117  if (AMShiftAmt == 0 || AMShiftAmt > 3) return true;
2118 
2119  MVT VT = N.getSimpleValueType();
2120  SDLoc DL(N);
2121  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
2122  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
2123  SDValue NewMask = DAG.getConstant(Mask >> AMShiftAmt, DL, VT);
2124  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, NewSRL, NewMask);
2125  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
2126  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewAnd, NewSHLAmt);
2127 
2128  // Insert the new nodes into the topological ordering. We must do this in
2129  // a valid topological ordering as nothing is going to go back and re-sort
2130  // these nodes. We continually insert before 'N' in sequence as this is
2131  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
2132  // hierarchy left to express.
2133  insertDAGNode(DAG, N, NewSRLAmt);
2134  insertDAGNode(DAG, N, NewSRL);
2135  insertDAGNode(DAG, N, NewMask);
2136  insertDAGNode(DAG, N, NewAnd);
2137  insertDAGNode(DAG, N, NewSHLAmt);
2138  insertDAGNode(DAG, N, NewSHL);
2139  DAG.ReplaceAllUsesWith(N, NewSHL);
2140  DAG.RemoveDeadNode(N.getNode());
2141 
2142  AM.Scale = 1 << AMShiftAmt;
2143  AM.IndexReg = NewAnd;
2144  return false;
2145 }
2146 
2147 bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
2148  unsigned Depth) {
2149  SDLoc dl(N);
2150  LLVM_DEBUG({
2151  dbgs() << "MatchAddress: ";
2152  AM.dump(CurDAG);
2153  });
2154  // Limit recursion.
2155  if (Depth > 5)
2156  return matchAddressBase(N, AM);
2157 
2158  // If this is already a %rip relative address, we can only merge immediates
2159  // into it. Instead of handling this in every case, we handle it here.
2160  // RIP relative addressing: %rip + 32-bit displacement!
2161  if (AM.isRIPRelative()) {
2162  // FIXME: JumpTable and ExternalSymbol address currently don't like
2163  // displacements. It isn't very important, but this should be fixed for
2164  // consistency.
2165  if (!(AM.ES || AM.MCSym) && AM.JT != -1)
2166  return true;
2167 
2168  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
2169  if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))
2170  return false;
2171  return true;
2172  }
2173 
2174  switch (N.getOpcode()) {
2175  default: break;
2176  case ISD::LOCAL_RECOVER: {
2177  if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
2178  if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
2179  // Use the symbol and don't prefix it.
2180  AM.MCSym = ESNode->getMCSymbol();
2181  return false;
2182  }
2183  break;
2184  }
2185  case ISD::Constant: {
2186  uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
2187  if (!foldOffsetIntoAddress(Val, AM))
2188  return false;
2189  break;
2190  }
2191 
2192  case X86ISD::Wrapper:
2193  case X86ISD::WrapperRIP:
2194  if (!matchWrapper(N, AM))
2195  return false;
2196  break;
2197 
2198  case ISD::LOAD:
2199  if (!matchLoadInAddress(cast<LoadSDNode>(N), AM))
2200  return false;
2201  break;
2202 
2203  case ISD::FrameIndex:
2204  if (AM.BaseType == X86ISelAddressMode::RegBase &&
2205  AM.Base_Reg.getNode() == nullptr &&
2206  (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
2207  AM.BaseType = X86ISelAddressMode::FrameIndexBase;
2208  AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
2209  return false;
2210  }
2211  break;
2212 
2213  case ISD::SHL:
2214  if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
2215  break;
2216 
2217  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
2218  unsigned Val = CN->getZExtValue();
2219  // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
2220  // that the base operand remains free for further matching. If
2221  // the base doesn't end up getting used, a post-processing step
2222  // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
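      // Illustrative matches: "x << 3" becomes AM.Scale == 8 with
      // AM.IndexReg == x, and "(x + 5) << 1" additionally folds the
      // constant as AM.Disp == 10.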
2223  if (Val == 1 || Val == 2 || Val == 3) {
2224  AM.Scale = 1 << Val;
2225  SDValue ShVal = N.getOperand(0);
2226 
2227  // Okay, we know that we have a scale by now. However, if the scaled
2228  // value is an add of something and a constant, we can fold the
2229  // constant into the disp field here.
2230  if (CurDAG->isBaseWithConstantOffset(ShVal)) {
2231  AM.IndexReg = ShVal.getOperand(0);
2232  ConstantSDNode *AddVal = cast<ConstantSDNode>(ShVal.getOperand(1));
2233  uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
2234  if (!foldOffsetIntoAddress(Disp, AM))
2235  return false;
2236  }
2237 
2238  AM.IndexReg = ShVal;
2239  return false;
2240  }
2241  }
2242  break;
2243 
2244  case ISD::SRL: {
2245  // Scale must not be used already.
2246  if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
2247 
2248  // We only handle up to 64-bit values here as those are what matter for
2249  // addressing mode optimizations.
2250  assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
2251  "Unexpected value size!");
2252 
2253  SDValue And = N.getOperand(0);
2254  if (And.getOpcode() != ISD::AND) break;
2255  SDValue X = And.getOperand(0);
2256 
2257  // The mask used for the transform is expected to be post-shift, but we
2258  // found the shift first so just apply the shift to the mask before passing
2259  // it down.
2260  if (!isa<ConstantSDNode>(N.getOperand(1)) ||
2261  !isa<ConstantSDNode>(And.getOperand(1)))
2262  break;
2263  uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);
2264 
2265  // Try to fold the mask and shift into the scale, and return false if we
2266  // succeed.
2267  if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
2268  return false;
2269  break;
2270  }
2271 
2272  case ISD::SMUL_LOHI:
2273  case ISD::UMUL_LOHI:
2274  // A mul_lohi where we need the low part can be folded as a plain multiply.
2275    if (N.getResNo() != 0) break;
2276    LLVM_FALLTHROUGH;
2277  case ISD::MUL:
2278  case X86ISD::MUL_IMM:
2279  // X*[3,5,9] -> X+X*[2,4,8]
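    // e.g. (illustrative) "x * 9" uses x as both base and index with
    // AM.Scale == 8, which selects "lea (%rax,%rax,8), %rcx" instead of a
    // multiply.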
2280  if (AM.BaseType == X86ISelAddressMode::RegBase &&
2281  AM.Base_Reg.getNode() == nullptr &&
2282  AM.IndexReg.getNode() == nullptr) {
2283  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
2284  if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
2285  CN->getZExtValue() == 9) {
2286  AM.Scale = unsigned(CN->getZExtValue())-1;
2287 
2288  SDValue MulVal = N.getOperand(0);
2289  SDValue Reg;
2290 
2291  // Okay, we know that we have a scale by now. However, if the scaled
2292  // value is an add of something and a constant, we can fold the
2293  // constant into the disp field here.
2294  if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
2295  isa<ConstantSDNode>(MulVal.getOperand(1))) {
2296  Reg = MulVal.getOperand(0);
2297  ConstantSDNode *AddVal =
2298  cast<ConstantSDNode>(MulVal.getOperand(1));
2299  uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
2300  if (foldOffsetIntoAddress(Disp, AM))
2301  Reg = N.getOperand(0);
2302  } else {
2303  Reg = N.getOperand(0);
2304  }
2305 
2306  AM.IndexReg = AM.Base_Reg = Reg;
2307  return false;
2308  }
2309  }
2310  break;
2311 
2312  case ISD::SUB: {
2313  // Given A-B, if A can be completely folded into the address and
2314  // the index field with the index field unused, use -B as the index.
2315  // This is a win if A has multiple parts that can be folded into
2316  // the address. Also, this saves a mov if the base register has
2317  // other uses, since it avoids a two-address sub instruction, however
2318  // it costs an additional mov if the index register has other uses.
2319 
2320  // Add an artificial use to this node so that we can keep track of
2321  // it if it gets CSE'd with a different node.
2322  HandleSDNode Handle(N);
2323 
2324  // Test if the LHS of the sub can be folded.
2325  X86ISelAddressMode Backup = AM;
2326  if (matchAddressRecursively(N.getOperand(0), AM, Depth+1)) {
2327  N = Handle.getValue();
2328  AM = Backup;
2329  break;
2330  }
2331  N = Handle.getValue();
2332  // Test if the index field is free for use.
2333  if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
2334  AM = Backup;
2335  break;
2336  }
2337 
2338  int Cost = 0;
2339  SDValue RHS = N.getOperand(1);
2340  // If the RHS involves a register with multiple uses, this
2341  // transformation incurs an extra mov, due to the neg instruction
2342  // clobbering its operand.
2343  if (!RHS.getNode()->hasOneUse() ||
2344  RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
2345  RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
2346  RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
2347  (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
2348  RHS.getOperand(0).getValueType() == MVT::i32))
2349  ++Cost;
2350  // If the base is a register with multiple uses, this
2351  // transformation may save a mov.
2352  if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() &&
2353  !AM.Base_Reg.getNode()->hasOneUse()) ||
2354  AM.BaseType == X86ISelAddressMode::FrameIndexBase)
2355  --Cost;
2356  // If the folded LHS was interesting, this transformation saves
2357  // address arithmetic.
2358  if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
2359  ((AM.Disp != 0) && (Backup.Disp == 0)) +
2360  (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
2361  --Cost;
2362  // If it doesn't look like it may be an overall win, don't do it.
2363  if (Cost >= 0) {
2364  AM = Backup;
2365  break;
2366  }
2367 
2368  // Ok, the transformation is legal and appears profitable. Go for it.
2369  // Negation will be emitted later to avoid creating dangling nodes if this
2370  // was an unprofitable LEA.
2371  AM.IndexReg = RHS;
2372  AM.NegateIndex = true;
2373  AM.Scale = 1;
2374  return false;
2375  }
2376 
2377  case ISD::ADD:
2378  if (!matchAdd(N, AM, Depth))
2379  return false;
2380  break;
2381 
2382  case ISD::OR:
2383  // We want to look through a transform in InstCombine and DAGCombiner that
2384  // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
2385  // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3))
2386  // An 'lea' can then be used to match the shift (multiply) and add:
2387  // and $1, %esi
2388  // lea (%rsi, %rdi, 8), %rax
2389  if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) &&
2390  !matchAdd(N, AM, Depth))
2391  return false;
2392  break;
2393 
2394  case ISD::AND: {
2395  // Perform some heroic transforms on an and of a constant-count shift
2396  // with a constant to enable use of the scaled offset field.
2397 
2398  // Scale must not be used already.
2399  if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
2400 
2401  // We only handle up to 64-bit values here as those are what matter for
2402  // addressing mode optimizations.
2403  assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
2404  "Unexpected value size!");
2405 
2406  if (!isa<ConstantSDNode>(N.getOperand(1)))
2407  break;
2408 
2409  if (N.getOperand(0).getOpcode() == ISD::SRL) {
2410  SDValue Shift = N.getOperand(0);
2411  SDValue X = Shift.getOperand(0);
2412 
2413  uint64_t Mask = N.getConstantOperandVal(1);
2414 
2415  // Try to fold the mask and shift into an extract and scale.
2416  if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
2417  return false;
2418 
2419  // Try to fold the mask and shift directly into the scale.
2420  if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
2421  return false;
2422 
2423  // Try to fold the mask and shift into BEXTR and scale.
2424  if (!foldMaskedShiftToBEXTR(*CurDAG, N, Mask, Shift, X, AM, *Subtarget))
2425  return false;
2426  }
2427 
2428  // Try to swap the mask and shift to place shifts which can be done as
2429  // a scale on the outside of the mask.
2430  if (!foldMaskedShiftToScaledMask(*CurDAG, N, AM))
2431  return false;
2432 
2433  break;
2434  }
2435  case ISD::ZERO_EXTEND: {
2436  // Try to widen a zexted shift left to the same size as its use, so we can
2437  // match the shift as a scale factor.
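    // Worked example (illustrative): "zext i8 (shl nuw %x, 2) to i32", with
    // the top two bits of %x known zero, is rebuilt as
    // "shl (zext %x to i32), 2" so the shift matches as AM.Scale == 4.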
2438  if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
2439  break;
2440  if (N.getOperand(0).getOpcode() != ISD::SHL || !N.getOperand(0).hasOneUse())
2441  break;
2442 
2443  // Give up if the shift is not a valid scale factor [1,2,3].
2444  SDValue Shl = N.getOperand(0);
2445  auto *ShAmtC = dyn_cast<ConstantSDNode>(Shl.getOperand(1));
2446  if (!ShAmtC || ShAmtC->getZExtValue() > 3)
2447  break;
2448 
2449  // The narrow shift must only shift out zero bits (it must be 'nuw').
2450  // That makes it safe to widen to the destination type.
2451    APInt HighZeros = APInt::getHighBitsSet(Shl.getValueSizeInBits(),
2452                                            ShAmtC->getZExtValue());
2453  if (!CurDAG->MaskedValueIsZero(Shl.getOperand(0), HighZeros))
2454  break;
2455 
2456  // zext (shl nuw i8 %x, C) to i32 --> shl (zext i8 %x to i32), (zext C)
2457  MVT VT = N.getSimpleValueType();
2458  SDLoc DL(N);
2459  SDValue Zext = CurDAG->getNode(ISD::ZERO_EXTEND, DL, VT, Shl.getOperand(0));
2460  SDValue NewShl = CurDAG->getNode(ISD::SHL, DL, VT, Zext, Shl.getOperand(1));
2461 
2462  // Convert the shift to scale factor.
2463  AM.Scale = 1 << ShAmtC->getZExtValue();
2464  AM.IndexReg = Zext;
2465 
2466  insertDAGNode(*CurDAG, N, Zext);
2467  insertDAGNode(*CurDAG, N, NewShl);
2468  CurDAG->ReplaceAllUsesWith(N, NewShl);
2469  CurDAG->RemoveDeadNode(N.getNode());
2470  return false;
2471  }
2472  }
2473 
2474  return matchAddressBase(N, AM);
2475 }
2476 
2477 /// Helper for MatchAddress. Add the specified node to the
2478 /// specified addressing mode without any further recursion.
2479 bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) {
2480  // Is the base register already occupied?
2481  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
2482  // If so, check to see if the scale index register is set.
2483  if (!AM.IndexReg.getNode()) {
2484  AM.IndexReg = N;
2485  AM.Scale = 1;
2486  return false;
2487  }
2488 
2489  // Otherwise, we cannot select it.
2490  return true;
2491  }
2492 
2493  // Default, generate it as a register.
2494  AM.BaseType = X86ISelAddressMode::RegBase;
2495  AM.Base_Reg = N;
2496  return false;
2497 }
2498 
2499 bool X86DAGToDAGISel::matchVectorAddressRecursively(SDValue N,
2500  X86ISelAddressMode &AM,
2501  unsigned Depth) {
2502  SDLoc dl(N);
2503  LLVM_DEBUG({
2504  dbgs() << "MatchVectorAddress: ";
2505  AM.dump(CurDAG);
2506  });
2507  // Limit recursion.
2508  if (Depth > 5)
2509  return matchAddressBase(N, AM);
2510 
2511  // TODO: Support other operations.
2512  switch (N.getOpcode()) {
2513  case ISD::Constant: {
2514  uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
2515  if (!foldOffsetIntoAddress(Val, AM))
2516  return false;
2517  break;
2518  }
2519  case X86ISD::Wrapper:
2520  if (!matchWrapper(N, AM))
2521  return false;
2522  break;
2523  case ISD::ADD: {
2524  // Add an artificial use to this node so that we can keep track of
2525  // it if it gets CSE'd with a different node.
2526  HandleSDNode Handle(N);
2527 
2528  X86ISelAddressMode Backup = AM;
2529  if (!matchVectorAddressRecursively(N.getOperand(0), AM, Depth + 1) &&
2530  !matchVectorAddressRecursively(Handle.getValue().getOperand(1), AM,
2531  Depth + 1))
2532  return false;
2533  AM = Backup;
2534 
2535  // Try again after commuting the operands.
2536  if (!matchVectorAddressRecursively(Handle.getValue().getOperand(1), AM,
2537  Depth + 1) &&
2538  !matchVectorAddressRecursively(Handle.getValue().getOperand(0), AM,
2539  Depth + 1))
2540  return false;
2541  AM = Backup;
2542 
2543  N = Handle.getValue();
2544  break;
2545  }
2546  }
2547 
2548  return matchAddressBase(N, AM);
2549 }
2550 
2551 /// Helper for selectVectorAddr. Handles things that can be folded into a
2552 /// gather/scatter address. The index register and scale should have already
2553 /// been handled.
2554 bool X86DAGToDAGISel::matchVectorAddress(SDValue N, X86ISelAddressMode &AM) {
2555  return matchVectorAddressRecursively(N, AM, 0);
2556 }
2557 
2558 bool X86DAGToDAGISel::selectVectorAddr(MemSDNode *Parent, SDValue BasePtr,
2559  SDValue IndexOp, SDValue ScaleOp,
2560  SDValue &Base, SDValue &Scale,
2561  SDValue &Index, SDValue &Disp,
2562  SDValue &Segment) {
2563  X86ISelAddressMode AM;
2564  AM.IndexReg = IndexOp;
2565  AM.Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
2566 
2567  unsigned AddrSpace = Parent->getPointerInfo().getAddrSpace();
2568  if (AddrSpace == X86AS::GS)
2569  AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2570  if (AddrSpace == X86AS::FS)
2571  AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2572  if (AddrSpace == X86AS::SS)
2573  AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2574 
2575  SDLoc DL(BasePtr);
2576  MVT VT = BasePtr.getSimpleValueType();
2577 
2578  // Try to match into the base and displacement fields.
2579  if (matchVectorAddress(BasePtr, AM))
2580  return false;
2581 
2582  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2583  return true;
2584 }
2585 
2586 /// Returns true if it is able to pattern match an addressing mode.
2587 /// It returns the operands which make up the maximal addressing mode it can
2588 /// match by reference.
2589 ///
2590 /// Parent is the parent node of the addr operand that is being matched. It
2591 /// is always a load, store, atomic node, or null. It is only null when
2592 /// checking memory operands for inline asm nodes.
2593 bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
2594  SDValue &Scale, SDValue &Index,
2595  SDValue &Disp, SDValue &Segment) {
2596  X86ISelAddressMode AM;
2597 
2598  if (Parent &&
2599  // This list of opcodes are all the nodes that have an "addr:$ptr" operand
2600  // that are not a MemSDNode, and thus don't have proper addrspace info.
2601  Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
2602  Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
2603  Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
2604  Parent->getOpcode() != X86ISD::ENQCMD && // Fixme
2605  Parent->getOpcode() != X86ISD::ENQCMDS && // Fixme
2606  Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
2607  Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
2608  unsigned AddrSpace =
2609  cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
2610  if (AddrSpace == X86AS::GS)
2611  AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2612  if (AddrSpace == X86AS::FS)
2613  AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2614  if (AddrSpace == X86AS::SS)
2615  AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2616  }
2617 
2618  // Save the DL and VT before calling matchAddress, it can invalidate N.
2619  SDLoc DL(N);
2620  MVT VT = N.getSimpleValueType();
2621 
2622  if (matchAddress(N, AM))
2623  return false;
2624 
2625  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2626  return true;
2627 }
2628 
2629 bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
2630  // In static codegen with small code model, we can get the address of a label
2631  // into a register with 'movl'
2632  if (N->getOpcode() != X86ISD::Wrapper)
2633  return false;
2634 
2635  N = N.getOperand(0);
2636 
2637  // At least GNU as does not accept 'movl' for TPOFF relocations.
2638  // FIXME: We could use 'movl' when we know we are targeting MC.
2639  if (N->getOpcode() == ISD::TargetGlobalTLSAddress)
2640  return false;
2641 
2642  Imm = N;
2643  if (N->getOpcode() != ISD::TargetGlobalAddress)
2644  return TM.getCodeModel() == CodeModel::Small;
2645 
2646  Optional<ConstantRange> CR =
2647      cast<GlobalAddressSDNode>(N)->getGlobal()->getAbsoluteSymbolRange();
2648  if (!CR)
2649  return TM.getCodeModel() == CodeModel::Small;
2650 
2651  return CR->getUnsignedMax().ult(1ull << 32);
2652 }
2653 
2654 bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
2655  SDValue &Scale, SDValue &Index,
2656  SDValue &Disp, SDValue &Segment) {
2657  // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
2658  SDLoc DL(N);
2659 
2660  if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
2661  return false;
2662 
2663  RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
2664  if (RN && RN->getReg() == 0)
2665  Base = CurDAG->getRegister(0, MVT::i64);
2666  else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(Base)) {
2667  // Base could already be %rip, particularly in the x32 ABI.
2668  SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2669  MVT::i64), 0);
2670  Base = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2671  Base);
2672  }
2673 
2674  RN = dyn_cast<RegisterSDNode>(Index);
2675  if (RN && RN->getReg() == 0)
2676  Index = CurDAG->getRegister(0, MVT::i64);
2677  else {
2678  assert(Index.getValueType() == MVT::i32 &&
2679  "Expect to be extending 32-bit registers for use in LEA");
2680  SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2681  MVT::i64), 0);
2682  Index = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2683  Index);
2684  }
2685 
2686  return true;
2687 }
2688 
2689 /// Calls SelectAddr and determines if the maximal addressing
2690 /// mode it matches can be cost effectively emitted as an LEA instruction.
2691 bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
2692  SDValue &Base, SDValue &Scale,
2693  SDValue &Index, SDValue &Disp,
2694  SDValue &Segment) {
2695  X86ISelAddressMode AM;
2696 
2697  // Save the DL and VT before calling matchAddress, it can invalidate N.
2698  SDLoc DL(N);
2699  MVT VT = N.getSimpleValueType();
2700 
2701  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
2702  // segments.
2703  SDValue Copy = AM.Segment;
2704  SDValue T = CurDAG->getRegister(0, MVT::i32);
2705  AM.Segment = T;
2706  if (matchAddress(N, AM))
2707  return false;
2708  assert (T == AM.Segment);
2709  AM.Segment = Copy;
2710 
2711  unsigned Complexity = 0;
2712  if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode())
2713  Complexity = 1;
2714  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
2715  Complexity = 4;
2716 
2717  if (AM.IndexReg.getNode())
2718  Complexity++;
2719 
2720  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg or a
2721  // simple shift.
2722  if (AM.Scale > 1)
2723  Complexity++;
2724 
2725  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
2726  // to a LEA. This is determined with some experimentation but is by no means
2727  // optimal (especially for code size consideration). LEA is nice because of
2728  // its three-address nature. Tweak the cost function again when we can run
2729  // convertToThreeAddress() at register allocation time.
2730  if (AM.hasSymbolicDisplacement()) {
2731  // For X86-64, always use LEA to materialize RIP-relative addresses.
2732  if (Subtarget->is64Bit())
2733  Complexity = 4;
2734  else
2735  Complexity += 2;
2736  }
2737 
2738  // Heuristic: try harder to form an LEA from ADD if the operands set flags.
2739  // Unlike ADD, LEA does not affect flags, so we will be less likely to require
2740  // duplicating flag-producing instructions later in the pipeline.
2741  if (N.getOpcode() == ISD::ADD) {
2742  auto isMathWithFlags = [](SDValue V) {
2743  switch (V.getOpcode()) {
2744  case X86ISD::ADD:
2745  case X86ISD::SUB:
2746  case X86ISD::ADC:
2747  case X86ISD::SBB:
2748  /* TODO: These opcodes can be added safely, but we may want to justify
2749  their inclusion for different reasons (better for reg-alloc).
2750  case X86ISD::SMUL:
2751  case X86ISD::UMUL:
2752  case X86ISD::OR:
2753  case X86ISD::XOR:
2754  case X86ISD::AND:
2755  */
2756  // Value 1 is the flag output of the node - verify it's not dead.
2757  return !SDValue(V.getNode(), 1).use_empty();
2758  default:
2759  return false;
2760  }
2761  };
2762  // TODO: This could be an 'or' rather than 'and' to make the transform more
2763  // likely to happen. We might want to factor in whether there's a
2764  // load folding opportunity for the math op that disappears with LEA.
2765  if (isMathWithFlags(N.getOperand(0)) && isMathWithFlags(N.getOperand(1)))
2766  Complexity++;
2767  }
2768 
2769  if (AM.Disp)
2770  Complexity++;
2771 
2772  // If it isn't worth using an LEA, reject it.
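  // Illustrative scoring under the rules above: "base + index*4 + disp"
  // accumulates 1 + 1 + 1 + 1 == 4 and is kept, while a bare "base + disp"
  // only reaches 2 and falls back to ADD.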
2773  if (Complexity <= 2)
2774  return false;
2775 
2776  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2777  return true;
2778 }
2779 
2780 /// This is only run on TargetGlobalTLSAddress nodes.
2781 bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base,
2782  SDValue &Scale, SDValue &Index,
2783  SDValue &Disp, SDValue &Segment) {
2784  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
2785  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
2786 
2787  X86ISelAddressMode AM;
2788  AM.GV = GA->getGlobal();
2789  AM.Disp += GA->getOffset();
2790  AM.SymbolFlags = GA->getTargetFlags();
2791 
2792  if (Subtarget->is32Bit()) {
2793  AM.Scale = 1;
2794  AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
2795  }
2796 
2797  MVT VT = N.getSimpleValueType();
2798  getAddressOperands(AM, SDLoc(N), VT, Base, Scale, Index, Disp, Segment);
2799  return true;
2800 }
2801 
2802 bool X86DAGToDAGISel::selectRelocImm(SDValue N, SDValue &Op) {
2803  // Keep track of the original value type and whether this value was
2804  // truncated. If we see a truncation from pointer type to VT that truncates
2805  // bits that are known to be zero, we can use a narrow reference.
2806  EVT VT = N.getValueType();
2807  bool WasTruncated = false;
2808  if (N.getOpcode() == ISD::TRUNCATE) {
2809  WasTruncated = true;
2810  N = N.getOperand(0);
2811  }
2812 
2813  if (N.getOpcode() != X86ISD::Wrapper)
2814  return false;
2815 
2816  // We can only use non-GlobalValues as immediates if they were not truncated,
2817  // as we do not have any range information. If we have a GlobalValue and the
2818  // address was not truncated, we can select it as an operand directly.
2819  unsigned Opc = N.getOperand(0)->getOpcode();
2820  if (Opc != ISD::TargetGlobalAddress || !WasTruncated) {
2821  Op = N.getOperand(0);
2822  // We can only select the operand directly if we didn't have to look past a
2823  // truncate.
2824  return !WasTruncated;
2825  }
2826 
2827  // Check that the global's range fits into VT.
2828  auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0));
2829  Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2830  if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits()))
2831  return false;
2832 
2833  // Okay, we can use a narrow reference.
2834  Op = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(N), VT,
2835  GA->getOffset(), GA->getTargetFlags());
2836  return true;
2837 }
2838 
2839 bool X86DAGToDAGISel::tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
2840  SDValue &Base, SDValue &Scale,
2841  SDValue &Index, SDValue &Disp,
2842  SDValue &Segment) {
2843  assert(Root && P && "Unknown root/parent nodes");
2844  if (!ISD::isNON_EXTLoad(N.getNode()) ||
2845  !IsProfitableToFold(N, P, Root) ||
2846  !IsLegalToFold(N, P, Root, OptLevel))
2847  return false;
2848 
2849  return selectAddr(N.getNode(),
2850  N.getOperand(1), Base, Scale, Index, Disp, Segment);
2851 }
2852 
2853 bool X86DAGToDAGISel::tryFoldBroadcast(SDNode *Root, SDNode *P, SDValue N,
2854  SDValue &Base, SDValue &Scale,
2855  SDValue &Index, SDValue &Disp,
2856  SDValue &Segment) {
2857  assert(Root && P && "Unknown root/parent nodes");
2858  if (N->getOpcode() != X86ISD::VBROADCAST_LOAD ||
2859  !IsProfitableToFold(N, P, Root) ||
2860  !IsLegalToFold(N, P, Root, OptLevel))
2861  return false;
2862 
2863  return selectAddr(N.getNode(),
2864  N.getOperand(1), Base, Scale, Index, Disp, Segment);
2865 }
2866 
2867 /// Return an SDNode that returns the value of the global base register.
2868 /// Output instructions required to initialize the global base register,
2869 /// if necessary.
2870 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
2871  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
2872  auto &DL = MF->getDataLayout();
2873  return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
2874 }
2875 
2876 bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const {
2877  if (N->getOpcode() == ISD::TRUNCATE)
2878  N = N->getOperand(0).getNode();
2879  if (N->getOpcode() != X86ISD::Wrapper)
2880  return false;
2881 
2882  auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0));
2883  if (!GA)
2884  return false;
2885 
2886  Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2887  if (!CR)
2888  return Width == 32 && TM.getCodeModel() == CodeModel::Small;
2889 
2890  return CR->getSignedMin().sge(-1ull << Width) &&
2891  CR->getSignedMax().slt(1ull << Width);
2892 }
2893 
2894 static X86::CondCode getCondFromNode(SDNode *N) {
2895  assert(N->isMachineOpcode() && "Unexpected node");
2896  X86::CondCode CC = X86::COND_INVALID;
2897  unsigned Opc = N->getMachineOpcode();
2898  if (Opc == X86::JCC_1)
2899  CC = static_cast<X86::CondCode>(N->getConstantOperandVal(1));
2900  else if (Opc == X86::SETCCr)
2901  CC = static_cast<X86::CondCode>(N->getConstantOperandVal(0));
2902  else if (Opc == X86::SETCCm)
2903  CC = static_cast<X86::CondCode>(N->getConstantOperandVal(5));
2904  else if (Opc == X86::CMOV16rr || Opc == X86::CMOV32rr ||
2905  Opc == X86::CMOV64rr)
2906  CC = static_cast<X86::CondCode>(N->getConstantOperandVal(2));
2907  else if (Opc == X86::CMOV16rm || Opc == X86::CMOV32rm ||
2908  Opc == X86::CMOV64rm)
2909  CC = static_cast<X86::CondCode>(N->getConstantOperandVal(6));
2910 
2911  return CC;
2912 }
2913 
2914 /// Test whether all users of the given X86ISD::CMP node read only the zero
2915 /// flag (ZF); returns false if any user reads another flag.
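/// e.g. (illustrative) a CMP whose flag users are only JE/JNE or SETE/SETNE
/// keeps this true; a JA or JL user also reads CF or SF/OF and makes it
/// return false.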
2916 bool X86DAGToDAGISel::onlyUsesZeroFlag(SDValue Flags) const {
2917  // Examine each user of the node.
2918  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2919  UI != UE; ++UI) {
2920  // Only check things that use the flags.
2921  if (UI.getUse().getResNo() != Flags.getResNo())
2922  continue;
2923  // Only examine CopyToReg uses that copy to EFLAGS.
2924  if (UI->getOpcode() != ISD::CopyToReg ||
2925  cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2926  return false;
2927  // Examine each user of the CopyToReg use.
2928  for (SDNode::use_iterator FlagUI = UI->use_begin(),
2929  FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
2930  // Only examine the Flag result.
2931  if (FlagUI.getUse().getResNo() != 1) continue;
2932  // Anything unusual: assume conservatively.
2933  if (!FlagUI->isMachineOpcode()) return false;
2934  // Examine the condition code of the user.
2935  X86::CondCode CC = getCondFromNode(*FlagUI);
2936 
2937  switch (CC) {
2938  // Comparisons which only use the zero flag.
2939  case X86::COND_E: case X86::COND_NE:
2940  continue;
2941  // Anything else: assume conservatively.
2942  default:
2943  return false;
2944  }
2945  }
2946  }
2947  return true;
2948 }
2949 
2950 /// Test whether the given X86ISD::CMP node has any uses which require the SF
2951 /// flag to be accurate.
2952 bool X86DAGToDAGISel::hasNoSignFlagUses(SDValue Flags) const {
2953  // Examine each user of the node.
2954  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2955  UI != UE; ++UI) {
2956  // Only check things that use the flags.
2957  if (UI.getUse().getResNo() != Flags.getResNo())
2958  continue;
2959  // Only examine CopyToReg uses that copy to EFLAGS.
2960  if (UI->getOpcode() != ISD::CopyToReg ||
2961  cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2962  return false;
2963  // Examine each user of the CopyToReg use.
2964  for (SDNode::use_iterator FlagUI = UI->use_begin(),
2965  FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
2966  // Only examine the Flag result.
2967  if (FlagUI.getUse().getResNo() != 1) continue;
2968  // Anything unusual: assume conservatively.
2969  if (!FlagUI->isMachineOpcode()) return false;
2970  // Examine the condition code of the user.
2971  X86::CondCode CC = getCondFromNode(*FlagUI);
2972 
2973  switch (CC) {
2974  // Comparisons which don't examine the SF flag.
2975  case X86::COND_A: case X86::COND_AE:
2976  case X86::COND_B: case X86::COND_BE:
2977  case X86::COND_E: case X86::COND_NE:
2978  case X86::COND_O: case X86::COND_NO:
2979  case X86::COND_P: case X86::COND_NP:
2980  continue;
2981  // Anything else: assume conservatively.
2982  default:
2983  return false;
2984  }
2985  }
2986  }
2987  return true;
2988 }
2989 
2990 static bool mayUseCarryFlag(X86::CondCode CC) {
2991  switch (CC) {
2992  // Comparisons which don't examine the CF flag.
2993  case X86::COND_O: case X86::COND_NO:
2994  case X86::COND_E: case X86::COND_NE:
2995  case X86::COND_S: case X86::COND_NS:
2996  case X86::COND_P: case X86::COND_NP:
2997  case X86::COND_L: case X86::COND_GE:
2998  case X86::COND_G: case X86::COND_LE:
2999  return false;
3000  // Anything else: assume conservatively.
3001  default:
3002  return true;
3003  }
3004 }
3005 
3006 /// Test whether the given node which sets flags has any uses which require the
3007 /// CF flag to be accurate.
3008 bool X86DAGToDAGISel::hasNoCarryFlagUses(SDValue Flags) const {
3009  // Examine each user of the node.
3010  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
3011  UI != UE; ++UI) {
3012  // Only check things that use the flags.
3013  if (UI.getUse().getResNo() != Flags.getResNo())
3014  continue;
3015 
3016  unsigned UIOpc = UI->getOpcode();
3017 
3018  if (UIOpc == ISD::CopyToReg) {
3019  // Only examine CopyToReg uses that copy to EFLAGS.
3020  if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
3021  return false;
3022  // Examine each user of the CopyToReg use.
3023  for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end();
3024  FlagUI != FlagUE; ++FlagUI) {
3025  // Only examine the Flag result.
3026  if (FlagUI.getUse().getResNo() != 1)
3027  continue;
3028  // Anything unusual: assume conservatively.
3029  if (!FlagUI->isMachineOpcode())
3030  return false;
3031  // Examine the condition code of the user.
3032  X86::CondCode CC = getCondFromNode(*FlagUI);
3033 
3034  if (mayUseCarryFlag(CC))
3035  return false;
3036  }
3037 
3038  // This CopyToReg is ok. Move on to the next user.
3039  continue;
3040  }
3041 
3042  // This might be an unselected node. So look for the pre-isel opcodes that
3043  // use flags.
3044  unsigned CCOpNo;
3045  switch (UIOpc) {
3046  default:
3047  // Something unusual. Be conservative.
3048  return false;
3049  case X86ISD::SETCC: CCOpNo = 0; break;
3050  case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
3051  case X86ISD::CMOV: CCOpNo = 2; break;
3052  case X86ISD::BRCOND: CCOpNo = 2; break;
3053  }
3054 
3055  X86::CondCode CC = (X86::CondCode)UI->getConstantOperandVal(CCOpNo);
3056  if (mayUseCarryFlag(CC))
3057  return false;
3058  }
3059  return true;
3060 }
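// A minimal sketch (illustrative only) of how mayUseCarryFlag classifies the
// users inspected by the walk above: the unsigned orderings read CF, while
// zero/sign/parity/signed-order tests do not.
//   mayUseCarryFlag(X86::COND_B)  -> true   (below:        CF == 1)
//   mayUseCarryFlag(X86::COND_AE) -> true   (above/equal:  CF == 0)
//   mayUseCarryFlag(X86::COND_E)  -> false  (equal:        ZF == 1)
//   mayUseCarryFlag(X86::COND_L)  -> false  (less:         SF != OF)
// So a SETB user of EFLAGS blocks CF-clobbering rewrites; a SETE user does not.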
3061 
3062 /// Check whether or not the chain ending in StoreNode is suitable for doing
3063 /// the {load; op; store} to modify transformation.
3064 static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
3065  SDValue StoredVal, SelectionDAG *CurDAG,
3066  unsigned LoadOpNo,
3067  LoadSDNode *&LoadNode,
3068  SDValue &InputChain) {
3069  // Is the stored value result 0 of the operation?
3070  if (StoredVal.getResNo() != 0) return false;
3071 
3072  // Are there other uses of the operation other than the store?
3073  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
3074 
3075  // Is the store non-extending and non-indexed?
3076  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
3077  return false;
3078 
3079  SDValue Load = StoredVal->getOperand(LoadOpNo);
3080  // Is the stored value a non-extending and non-indexed load?
3081  if (!ISD::isNormalLoad(Load.getNode())) return false;
3082 
3083  // Return LoadNode by reference.
3084  LoadNode = cast<LoadSDNode>(Load);
3085 
3086  // Is the store the only read of the loaded value?
3087  if (!Load.hasOneUse())
3088  return false;
3089 
3090  // Is the address of the store the same as the load?
3091  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
3092  LoadNode->getOffset() != StoreNode->getOffset())
3093  return false;
3094 
3095  bool FoundLoad = false;
3096  SmallVector<SDValue, 4> ChainOps;
3097  SmallVector<const SDNode *, 4> LoopWorklist;
3098  SmallPtrSet<const SDNode *, 16> Visited;
3099  const unsigned int Max = 1024;
3100 
3101  // Visualization of Load-Op-Store fusion:
3102  // -------------------------
3103  // Legend:
3104  // *-lines = Chain operand dependencies.
3105  // |-lines = Normal operand dependencies.
3106  // Dependencies flow down and right. n-suffix references multiple nodes.
3107  //
3108  // C Xn C
3109  // * * *
3110  // * * *
3111  // Xn A-LD Yn TF Yn
3112  // * * \ | * |
3113  // * * \ | * |
3114  // * * \ | => A--LD_OP_ST
3115  // * * \| \
3116  // TF OP \
3117  // * | \ Zn
3118  // * | \
3119  // A-ST Zn
3120  //
3121 
3122  // This merge induces dependences: #1: Xn -> LD, OP, Zn
3123  //                                 #2: Yn -> LD
3124  //                                 #3: ST -> Zn
3125 
3126  // Ensure the transform is safe by checking for the dual
3127  // dependencies to make sure we do not induce a loop.
3128 
3129  // As LD is a predecessor to both OP and ST we can do this by checking:
3130  // a). if LD is a predecessor to a member of Xn or Yn.
3131  // b). if a Zn is a predecessor to ST.
3132 
3133  // However, (b) can only occur through being a chain predecessor to
3134  // ST, which is the same as Zn being a member or predecessor of Xn,
3135  // which is a subset of LD being a predecessor of Xn. So it's
3136  // subsumed by check (a).
3137 
3138  SDValue Chain = StoreNode->getChain();
3139 
3140  // Gather X elements in ChainOps.
3141  if (Chain == Load.getValue(1)) {
3142  FoundLoad = true;
3143  ChainOps.push_back(Load.getOperand(0));
3144  } else if (Chain.getOpcode() == ISD::TokenFactor) {
3145  for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
3146  SDValue Op = Chain.getOperand(i);
3147  if (Op == Load.getValue(1)) {
3148  FoundLoad = true;
3149  // Drop Load, but keep its chain. No cycle check necessary.
3150  ChainOps.push_back(Load.getOperand(0));
3151  continue;
3152  }
3153  LoopWorklist.push_back(Op.getNode());
3154  ChainOps.push_back(Op);
3155  }
3156  }
3157 
3158  if (!FoundLoad)
3159  return false;
3160 
3161  // Worklist is currently Xn. Add Yn to worklist.
3162  for (SDValue Op : StoredVal->ops())
3163  if (Op.getNode() != LoadNode)
3164  LoopWorklist.push_back(Op.getNode());
3165 
3166  // Check (a) if Load is a predecessor to Xn + Yn
3167  if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max,
3168  true))
3169  return false;
3170 
3171  InputChain =
3172  CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ChainOps);
3173  return true;
3174 }
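// A minimal sketch (illustrative only) of the cycle hazard that the
// hasPredecessorHelper walk above (capped at Max = 1024 visited nodes)
// rejects: if LD can be reached from some chain operand Xn or some OP
// operand Yn, then the fused LD_OP_ST node would become a predecessor of
// itself. For example:
//   t0 = load [p]        ; LD
//   t1 = op t0, t2       ; OP, where computing t2 is chained after LD
//   store t1, [p]        ; ST
// is only fusable when t2 (a Yn) does not itself depend on the load.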
3175 
3176 // Change a chain of {load; op; store} of the same value into a simple op
3177 // through memory of that value, if the uses of the modified value and its
3178 // address are suitable.
3179 //
3180 // The tablegen memory operand pattern is currently not able to match
3181 // the case where the EFLAGS on the original operation are used.
3182 //
3183 // To move this to tablegen, we'll need to improve tablegen to allow flags to
3184 // be transferred from a node in the pattern to the result node, probably with
3185 // a new keyword. For example, we have this
3186 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
3187 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
3188 // (implicit EFLAGS)]>;
3189 // but maybe need something like this
3190 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
3191 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
3192 // (transferrable EFLAGS)]>;
3193 //
3194 // Until then, we manually fold these and instruction select the operation
3195 // here.
3196 bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
3197  StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
3198  SDValue StoredVal = StoreNode->getOperand(1);
3199  unsigned Opc = StoredVal->getOpcode();
3200 
3201  // Before we try to select anything, make sure this is memory operand size
3202  // and opcode we can handle. Note that this must match the code below that
3203  // actually lowers the opcodes.
3204  EVT MemVT = StoreNode->getMemoryVT();
3205  if (MemVT != MVT::i64 && MemVT != MVT::i32 && MemVT != MVT::i16 &&
3206  MemVT != MVT::i8)
3207  return false;
3208 
3209  bool IsCommutable = false;
3210  bool IsNegate = false;
3211  switch (Opc) {
3212  default:
3213  return false;
3214  case X86ISD::SUB:
3215  IsNegate = isNullConstant(StoredVal.getOperand(0));
3216  break;
3217  case X86ISD::SBB:
3218  break;
3219  case X86ISD::ADD:
3220  case X86ISD::ADC:
3221  case X86ISD::AND:
3222  case X86ISD::OR:
3223  case X86ISD::XOR:
3224  IsCommutable = true;
3225  break;
3226  }
3227 
3228  unsigned LoadOpNo = IsNegate ? 1 : 0;
3229  LoadSDNode *LoadNode = nullptr;
3230  SDValue InputChain;
3231  if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
3232  LoadNode, InputChain)) {
3233  if (!IsCommutable)
3234  return false;
3235 
3236  // This operation is commutable, try the other operand.
3237  LoadOpNo = 1;
3238  if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
3239  LoadNode, InputChain))
3240  return false;
3241  }
3242 
3243  SDValue Base, Scale, Index, Disp, Segment;
3244  if (!selectAddr(LoadNode, LoadNode->getBasePtr(), Base, Scale, Index, Disp,
3245  Segment))
3246  return false;
3247 
3248  auto SelectOpcode = [&](unsigned Opc64, unsigned Opc32, unsigned Opc16,
3249  unsigned Opc8) {
3250  switch (MemVT.getSimpleVT().SimpleTy) {
3251  case MVT::i64:
3252  return Opc64;
3253  case MVT::i32:
3254  return Opc32;
3255  case MVT::i16:
3256  return Opc16;
3257  case MVT::i8:
3258  return Opc8;
3259  default:
3260  llvm_unreachable("Invalid size!");
3261  }
3262  };
3263 
3263 
3264  MachineSDNode *Result;
3265  switch (Opc) {
3266  case X86ISD::SUB:
3267  // Handle negate.
3268  if (IsNegate) {
3269  unsigned NewOpc = SelectOpcode(X86::NEG64m, X86::NEG32m, X86::NEG16m,
3270  X86::NEG8m);
3271  const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
3272  Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
3273  MVT::Other, Ops);
3274  break;
3275  }
3276  LLVM_FALLTHROUGH;
3277  case X86ISD::ADD:
3278  // Try to match inc/dec.
3279  if (!Subtarget->slowIncDec() || CurDAG->shouldOptForSize()) {
3280  bool IsOne = isOneConstant(StoredVal.getOperand(1));
3281  bool IsNegOne = isAllOnesConstant(StoredVal.getOperand(1));
3282  // An ADD/SUB by 1/-1 whose carry-flag result is unused can use inc/dec.
3283  if ((IsOne || IsNegOne) && hasNoCarryFlagUses(StoredVal.getValue(1))) {
3284  unsigned NewOpc =
3285  ((Opc == X86ISD::ADD) == IsOne)
3286  ? SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m, X86::INC8m)
3287  : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m, X86::DEC8m);
3288  const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
3289  Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
3290  MVT::Other, Ops);
3291  break;
3292  }
3293  }
3294  LLVM_FALLTHROUGH;
3295  case X86ISD::ADC:
3296  case X86ISD::SBB:
3297  case X86ISD::AND:
3298  case X86ISD::OR:
3299  case X86ISD::XOR: {
3300  auto SelectRegOpcode = [SelectOpcode](unsigned Opc) {
3301  switch (Opc) {
3302  case X86ISD::ADD:
3303  return SelectOpcode(X86::ADD64mr, X86::ADD32mr, X86::ADD16mr,
3304  X86::ADD8mr);
3305  case X86ISD::ADC:
3306  return SelectOpcode(X86::ADC64mr, X86::ADC32mr, X86::ADC16mr,
3307  X86::ADC8mr);
3308  case X86ISD::SUB:
3309  return SelectOpcode(X86::SUB64mr, X86::SUB32mr, X86::SUB16mr,
3310  X86::SUB8mr);
3311  case X86ISD::SBB:
3312  return SelectOpcode(X86::SBB64mr, X86::SBB32mr, X86::SBB16mr,
3313  X86::SBB8mr);
3314  case X86ISD::AND:
3315  return SelectOpcode(X86::AND64mr, X86::AND32mr, X86::AND16mr,
3316  X86::AND8mr);
3317  case X86ISD::OR:
3318  return SelectOpcode(X86::OR64mr, X86::OR32mr, X86::OR16mr, X86::OR8mr);
3319  case X86ISD::XOR:
3320  return SelectOpcode(X86::XOR64mr, X86::XOR32mr, X86::XOR16mr,
3321  X86::XOR8mr);
3322  default:
3323  llvm_unreachable("Invalid opcode!");
3324  }
3325  };
3326  auto SelectImm8Opcode = [SelectOpcode](unsigned Opc) {
3327  switch (Opc) {
3328  case X86ISD::ADD:
3329  return SelectOpcode(X86::ADD64mi8, X86::ADD32mi8, X86::ADD16mi8, 0);
3330  case X86ISD::ADC:
3331  return SelectOpcode(X86::ADC64mi8, X86::ADC32mi8, X86::ADC16mi8, 0);
3332  case X86ISD::SUB:
3333  return SelectOpcode(X86::SUB64mi8, X86::SUB32mi8, X86::SUB16mi8, 0);
3334  case X86ISD::SBB:
3335  return SelectOpcode(X86::SBB64mi8, X86::SBB32mi8, X86::SBB16mi8, 0);
3336  case X86ISD::AND:
3337  return SelectOpcode(X86::AND64mi8, X86::AND32mi8, X86::AND16mi8, 0);
3338  case X86ISD::OR:
3339  return SelectOpcode(X86::OR64mi8, X86::OR32mi8, X86::OR16mi8, 0);
3340  case X86ISD::XOR:
3341  return SelectOpcode(X86::XOR64mi8, X86::XOR32mi8, X86::XOR16mi8, 0);
3342  default:
3343  llvm_unreachable("Invalid opcode!");
3344  }
3345  };
3346  auto SelectImmOpcode = [SelectOpcode](unsigned Opc) {
3347  switch (Opc) {
3348  case X86ISD::ADD:
3349  return SelectOpcode(X86::ADD64mi32, X86::ADD32mi, X86::ADD16mi,
3350  X86::ADD8mi);
3351  case X86ISD::ADC:
3352  return SelectOpcode(X86::ADC64mi32, X86::ADC32mi, X86::ADC16mi,
3353  X86::ADC8mi);
3354  case X86ISD::SUB:
3355  return SelectOpcode(X86::SUB64mi32, X86::SUB32mi, X86::SUB16mi,
3356  X86::SUB8mi);
3357  case X86ISD::SBB:
3358  return SelectOpcode(X86::SBB64mi32, X86::SBB32mi, X86::SBB16mi,
3359  X86::SBB8mi);
3360  case X86ISD::AND:
3361  return SelectOpcode(X86::AND64mi32, X86::AND32mi, X86::AND16mi,
3362  X86::AND8mi);
3363  case X86ISD::OR:
3364  return SelectOpcode(X86::OR64mi32, X86::OR32mi, X86::OR16mi,
3365  X86::OR8mi);
3366  case X86ISD::XOR:
3367  return SelectOpcode(X86::XOR64mi32, X86::XOR32mi, X86::XOR16mi,
3368  X86::XOR8mi);
3369  default:
3370  llvm_unreachable("Invalid opcode!");
3371  }
3372  };
3373 
3374  unsigned NewOpc = SelectRegOpcode(Opc);
3375  SDValue Operand = StoredVal->getOperand(1-LoadOpNo);
3376 
3377  // See if the operand is a constant that we can fold into an immediate
3378  // operand.
3379  if (auto *OperandC = dyn_cast<ConstantSDNode>(Operand)) {
3380  int64_t OperandV = OperandC->getSExtValue();
3381 
3382  // Check if we can shrink the operand enough to fit in an immediate (or
3383  // fit into a smaller immediate) by negating it and switching the
3384  // operation.
3385  if ((Opc == X86ISD::ADD || Opc == X86ISD::SUB) &&
3386  ((MemVT != MVT::i8 && !isInt<8>(OperandV) && isInt<8>(-OperandV)) ||
3387  (MemVT == MVT::i64 && !isInt<32>(OperandV) &&
3388  isInt<32>(-OperandV))) &&
3389  hasNoCarryFlagUses(StoredVal.getValue(1))) {
3390  OperandV = -OperandV;
3391  Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD;
3392  }
3393 
3394  // First try to fit this into an Imm8 operand. If it doesn't fit, then try
3395  // the larger immediate operand.
3396  if (MemVT != MVT::i8 && isInt<8>(OperandV)) {
3397  Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
3398  NewOpc = SelectImm8Opcode(Opc);
3399  } else if (MemVT != MVT::i64 || isInt<32>(OperandV)) {
3400  Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
3401  NewOpc = SelectImmOpcode(Opc);
3402  }
3403  }
3404 
3405  if (Opc == X86ISD::ADC || Opc == X86ISD::SBB) {
3406  SDValue CopyTo =
3407  CurDAG->getCopyToReg(InputChain, SDLoc(Node), X86::EFLAGS,
3408  StoredVal.getOperand(2), SDValue());
3409 
3410  const SDValue Ops[] = {Base, Scale, Index, Disp,
3411  Segment, Operand, CopyTo, CopyTo.getValue(1)};
3412  Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3413  Ops);
3414  } else {
3415  const SDValue Ops[] = {Base, Scale, Index, Disp,
3416  Segment, Operand, InputChain};
3417  Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3418  Ops);
3419  }
3420  break;
3421  }
3422  default:
3423  llvm_unreachable("Invalid opcode!");
3424  }
3425 
3426  MachineMemOperand *MemOps[] = {StoreNode->getMemOperand(),
3427  LoadNode->getMemOperand()};
3428  CurDAG->setNodeMemRefs(Result, MemOps);
3429 
3430  // Update Load Chain uses as well.
3431  ReplaceUses(SDValue(LoadNode, 1), SDValue(Result, 1));
3432  ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
3433  ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
3434  CurDAG->RemoveDeadNode(Node);
3435  return true;
3436 }
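// The net effect at the assembly level, sketched for illustration:
//   mov eax, dword ptr [rdi]                                  ; load
//   add eax, 5                =>  add dword ptr [rdi], 5      ; RMW op
//   mov dword ptr [rdi], eax                                  ; store
// with the EFLAGS and chain results of the new RMW node replacing those of
// the original op and store, as done by the ReplaceUses calls above.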
3437 
3438 // See if this is an X & Mask that we can match to BEXTR/BZHI.
3439 // Where Mask is one of the following patterns:
3440 // a) x & (1 << nbits) - 1
3441 // b) x & ~(-1 << nbits)
3442 // c) x & (-1 >> (32 - y))
3443 // d) x << (32 - y) >> (32 - y)
3444 bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
3445  assert(
3446  (Node->getOpcode() == ISD::AND || Node->getOpcode() == ISD::SRL) &&
3447  "Should be either an and-mask, or right-shift after clearing high bits.");
3448 
3449  // BEXTR is a BMI instruction, BZHI is a BMI2 instruction. We need at least one.
3450  if (!Subtarget->hasBMI() && !Subtarget->hasBMI2())
3451  return false;
3452 
3453  MVT NVT = Node->getSimpleValueType(0);
3454 
3455  // Only supported for 32 and 64 bits.
3456  if (NVT != MVT::i32 && NVT != MVT::i64)
3457  return false;
3458 
3459  SDValue NBits;
3460  bool NegateNBits;
3461 
3462  // If we have BMI2's BZHI, we are ok with multi-use patterns.
3463  // Else, if we only have BMI1's BEXTR, we require one-use.
3464  const bool AllowExtraUsesByDefault = Subtarget->hasBMI2();
3465  auto checkUses = [AllowExtraUsesByDefault](SDValue Op, unsigned NUses,
3466  Optional<bool> AllowExtraUses) {
3467  return AllowExtraUses.getValueOr(AllowExtraUsesByDefault) ||
3468  Op.getNode()->hasNUsesOfValue(NUses, Op.getResNo());
3469  };
3470  auto checkOneUse = [checkUses](SDValue Op,
3471  Optional<bool> AllowExtraUses = None) {
3472  return checkUses(Op, 1, AllowExtraUses);
3473  };
3474  auto checkTwoUse = [checkUses](SDValue Op,
3475  Optional<bool> AllowExtraUses = None) {
3476  return checkUses(Op, 2, AllowExtraUses);
3477  };
3478 
3479  auto peekThroughOneUseTruncation = [checkOneUse](SDValue V) {
3480  if (V->getOpcode() == ISD::TRUNCATE && checkOneUse(V)) {
3481  assert(V.getSimpleValueType() == MVT::i32 &&
3482  V.getOperand(0).getSimpleValueType() == MVT::i64 &&
3483  "Expected i64 -> i32 truncation");
3484  V = V.getOperand(0);
3485  }
3486  return V;
3487  };
3488 
3489  // a) x & ((1 << nbits) + (-1))
3490  auto matchPatternA = [checkOneUse, peekThroughOneUseTruncation, &NBits,
3491  &NegateNBits](SDValue Mask) -> bool {
3492  // Match `add`. Must only have one use!
3493  if (Mask->getOpcode() != ISD::ADD || !checkOneUse(Mask))
3494  return false;
3495  // We should be adding all-ones constant (i.e. subtracting one.)
3496  if (!isAllOnesConstant(Mask->getOperand(1)))
3497  return false;
3498  // Match `1 << nbits`. Might be truncated. Must only have one use!
3499  SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
3500  if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
3501  return false;
3502  if (!isOneConstant(M0->getOperand(0)))
3503  return false;
3504  NBits = M0->getOperand(1);
3505  NegateNBits = false;
3506  return true;
3507  };
3508 
3509  auto isAllOnes = [this, peekThroughOneUseTruncation, NVT](SDValue V) {
3510  V = peekThroughOneUseTruncation(V);
3511  return CurDAG->MaskedValueIsAllOnes(
3512  V, APInt::getLowBitsSet(V.getSimpleValueType().getSizeInBits(),
3513  NVT.getSizeInBits()));
3514  };
3515 
3516  // b) x & ~(-1 << nbits)
3517  auto matchPatternB = [checkOneUse, isAllOnes, peekThroughOneUseTruncation,
3518  &NBits, &NegateNBits](SDValue Mask) -> bool {
3519  // Match `~()`. Must only have one use!
3520  if (Mask.getOpcode() != ISD::XOR || !checkOneUse(Mask))
3521  return false;
3522  // The -1 only has to be all-ones for the final Node's NVT.
3523  if (!isAllOnes(Mask->getOperand(1)))
3524  return false;
3525  // Match `-1 << nbits`. Might be truncated. Must only have one use!
3526  SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
3527  if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
3528  return false;
3529  // The -1 only has to be all-ones for the final Node's NVT.
3530  if (!isAllOnes(M0->getOperand(0)))
3531  return false;
3532  NBits = M0->getOperand(1);
3533  NegateNBits = false;
3534  return true;
3535  };
3536 
3537  // Try to match potentially-truncated shift amount as `(bitwidth - y)`,
3538  // or leave the shift amount as-is, but then we'll have to negate it.
3539  auto canonicalizeShiftAmt = [&NBits, &NegateNBits](SDValue ShiftAmt,
3540  unsigned Bitwidth) {
3541  NBits = ShiftAmt;
3542  NegateNBits = true;
3543  // Skip over a truncate of the shift amount, if any.
3544  if (NBits.getOpcode() == ISD::TRUNCATE)
3545  NBits = NBits.getOperand(0);
3546  // Try to match the shift amount as (bitwidth - y). It should go away, too.
3547  // If it doesn't match, that's fine, we'll just negate it ourselves.
3548  if (NBits.getOpcode() != ISD::SUB)
3549  return;
3550  auto *V0 = dyn_cast<ConstantSDNode>(NBits.getOperand(0));
3551  if (!V0 || V0->getZExtValue() != Bitwidth)
3552  return;
3553  NBits = NBits.getOperand(1);
3554  NegateNBits = false;
3555  };
3556 
3557  // c) x & (-1 >> z) but then we'll have to subtract z from bitwidth
3558  // or
3559  // c) x & (-1 >> (32 - y))
3560  auto matchPatternC = [checkOneUse, peekThroughOneUseTruncation, &NegateNBits,
3561  canonicalizeShiftAmt](SDValue Mask) -> bool {
3562  // The mask itself may be truncated.
3563  Mask = peekThroughOneUseTruncation(Mask);
3564  unsigned Bitwidth = Mask.getSimpleValueType().getSizeInBits();
3565  // Match `l>>`. Must only have one use!
3566  if (Mask.getOpcode() != ISD::SRL || !checkOneUse(Mask))
3567  return false;
3568  // We should be shifting truly all-ones constant.
3569  if (!isAllOnesConstant(Mask.getOperand(0)))
3570  return false;
3571  SDValue M1 = Mask.getOperand(1);
3572  // The shift amount should not be used externally.
3573  if (!checkOneUse(M1))
3574  return false;
3575  canonicalizeShiftAmt(M1, Bitwidth);
3576  // Pattern c. is non-canonical, and is expanded into pattern d. iff there
3577  // is no extra use of the mask. Clearly, there was one since we are here.
3578  // But at the same time, if we need to negate the shift amount,
3579  // then we don't want the mask to stick around, else it's unprofitable.
3580  return !NegateNBits;
3581  };
3582 
3583  SDValue X;
3584 
3585  // d) x << z >> z but then we'll have to subtract z from bitwidth
3586  // or
3587  // d) x << (32 - y) >> (32 - y)
3588  auto matchPatternD = [checkOneUse, checkTwoUse, canonicalizeShiftAmt,
3589  AllowExtraUsesByDefault, &NegateNBits,
3590  &X](SDNode *Node) -> bool {
3591  if (Node->getOpcode() != ISD::SRL)
3592  return false;
3593  SDValue N0 = Node->getOperand(0);
3594  if (N0->getOpcode() != ISD::SHL)
3595  return false;
3596  unsigned Bitwidth = N0.getSimpleValueType().getSizeInBits();
3597  SDValue N1 = Node->getOperand(1);
3598  SDValue N01 = N0->getOperand(1);
3599  // Both of the shifts must be by the exact same value.
3600  if (N1 != N01)
3601  return false;
3602  canonicalizeShiftAmt(N1, Bitwidth);
3603  // There should not be any external uses of the inner shift / shift amount.
3604  // Note that while we are generally okay with external uses given BMI2,
3605  // iff we need to negate the shift amount, we are not okay with extra uses.
3606  const bool AllowExtraUses = AllowExtraUsesByDefault && !NegateNBits;
3607  if (!checkOneUse(N0, AllowExtraUses) || !checkTwoUse(N1, AllowExtraUses))
3608  return false;
3609  X = N0->getOperand(0);
3610  return true;
3611  };
3612 
3613  auto matchLowBitMask = [matchPatternA, matchPatternB,
3614  matchPatternC](SDValue Mask) -> bool {
3615  return matchPatternA(Mask) || matchPatternB(Mask) || matchPatternC(Mask);
3616  };
3617 
3618  if (Node->getOpcode() == ISD::AND) {
3619  X = Node->getOperand(0);
3620  SDValue Mask = Node->getOperand(1);
3621 
3622  if (matchLowBitMask(Mask)) {
3623  // Great.
3624  } else {
3625  std::swap(X, Mask);
3626  if (!matchLowBitMask(Mask))
3627  return false;
3628  }
3629  } else if (!matchPatternD(Node))
3630  return false;
3631 
3632  // If we need to negate the shift amount, require BMI2 BZHI support.
3633  // It's just too unprofitable for BMI1 BEXTR.
3634  if (NegateNBits && !Subtarget->hasBMI2())
3635  return false;
3636 
3637  SDLoc DL(Node);
3638 
3639  // Truncate the shift amount.
3640  NBits = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NBits);
3641  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3642 
3643  // Insert 8-bit NBits into lowest 8 bits of 32-bit register.
3644  // All the other bits are undefined, we do not care about them.
3645  SDValue ImplDef = SDValue(
3646  CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i32), 0);
3647  insertDAGNode(*CurDAG, SDValue(Node, 0), ImplDef);
3648 
3649  SDValue SRIdxVal = CurDAG->getTargetConstant(X86::sub_8bit, DL, MVT::i32);
3650  insertDAGNode(*CurDAG, SDValue(Node, 0), SRIdxVal);
3651  NBits = SDValue(CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
3652  MVT::i32, ImplDef, NBits, SRIdxVal),
3653  0);
3654  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3655 
3656  // We might have matched the amount of high bits to be cleared,
3657  // but we want the amount of low bits to be kept, so negate it then.
3658  if (NegateNBits) {
3659  SDValue BitWidthC = CurDAG->getConstant(NVT.getSizeInBits(), DL, MVT::i32);
3660  insertDAGNode(*CurDAG, SDValue(Node, 0), BitWidthC);
3661 
3662  NBits = CurDAG->getNode(ISD::SUB, DL, MVT::i32, BitWidthC, NBits);
3663  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3664  }
3665 
3666  if (Subtarget->hasBMI2()) {
3667  // Great, just emit the BZHI.
3668  if (NVT != MVT::i32) {
3669  // But have to place the bit count into the wide-enough register first.
3670  NBits = CurDAG->getNode(ISD::ANY_EXTEND, DL, NVT, NBits);
3671  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3672  }
3673 
3674  SDValue Extract = CurDAG->getNode(X86ISD::BZHI, DL, NVT, X, NBits);
3675  ReplaceNode(Node, Extract.getNode());
3676  SelectCode(Extract.getNode());
3677  return true;
3678  }
3679 
3680 // Else, if we do *NOT* have BMI2, let's find out if the 'X' is
3681 // *logically* shifted (potentially with a one-use trunc in between),
3682 // and if the truncation was the only use of the shift,
3683 // and if so look past the one-use truncation.
3684  {
3685  SDValue RealX = peekThroughOneUseTruncation(X);
3686  // FIXME: only if the shift is one-use?
3687  if (RealX != X && RealX.getOpcode() == ISD::SRL)
3688  X = RealX;
3689  }
3690 
3691  MVT XVT = X.getSimpleValueType();
3692 
3693  // Else, emitting BEXTR requires one more step.
3694  // The 'control' of BEXTR has the pattern of:
3695  // [15...8 bit][ 7...0 bit] location
3696  // [ bit count][ shift] name
3697  // I.e. 0b000000011'00000001 means (x >> 0b1) & 0b11
3698 
3699  // Shift NBits left by 8 bits, thus producing 'control'.
3700  // This makes the low 8 bits zero.
3701  SDValue C8 = CurDAG->getConstant(8, DL, MVT::i8);
3702  insertDAGNode(*CurDAG, SDValue(Node, 0), C8);
3703  SDValue Control = CurDAG->getNode(ISD::SHL, DL, MVT::i32, NBits, C8);
3704  insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3705 
3706  // If the 'X' is *logically* shifted, we can fold that shift into 'control'.
3707  // FIXME: only if the shift is one-use?
3708  if (X.getOpcode() == ISD::SRL) {
3709  SDValue ShiftAmt = X.getOperand(1);
3710  X = X.getOperand(0);
3711 
3712  assert(ShiftAmt.getValueType() == MVT::i8 &&
3713  "Expected shift amount to be i8");
3714 
3715  // Now, *zero*-extend the shift amount. The bits 8...15 *must* be zero!
3716  // We could zext to i16 in some form, but we intentionally don't do that.
3717  SDValue OrigShiftAmt = ShiftAmt;
3718  ShiftAmt = CurDAG->getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShiftAmt);
3719  insertDAGNode(*CurDAG, OrigShiftAmt, ShiftAmt);
3720 
3721  // And now 'or' these low 8 bits of shift amount into the 'control'.
3722  Control = CurDAG->getNode(ISD::OR, DL, MVT::i32, Control, ShiftAmt);
3723  insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3724  }
3725 
3726  // But have to place the 'control' into the wide-enough register first.
3727  if (XVT != MVT::i32) {
3728  Control = CurDAG->getNode(ISD::ANY_EXTEND, DL, XVT, Control);
3729  insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3730  }
3731 
3732  // And finally, form the BEXTR itself.
3733  SDValue Extract = CurDAG->getNode(X86ISD::BEXTR, DL, XVT, X, Control);
3734 
3735  // The 'X' was originally truncated. Do that now.
3736  if (XVT != NVT) {
3737  insertDAGNode(*CurDAG, SDValue(Node, 0), Extract);
3738  Extract = CurDAG->getNode(ISD::TRUNCATE, DL, NVT, Extract);
3739  }
3740 
3741  ReplaceNode(Node, Extract.getNode());
3742  SelectCode(Extract.getNode());
3743 
3744  return true;
3745 }
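// As a compile-time sanity sketch (illustrative only; the helpers below are
// not part of this file), the four mask shapes a)-d) handled above all keep
// the low 'nbits' of x; for a 32-bit example with nbits = 13:
static constexpr uint32_t lowBitsA(uint32_t X, unsigned N) { return X & ((1u << N) - 1); }
static constexpr uint32_t lowBitsB(uint32_t X, unsigned N) { return X & ~(~0u << N); }
static constexpr uint32_t lowBitsC(uint32_t X, unsigned N) { return X & (~0u >> (32 - N)); }
static constexpr uint32_t lowBitsD(uint32_t X, unsigned N) { return (X << (32 - N)) >> (32 - N); }
static_assert(lowBitsA(0xDEADBEEFu, 13) == lowBitsB(0xDEADBEEFu, 13) &&
              lowBitsB(0xDEADBEEFu, 13) == lowBitsC(0xDEADBEEFu, 13) &&
              lowBitsC(0xDEADBEEFu, 13) == lowBitsD(0xDEADBEEFu, 13),
              "mask patterns a)-d) agree");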
3746 
3747 // See if this is an (X >> C1) & C2 that we can match to BEXTR/BEXTRI.
3748 MachineSDNode *X86DAGToDAGISel::matchBEXTRFromAndImm(SDNode *Node) {
3749  MVT NVT = Node->getSimpleValueType(0);
3750  SDLoc dl(Node);
3751 
3752  SDValue N0 = Node->getOperand(0);
3753  SDValue N1 = Node->getOperand(1);
3754 
3755  // If we have TBM we can use an immediate for the control. If we have BMI
3756  // we should only do this if the BEXTR instruction is implemented well.
3757  // Otherwise moving the control into a register makes this more costly.
3758  // TODO: Maybe load folding, greater than 32-bit masks, or a guarantee of LICM
3759  // hoisting the move immediate would make it worthwhile with a less optimal
3760  // BEXTR?
3761  bool PreferBEXTR =
3762  Subtarget->hasTBM() || (Subtarget->hasBMI() && Subtarget->hasFastBEXTR());
3763  if (!PreferBEXTR && !Subtarget->hasBMI2())
3764  return nullptr;
3765 
3766  // Must have a shift right.
3767  if (N0->getOpcode() != ISD::SRL && N0->getOpcode() != ISD::SRA)
3768  return nullptr;
3769 
3770  // Shift can't have additional users.
3771  if (!N0->hasOneUse())
3772  return nullptr;
3773 
3774  // Only supported for 32 and 64 bits.
3775  if (NVT != MVT::i32 && NVT != MVT::i64)
3776  return nullptr;
3777 
3778  // Shift amount and RHS of and must be constant.
3779  ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(N1);
3780  ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
3781  if (!MaskCst || !ShiftCst)
3782  return nullptr;
3783 
3784  // And RHS must be a mask.
3785  uint64_t Mask = MaskCst->getZExtValue();
3786  if (!isMask_64(Mask))
3787  return nullptr;
3788 
3789  uint64_t Shift = ShiftCst->getZExtValue();
3790  uint64_t MaskSize = countPopulation(Mask);
3791 
3792  // Don't interfere with something that can be handled by extracting AH.
3793  // TODO: If we are able to fold a load, BEXTR might still be better than AH.
3794  if (Shift == 8 && MaskSize == 8)
3795  return nullptr;
3796 
3797  // Make sure we are only using bits that were in the original value, not
3798  // shifted in.
3799  if (Shift + MaskSize > NVT.getSizeInBits())
3800  return nullptr;
3801 
3802  // BZHI, if available, is always fast, unlike BEXTR. But even if we decide
3803  // that we can't use BEXTR, it is only worthwhile using BZHI if the mask
3804  // does not fit into 32 bits. Load folding is not a sufficient reason.
3805  if (!PreferBEXTR && MaskSize <= 32)
3806  return nullptr;
3807 
3808  SDValue Control;
3809  unsigned ROpc, MOpc;
3810 
3811  if (!PreferBEXTR) {
3812  assert(Subtarget->hasBMI2() && "We must have BMI2's BZHI then.");
3813  // If we can't make use of BEXTR then we can't fuse shift+mask stages.
3814  // Let's perform the mask first, and apply the shift later. Note that we
3815  // need to widen the mask to account for the shift we'll apply afterwards!
3816  Control = CurDAG->getTargetConstant(Shift + MaskSize, dl, NVT);
3817  ROpc = NVT == MVT::i64 ? X86::BZHI64rr : X86::BZHI32rr;
3818  MOpc = NVT == MVT::i64 ? X86::BZHI64rm : X86::BZHI32rm;
3819  unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
3820  Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
3821  } else {
3822  // The 'control' of BEXTR has the pattern of:
3823  // [15...8 bit][ 7...0 bit] location
3824  // [ bit count][ shift] name
3825  // I.e. 0b000000011'00000001 means (x >> 0b1) & 0b11
3826  Control = CurDAG->getTargetConstant(Shift | (MaskSize << 8), dl, NVT);
3827  if (Subtarget->hasTBM()) {
3828  ROpc = NVT == MVT::i64 ? X86::BEXTRI64ri : X86::BEXTRI32ri;
3829  MOpc = NVT == MVT::i64 ? X86::BEXTRI64mi : X86::BEXTRI32mi;
3830  } else {
3831  assert(Subtarget->hasBMI() && "We must have BMI1's BEXTR then.");
3832  // BMI requires the immediate to be placed in a register.
3833  ROpc = NVT == MVT::i64 ? X86::BEXTR64rr : X86::BEXTR32rr;
3834  MOpc = NVT == MVT::i64 ? X86::BEXTR64rm : X86::BEXTR32rm;
3835  unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
3836  Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
3837  }
3838  }
3839 
3840  MachineSDNode *NewNode;
3841  SDValue Input = N0->getOperand(0);
3842  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3843  if (tryFoldLoad(Node, N0.getNode(), Input, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3844  SDValue Ops[] = {
3845  Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Control, Input.getOperand(0)};
3846  SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
3847  NewNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3848  // Update the chain.
3849  ReplaceUses(Input.getValue(1), SDValue(NewNode, 2));
3850  // Record the mem-refs
3851  CurDAG->setNodeMemRefs(NewNode, {cast<LoadSDNode>(Input)->getMemOperand()});
3852  } else {
3853  NewNode = CurDAG->getMachineNode(ROpc, dl, NVT, MVT::i32, Input, Control);
3854  }
3855 
3856  if (!PreferBEXTR) {
3857  // We still need to apply the shift.
3858  SDValue ShAmt = CurDAG->getTargetConstant(Shift, dl, NVT);
3859  unsigned NewOpc = NVT == MVT::i64 ? X86::SHR64ri : X86::SHR32ri;
3860  NewNode =
3861  CurDAG->getMachineNode(NewOpc, dl, NVT, SDValue(NewNode, 0), ShAmt);
3862  }
3863 
3864  return NewNode;
3865 }
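// A compile-time sketch (illustrative only; bextrControl is not part of this
// file) of the BEXTR control word built above: bits 7..0 hold the start bit
// (shift) and bits 15..8 hold the length.
static constexpr uint64_t bextrControl(uint64_t Shift, uint64_t MaskSize) {
  return Shift | (MaskSize << 8);
}
// Extracting 4 bits starting at bit 5 therefore uses control 0x0405, i.e.
// bextr(x, 0x0405) == (x >> 5) & 0xf.
static_assert(bextrControl(5, 4) == 0x0405, "control = shift | (length << 8)");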
3866 
3867 // Emit a PCMPISTR(I/M) instruction.
3868 MachineSDNode *X86DAGToDAGISel::emitPCMPISTR(unsigned ROpc, unsigned MOpc,
3869  bool MayFoldLoad, const SDLoc &dl,
3870  MVT VT, SDNode *Node) {
3871  SDValue N0 = Node->getOperand(0);
3872  SDValue N1 = Node->getOperand(1);
3873  SDValue Imm = Node->getOperand(2);
3874  const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3875  Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3876 
3877  // Try to fold a load. No need to check alignment.
3878  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3879  if (MayFoldLoad && tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3880  SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3881  N1.getOperand(0) };
3882  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other);
3883  MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3884  // Update the chain.
3885  ReplaceUses(N1.getValue(1), SDValue(CNode, 2));
3886  // Record the mem-refs
3887  CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
3888  return CNode;
3889  }
3890 
3891  SDValue Ops[] = { N0, N1, Imm };
3892  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32);
3893  MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3894  return CNode;
3895 }
3896 
3897 // Emit a PCMPESTR(I/M) instruction. Also return the Glue result in case we need
3898 // to emit a second instruction after this one. This is needed since we have two
3899 // copyToReg nodes glued before this and we need to continue that glue through.
3900 MachineSDNode *X86DAGToDAGISel::emitPCMPESTR(unsigned ROpc, unsigned MOpc,
3901  bool MayFoldLoad, const SDLoc &dl,
3902  MVT VT, SDNode *Node,
3903  SDValue &InFlag) {
3904  SDValue N0 = Node->getOperand(0);
3905  SDValue N2 = Node->getOperand(2);
3906  SDValue Imm = Node->getOperand(4);
3907  const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3908  Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3909 
3910  // Try to fold a load. No need to check alignment.
3911  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3912  if (MayFoldLoad && tryFoldLoad(Node, N2, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3913  SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3914  N2.getOperand(0), InFlag };
3915  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other, MVT::Glue);
3916  MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3917  InFlag = SDValue(CNode, 3);
3918  // Update the chain.
3919  ReplaceUses(N2.getValue(1), SDValue(CNode, 2));
3920  // Record the mem-refs
3921  CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N2)->getMemOperand()});
3922  return CNode;
3923  }
3924 
3925  SDValue Ops[] = { N0, N2, Imm, InFlag };
3926  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Glue);
3927  MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3928  InFlag = SDValue(CNode, 2);
3929  return CNode;
3930 }
3931 
3932 bool X86DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
3933  EVT VT = N->getValueType(0);
3934 
3935  // Only handle scalar shifts.
3936  if (VT.isVector())
3937  return false;
3938 
3939  // Narrower shifts only mask to 5 bits in hardware.
3940  unsigned Size = VT == MVT::i64 ? 64 : 32;
3941 
3942  SDValue OrigShiftAmt = N->getOperand(1);
3943  SDValue ShiftAmt = OrigShiftAmt;
3944  SDLoc DL(N);
3945 
3946  // Skip over a truncate of the shift amount.
3947  if (ShiftAmt->getOpcode() == ISD::TRUNCATE)
3948  ShiftAmt = ShiftAmt->getOperand(0);
3949 
3950  // This function is called after X86DAGToDAGISel::matchBitExtract(),
3951  // so we are not afraid that we might mess up BZHI/BEXTR pattern.
3952 
3953  SDValue NewShiftAmt;
3954  if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
3955  SDValue Add0 = ShiftAmt->getOperand(0);
3956  SDValue Add1 = ShiftAmt->getOperand(1);
3957  auto *Add0C = dyn_cast<ConstantSDNode>(Add0);
3958  auto *Add1C = dyn_cast<ConstantSDNode>(Add1);
3959  // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
3960  // to avoid the ADD/SUB.
3961  if (Add1C && Add1C->getAPIntValue().urem(Size) == 0) {
3962  NewShiftAmt = Add0;
3963  // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
3964  // to generate a NEG instead of a SUB of a constant.
3965  } else if (ShiftAmt->getOpcode() == ISD::SUB && Add0C &&
3966  Add0C->getZExtValue() != 0) {
3967  EVT SubVT = ShiftAmt.getValueType();
3968  SDValue X;
3969  if (Add0C->getZExtValue() % Size == 0)
3970  X = Add1;
3971  else if (ShiftAmt.hasOneUse() && Size == 64 &&
3972  Add0C->getZExtValue() % 32 == 0) {
3973  // We have a 64-bit shift by (n*32-x), turn it into -(x+n*32).
3974  // This is mainly beneficial if we already compute (x+n*32).
3975  if (Add1.getOpcode() == ISD::TRUNCATE) {
3976  Add1 = Add1.getOperand(0);
3977  SubVT = Add1.getValueType();
3978  }
3979  if (Add0.getValueType() != SubVT) {
3980  Add0 = CurDAG->getZExtOrTrunc(Add0, DL, SubVT);
3981  insertDAGNode(*CurDAG, OrigShiftAmt, Add0);
3982  }
3983 
3984  X = CurDAG->getNode(ISD::ADD, DL, SubVT, Add1, Add0);
3985  insertDAGNode(*CurDAG, OrigShiftAmt, X);
3986  } else
3987  return false;
3988  // Insert a negate op.
3989  // TODO: This isn't guaranteed to replace the sub if there is a logic cone
3990  // that uses it that's not a shift.
3991  SDValue Zero = CurDAG->getConstant(0, DL, SubVT);
3992  SDValue Neg = CurDAG->getNode(ISD::SUB, DL, SubVT, Zero, X);
3993  NewShiftAmt = Neg;
3994 
3995  // Insert these operands into a valid topological order so they can
3996  // get selected independently.
3997  insertDAGNode(*CurDAG, OrigShiftAmt, Zero);
3998  insertDAGNode(*CurDAG, OrigShiftAmt, Neg);
3999  } else
4000  return false;
4001  } else
4002  return false;
4003 
4004  if (NewShiftAmt.getValueType() != MVT::i8) {
4005  // Need to truncate the shift amount.
4006  NewShiftAmt = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NewShiftAmt);
4007  // Add to a correct topological ordering.
4008  insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
4009  }
4010 
4011  // Insert a new mask to keep the shift amount legal. This should be removed
4012  // by isel patterns.
4013  NewShiftAmt = CurDAG->getNode(ISD::AND, DL, MVT::i8, NewShiftAmt,
4014  CurDAG->getConstant(Size - 1, DL, MVT::i8));
4015  // Place in a correct topological ordering.
4016  insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
4017 
4018  SDNode *UpdatedNode = CurDAG->UpdateNodeOperands(N, N->getOperand(0),
4019  NewShiftAmt);
4020  if (UpdatedNode != N) {
4021  // If we found an existing node, we should replace ourselves with that node
4022  // and wait for it to be selected after its other users.
4023  ReplaceNode(N, UpdatedNode);
4024  return true;
4025  }
4026 
4027  // If the original shift amount is now dead, delete it so that we don't run
4028  // it through isel.
4029  if (OrigShiftAmt.getNode()->use_empty())
4030  CurDAG->RemoveDeadNode(OrigShiftAmt.getNode());
4031 
4032  // Now that we've optimized the shift amount, defer to normal isel to get
4033  // load folding and legacy vs BMI2 selection without repeating it here.
4034  SelectCode(N);
4035  return true;
4036 }
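// A compile-time sketch (illustrative only; shl64 is not part of this file)
// of the modular identity the rewrite above relies on: hardware shifts mask
// their amount (mod 64 here), so adding or subtracting a multiple of the
// size to the shift amount is a no-op.
static constexpr uint64_t shl64(uint64_t X, unsigned Amt) {
  return X << (Amt & 63); // what SHL does with a 64-bit operand
}
static_assert(shl64(0x1234, 3 + 64) == shl64(0x1234, 3),
              "shift by (x + 64) == shift by x for 64-bit shifts");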
4037 
4038 bool X86DAGToDAGISel::tryShrinkShlLogicImm(SDNode *N) {
4039  MVT NVT = N->getSimpleValueType(0);
4040  unsigned Opcode = N->getOpcode();
4041  SDLoc dl(N);
4042 
4043  // For operations of the form (x << C1) op C2, check if we can use a smaller
4044  // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
4045  SDValue Shift = N->getOperand(0);
4046  SDValue N1 = N->getOperand(1);
4047 
4048  ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
4049  if (!Cst)
4050  return false;
4051 
4052  int64_t Val = Cst->getSExtValue();
4053 
4054  // If we have an any_extend feeding the AND, look through it to see if there
4055  // is a shift behind it. But only if the AND doesn't use the extended bits.
4056  // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
4057  bool FoundAnyExtend = false;
4058  if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
4059  Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
4060  isUInt<32>(Val)) {
4061  FoundAnyExtend = true;
4062  Shift = Shift.getOperand(0);
4063  }
4064 
4065  if (Shift.getOpcode() != ISD::SHL || !Shift.hasOneUse())
4066  return false;
4067 
4068  // i8 is unshrinkable, i16 should be promoted to i32.
4069  if (NVT != MVT::i32 && NVT != MVT::i64)
4070  return false;
4071 
4072  ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
4073  if (!ShlCst)
4074  return false;
4075 
4076  uint64_t ShAmt = ShlCst->getZExtValue();
4077 
4078  // Make sure that we don't change the operation by removing bits.
4079  // This only matters for OR and XOR, AND is unaffected.
4080  uint64_t RemovedBitsMask = (1ULL << ShAmt) - 1;
4081  if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
4082  return false;
4083 
4084  // Check the minimum bitwidth for the new constant.
4085  // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
4086  auto CanShrinkImmediate = [&](int64_t &ShiftedVal) {
4087  if (Opcode == ISD::AND) {
4088  // AND32ri is the same as AND64ri32 with zext imm.
4089  // Try this before sign extended immediates below.
4090  ShiftedVal = (uint64_t)Val >> ShAmt;
4091  if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
4092  return true;
4093  // Also swap order when the AND can become MOVZX.
4094  if (ShiftedVal == UINT8_MAX || ShiftedVal == UINT16_MAX)
4095  return true;
4096  }
4097  ShiftedVal = Val >> ShAmt;
4098  if ((!isInt<8>(Val) && isInt<8>(ShiftedVal)) ||
4099  (!isInt<32>(Val) && isInt<32>(ShiftedVal)))
4100  return true;
4101  if (Opcode != ISD::AND) {
4102  // MOV32ri+OR64r/XOR64r is cheaper than MOV64ri64+OR64rr/XOR64rr
4103  ShiftedVal = (uint64_t)Val >> ShAmt;
4104  if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
4105  return true;
4106  }
4107  return false;
4108  };
4109 
4110  int64_t ShiftedVal;
4111  if (!CanShrinkImmediate(ShiftedVal))
4112  return false;
4113 
4114  // Ok, we can reorder to get a smaller immediate.
4115 
4116  // But, it's possible the original immediate allowed an AND to become MOVZX.
4117  // We do this check late to delay the MaskedValueIsZero call as long as
4118  // possible.
4119  if (Opcode == ISD::AND) {
4120  // Find the smallest zext this could possibly be.
4121  unsigned ZExtWidth = Cst->getAPIntValue().getActiveBits();
4122  ZExtWidth = PowerOf2Ceil(std::max(ZExtWidth, 8U));
4123 
4124  // Figure out which bits need to be zero to achieve that mask.
4125  APInt NeededMask = APInt::getLowBitsSet(NVT.getSizeInBits(),
4126  ZExtWidth);
4127  NeededMask &= ~Cst->getAPIntValue();
4128 
4129  if (CurDAG->MaskedValueIsZero(N->getOperand(0), NeededMask))
4130  return false;
4131  }
4132 
4133  SDValue X = Shift.getOperand(0);
4134  if (FoundAnyExtend) {
4135  SDValue NewX = CurDAG->getNode(ISD::ANY_EXTEND, dl, NVT, X);
4136  insertDAGNode(*CurDAG, SDValue(N, 0), NewX);
4137  X = NewX;
4138  }
4139 
4140  SDValue NewCst = CurDAG->getConstant(ShiftedVal, dl, NVT);
4141  insertDAGNode(*CurDAG, SDValue(N, 0), NewCst);
4142  SDValue NewBinOp = CurDAG->getNode(Opcode, dl, NVT, X, NewCst);
4143  insertDAGNode(*CurDAG, SDValue(N, 0), NewBinOp);
4144  SDValue NewSHL = CurDAG->getNode(ISD::SHL, dl, NVT, NewBinOp,
4145  Shift.getOperand(1));
4146  ReplaceNode(N, NewSHL.getNode());
4147  SelectCode(NewSHL.getNode());
4148  return true;
4149 }
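// A compile-time sketch (illustrative only; orThenShl is not part of this
// file) of the reassociation performed above: when the low ShAmt bits of C2
// are zero (and always for AND), the constant folds through the shift, where
// it may fit a smaller immediate encoding (here imm8 0xff instead of 0xff00).
static constexpr uint64_t orThenShl(uint64_t X, unsigned ShAmt, uint64_t C2) {
  return (X | (C2 >> ShAmt)) << ShAmt;
}
static_assert(orThenShl(0x123456, 8, 0xFF00ull) ==
                  ((0x123456ull << 8) | 0xFF00ull),
              "(x << 8) | 0xff00 == (x | 0xff) << 8");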
4150 
4151 bool X86DAGToDAGISel::matchVPTERNLOG(SDNode *Root, SDNode *ParentA,
4152  SDNode *ParentB, SDNode *ParentC,
4153  SDValue A, SDValue B, SDValue C,
4154  uint8_t Imm) {
4155  assert(A.isOperandOf(ParentA) && B.isOperandOf(ParentB) &&
4156  C.isOperandOf(ParentC) && "Incorrect parent node");
4157 
4158  auto tryFoldLoadOrBCast =
4159  [this](SDNode *Root, SDNode *P, SDValue &L, SDValue &Base, SDValue &Scale,
4160  SDValue &Index, SDValue &Disp, SDValue &Segment) {
4161  if (tryFoldLoad(Root, P, L, Base, Scale, Index, Disp, Segment))
4162  return true;
4163 
4164  // Not a load, check for broadcast which may be behind a bitcast.
4165  if (L.getOpcode() == ISD::BITCAST && L.hasOneUse()) {
4166  P = L.getNode();
4167  L = L.getOperand(0);
4168  }
4169 
4170  if (L.getOpcode() != X86ISD::VBROADCAST_LOAD)
4171  return false;
4172 
4173  // Only 32 and 64 bit broadcasts are supported.
4174  auto *MemIntr = cast<MemIntrinsicSDNode>(L);
4175  unsigned Size = MemIntr->getMemoryVT().getSizeInBits();
4176  if (Size != 32 && Size != 64)
4177  return false;
4178 
4179  return tryFoldBroadcast(Root, P, L, Base, Scale, Index, Disp, Segment);
4180  };
4181 
4182  bool FoldedLoad = false;
4183  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4184  if (tryFoldLoadOrBCast(Root, ParentC, C, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4185  FoldedLoad = true;
4186  } else if (tryFoldLoadOrBCast(Root, ParentA, A, Tmp0, Tmp1, Tmp2, Tmp3,
4187  Tmp4)) {
4188  FoldedLoad = true;
4189  std::swap(A, C);
4190  // Swap bits 1/4 and 3/6.
4191  uint8_t OldImm = Imm;
4192  Imm = OldImm & 0xa5;
4193  if (OldImm & 0x02) Imm |= 0x10;
4194  if (OldImm & 0x10) Imm |= 0x02;
4195  if (OldImm & 0x08) Imm |= 0x40;
4196  if (OldImm & 0x40) Imm |= 0x08;
4197  } else if (tryFoldLoadOrBCast(Root, ParentB, B, Tmp0, Tmp1, Tmp2, Tmp3,
4198  Tmp4)) {
4199  FoldedLoad = true;
4200  std::swap(B, C);
4201  // Swap bits 1/2 and 5/6.
4202  uint8_t OldImm = Imm;
4203  Imm = OldImm & 0x99;
4204  if (OldImm & 0x02) Imm |= 0x04;
4205  if (OldImm & 0x04) Imm |= 0x02;
4206  if (OldImm & 0x20) Imm |= 0x40;
4207  if (OldImm & 0x40) Imm |= 0x20;
4208  }
4209 
4210  SDLoc DL(Root);
4211 
4212  SDValue TImm = CurDAG->getTargetConstant(Imm, DL, MVT::i8);
4213 
4214  MVT NVT = Root->getSimpleValueType(0);
4215 
4216  MachineSDNode *MNode;
4217  if (FoldedLoad) {
4218  SDVTList VTs = CurDAG->getVTList(NVT, MVT::Other);
4219 
4220  unsigned Opc;
4221  if (C.getOpcode() == X86ISD::VBROADCAST_LOAD) {
4222  auto *MemIntr = cast<MemIntrinsicSDNode>(C);
4223  unsigned EltSize = MemIntr->getMemoryVT().getSizeInBits();
4224  assert((EltSize == 32 || EltSize == 64) && "Unexpected broadcast size!");
4225 
4226  bool UseD = EltSize == 32;
4227  if (NVT.is128BitVector())
4228  Opc = UseD ? X86::VPTERNLOGDZ128rmbi : X86::VPTERNLOGQZ128rmbi;
4229  else if (NVT.is256BitVector())
4230  Opc = UseD ? X86::VPTERNLOGDZ256rmbi : X86::VPTERNLOGQZ256rmbi;
4231  else if (NVT.is512BitVector())
4232  Opc = UseD ? X86::VPTERNLOGDZrmbi : X86::VPTERNLOGQZrmbi;
4233  else
4234  llvm_unreachable("Unexpected vector size!");
4235  } else {
4236  bool UseD = NVT.getVectorElementType() == MVT::i32;
4237  if (NVT.is128BitVector())
4238  Opc = UseD ? X86::VPTERNLOGDZ128rmi : X86::VPTERNLOGQZ128rmi;
4239  else if (NVT.is256BitVector())
4240  Opc = UseD ? X86::VPTERNLOGDZ256rmi : X86::VPTERNLOGQZ256rmi;
4241  else if (NVT.is512BitVector())
4242  Opc = UseD ? X86::VPTERNLOGDZrmi : X86::VPTERNLOGQZrmi;
4243  else
4244  llvm_unreachable("Unexpected vector size!");
4245  }
4246 
4247  SDValue Ops[] = {A, B, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, TImm, C.getOperand(0)};
4248  MNode = CurDAG->getMachineNode(Opc, DL, VTs, Ops);
4249 
4250  // Update the chain.
4251  ReplaceUses(C.getValue(1), SDValue(MNode, 1));
4252  // Record the mem-refs
4253  CurDAG->setNodeMemRefs(MNode, {cast<MemSDNode>(C)->getMemOperand()});
4254  } else {
4255  bool UseD = NVT.getVectorElementType() == MVT::i32;
4256  unsigned Opc;
4257  if (NVT.is128BitVector())
4258  Opc = UseD ? X86::VPTERNLOGDZ128rri : X86::VPTERNLOGQZ128rri;
4259  else if (NVT.is256BitVector())
4260  Opc = UseD ? X86::VPTERNLOGDZ256rri : X86::VPTERNLOGQZ256rri;
4261  else if (NVT.is512BitVector())
4262  Opc = UseD ? X86::VPTERNLOGDZrri : X86::VPTERNLOGQZrri;
4263  else
4264  llvm_unreachable("Unexpected vector size!");
4265 
4266  MNode = CurDAG->getMachineNode(Opc, DL, NVT, {A, B, C, TImm});
4267  }
4268 
4269  ReplaceUses(SDValue(Root, 0), SDValue(MNode, 0));
4270  CurDAG->RemoveDeadNode(Root);
4271  return true;
4272 }
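// A compile-time sketch (illustrative only; swapTernlogAC is not part of
// this file) of the immediate fixup above: bit i of a VPTERNLOG immediate is
// the result for inputs (a, b, c) = (i>>2 & 1, i>>1 & 1, i & 1), so swapping
// operands A and C permutes bit positions abc -> cba: 1<->4 and 3<->6, with
// bits 0, 2, 5 and 7 fixed (hence the 0xa5 mask used above).
static constexpr uint8_t swapTernlogAC(uint8_t Imm) {
  return (Imm & 0xa5) | ((Imm & 0x02) << 3) | ((Imm & 0x10) >> 3) |
         ((Imm & 0x08) << 3) | ((Imm & 0x40) >> 3);
}
// A & (B | C) is immediate 0xe0; with A and C swapped, C & (B | A) is 0xa8.
static_assert(swapTernlogAC(0xe0) == 0xa8, "abc -> cba permutation of 0xe0");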
4273 
4274 // Try to match two logic ops to a VPTERNLOG.
4275 // FIXME: Handle more complex patterns that use an operand more than once?
4276 bool X86DAGToDAGISel::tryVPTERNLOG(SDNode *N) {
4277  MVT NVT = N->getSimpleValueType(0);
4278 
4279  // Make sure we support VPTERNLOG.
4280  if (!NVT.isVector() || !Subtarget->hasAVX512() ||
4281  NVT.getVectorElementType() == MVT::i1)
4282  return false;
4283 
4284  // We need VLX for 128/256-bit.
4285  if (!(Subtarget->hasVLX() || NVT.is512BitVector()))
4286  return false;
4287 
4288  SDValue N0 = N->getOperand(0);
4289  SDValue N1 = N->getOperand(1);
4290 
4291  auto getFoldableLogicOp = [](SDValue Op) {
4292  // Peek through single use bitcast.
4293  if (Op.getOpcode() == ISD::BITCAST && Op.hasOneUse())
4294  Op = Op.getOperand(0);
4295 
4296  if (!Op.hasOneUse())
4297  return SDValue();
4298 
4299  unsigned Opc = Op.getOpcode();
4300  if (Opc == ISD::AND || Opc == ISD::OR || Opc == ISD::XOR ||
4301  Opc == X86ISD::ANDNP)
4302  return Op;
4303 
4304  return SDValue();
4305  };
4306 
4307  SDValue A, FoldableOp;
4308  if ((FoldableOp = getFoldableLogicOp(N1))) {
4309  A = N0;
4310  } else if ((FoldableOp = getFoldableLogicOp(N0))) {
4311  A = N1;
4312  } else
4313  return false;
4314 
4315  SDValue B = FoldableOp.getOperand(0);
4316  SDValue C = FoldableOp.getOperand(1);
4317  SDNode *ParentA = N;
4318  SDNode *ParentB = FoldableOp.getNode();
4319  SDNode *ParentC = FoldableOp.getNode();
4320 
4321  // We can build the appropriate control immediate by performing the logic
4322  // operation we're matching using these constants for A, B, and C.
4323  uint8_t TernlogMagicA = 0xf0;
4324  uint8_t TernlogMagicB = 0xcc;
4325  uint8_t TernlogMagicC = 0xaa;
4326 
4327  // Some of the inputs may be inverted, peek through them and invert the
4328  // magic values accordingly.
4329  // TODO: There may be a bitcast before the xor that we should peek through.
4330  auto PeekThroughNot = [](SDValue &Op, SDNode *&Parent, uint8_t &Magic) {
4331  if (Op.getOpcode() == ISD::XOR && Op.hasOneUse() &&
4332  ISD::isBuildVectorAllOnes(Op.getOperand(1).getNode())) {
4333  Magic = ~Magic;
4334  Parent = Op.getNode();
4335  Op = Op.getOperand(0);
4336  }
4337  };
4338 
4339  PeekThroughNot(A, ParentA, TernlogMagicA);
4340  PeekThroughNot(B, ParentB, TernlogMagicB);
4341  PeekThroughNot(C, ParentC, TernlogMagicC);
4342 
4343  uint8_t Imm;
4344  switch (FoldableOp.getOpcode()) {
4345  default: llvm_unreachable("Unexpected opcode!");
4346  case ISD::AND: Imm = TernlogMagicB & TernlogMagicC; break;
4347  case ISD::OR: Imm = TernlogMagicB | TernlogMagicC; break;
4348  case ISD::XOR: Imm = TernlogMagicB ^ TernlogMagicC; break;
4349  case X86ISD::ANDNP: Imm = ~(TernlogMagicB) & TernlogMagicC; break;
4350  }
4351 
4352  switch (N->getOpcode()) {
4353  default: llvm_unreachable("Unexpected opcode!");
4354  case X86ISD::ANDNP:
4355  if (A == N0)
4356  Imm &= ~TernlogMagicA;
4357  else
4358  Imm = ~(Imm) & TernlogMagicA;
4359  break;
4360  case ISD::AND: Imm &= TernlogMagicA; break;
4361  case ISD::OR: Imm |= TernlogMagicA; break;
4362  case ISD::XOR: Imm ^= TernlogMagicA; break;
4363  }
4364 
4365  return matchVPTERNLOG(N, ParentA, ParentB, ParentC, A, B, C, Imm);
4366 }
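// A compile-time sketch (illustrative only) of the truth-table trick above:
// bit i of each magic constant is that operand's value in row i of the
// 3-input truth table, so evaluating the matched expression on the magic
// values directly yields the VPTERNLOG immediate. For example:
static_assert((0xcc & 0xaa) == 0x88, "B & C       -> imm 0x88");
static_assert((0xf0 | (0xcc ^ 0xaa)) == 0xf6, "A | (B ^ C) -> imm 0xf6");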
4367 
4368 /// If the high bits of an 'and' operand are known zero, try setting the
4369 /// high bits of an 'and' constant operand to produce a smaller encoding by
4370 /// creating a small, sign-extended negative immediate rather than a large
4371 /// positive one. This reverses a transform in SimplifyDemandedBits that
4372 /// shrinks mask constants by clearing bits. There is also a possibility that
4373 /// the 'and' mask can be made -1, so the 'and' itself is unnecessary. In that
4374 /// case, just replace the 'and'. Return 'true' if the node is replaced.
4375 bool X86DAGToDAGISel::shrinkAndImmediate(SDNode *And) {
4376  // i8 is unshrinkable, i16 should be promoted to i32, and vector ops don't
4377  // have immediate operands.
4378  MVT VT = And->getSimpleValueType(0);
4379  if (VT != MVT::i32 && VT != MVT::i64)
4380  return false;
4381 
4382  auto *And1C = dyn_cast<ConstantSDNode>(And->getOperand(1));
4383  if (!And1C)
4384  return false;
4385 
4386  // Bail out if the mask constant is already negative. It can't shrink any more.
4387  // If the upper 32 bits of a 64 bit mask are all zeros, we have special isel
4388  // patterns to use a 32-bit and instead of a 64-bit and by relying on the
4389  // implicit zeroing of 32 bit ops. So we should check if the lower 32 bits
4390  // are negative too.
4391  APInt MaskVal = And1C->getAPIntValue();
4392  unsigned MaskLZ = MaskVal.countLeadingZeros();
4393  if (!MaskLZ || (VT == MVT::i64 && MaskLZ == 32))
4394  return false;
4395 
4396  // Don't extend into the upper 32 bits of a 64 bit mask.
4397  if (VT == MVT::i64 && MaskLZ >= 32) {
4398  MaskLZ -= 32;
4399  MaskVal = MaskVal.trunc(32);
4400  }
4401 
4402  SDValue And0 = And->getOperand(0);
4403  APInt HighZeros = APInt::getHighBitsSet(MaskVal.getBitWidth(), MaskLZ);
4404  APInt NegMaskVal = MaskVal | HighZeros;
4405 
4406  // If a negative constant would not allow a smaller encoding, there's no need
4407  // to continue. Only change the constant when we know it's a win.
4408  unsigned MinWidth = NegMaskVal.getMinSignedBits();
4409  if (MinWidth > 32 || (MinWidth > 8 && MaskVal.getMinSignedBits() <= 32))
4410  return false;
4411 
4412  // Extend masks if we truncated above.
4413  if (VT == MVT::i64 && MaskVal.getBitWidth() < 64) {
4414  NegMaskVal = NegMaskVal.zext(64);
4415  HighZeros = HighZeros.zext(64);
4416  }
4417 
4418  // The variable operand must be all zeros in the top bits to allow using the
4419  // new, negative constant as the mask.
4420  if (!CurDAG->MaskedValueIsZero(And0, HighZeros))
4421  return false;
4422 
4423  // Check if the mask is -1. In that case, this is an unnecessary instruction
4424  // that escaped earlier analysis.
4425  if (NegMaskVal.isAllOnes()) {
4426  ReplaceNode(And, And0.getNode());
4427  return true;
4428  }
4429 
4430  // A negative mask allows a smaller encoding. Create a new 'and' node.
4431  SDValue NewMask = CurDAG->getConstant(NegMaskVal, SDLoc(And), VT);
4432  insertDAGNode(*CurDAG, SDValue(And, 0), NewMask);
4433  SDValue NewAnd = CurDAG->getNode(ISD::AND, SDLoc(And), VT, And0, NewMask);
4434  ReplaceNode(And, NewAnd.getNode());
4435  SelectCode(NewAnd.getNode());
4436  return true;
4437 }
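// A compile-time sketch (illustrative only) of the encoding win described
// above: when the operand's high bits are known zero, widening the mask's
// leading zeros to ones changes nothing, and the resulting -16 fits in a
// sign-extended imm8 while 0x00000000fffffff0 does not fit any sign-extended
// immediate and would need a separate 64-bit materialization.
static_assert((0x12345678ull & 0x00000000fffffff0ull) ==
                  (0x12345678ull & 0xfffffffffffffff0ull),
              "high mask bits are don't-cares when the operand's high bits are 0");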
4438 
4439 static unsigned getVPTESTMOpc(MVT TestVT, bool IsTestN, bool FoldedLoad,
4440  bool FoldedBCast, bool Masked) {
4441 #define VPTESTM_CASE(VT, SUFFIX) \
4442 case MVT::VT: \
4443  if (Masked) \
4444  return IsTestN ? X86::VPTESTNM##SUFFIX##k: X86::VPTESTM##SUFFIX##k; \
4445  return IsTestN ? X86::VPTESTNM##SUFFIX : X86::VPTESTM##SUFFIX;
4446 
4447 
4448 #define VPTESTM_BROADCAST_CASES(SUFFIX) \
4449 default: llvm_unreachable("Unexpected VT!"); \
4450 VPTESTM_CASE(v4i32, DZ128##SUFFIX) \
4451 VPTESTM_CASE(v2i64, QZ128##SUFFIX) \
4452 VPTESTM_CASE(v8i32, DZ256##SUFFIX) \
4453 VPTESTM_CASE(v4i64, QZ256##SUFFIX) \
4454 VPTESTM_CASE(v16i32, DZ##SUFFIX) \
4455 VPTESTM_CASE(v8i64, QZ##SUFFIX)
4456 
4457 #define VPTESTM_FULL_CASES(SUFFIX) \
4458 VPTESTM_BROADCAST_CASES(SUFFIX) \
4459 VPTESTM_CASE(v16i8, BZ128##SUFFIX) \
4460 VPTESTM_CASE(v8i16, WZ128##SUFFIX) \
4461 VPTESTM_CASE(v32i8, BZ256##SUFFIX) \
4462 VPTESTM_CASE(v16i16, WZ256##SUFFIX) \
4463 VPTESTM_CASE(v64i8, BZ##SUFFIX) \
4464 VPTESTM_CASE(v32i16, WZ##SUFFIX)
4465 
4466  if (FoldedBCast) {
4467  switch (TestVT.SimpleTy) {
4468  VPTESTM_BROADCAST_CASES(rmb)
4469  }
4470  }
4471 
4472  if (FoldedLoad) {
4473  switch (TestVT.SimpleTy) {
4474  VPTESTM_FULL_CASES(rm)
4475  }
4476  }
4477 
4478  switch (TestVT.SimpleTy) {
4479  VPTESTM_FULL_CASES(rr)
4480  }
4481 
4482 #undef VPTESTM_FULL_CASES
4483 #undef VPTESTM_BROADCAST_CASES
4484 #undef VPTESTM_CASE
4485 }
4486 
4487 // Try to create VPTESTM instruction. If InMask is not null, it will be used
4488 // to form a masked operation.
4489 bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc,
4490  SDValue InMask) {
4491  assert(Subtarget->hasAVX512() && "Expected AVX512!");
4492  assert(Setcc.getSimpleValueType().getVectorElementType() == MVT::i1 &&
4493  "Unexpected VT!");
4494 
4495  // Look for equal and not equal compares.
4496  ISD::CondCode CC = cast<CondCodeSDNode>(Setcc.getOperand(2))->get();
4497  if (CC != ISD::SETEQ && CC != ISD::SETNE)
4498  return false;
4499 
4500  SDValue SetccOp0 = Setcc.getOperand(0);
4501  SDValue SetccOp1 = Setcc.getOperand(1);
4502 
4503  // Canonicalize the all zero vector to the RHS.
4504  if (ISD::isBuildVectorAllZeros(SetccOp0.getNode()))
4505  std::swap(SetccOp0, SetccOp1);
4506 
4507  // See if we're comparing against zero.
4508  if (!ISD::isBuildVectorAllZeros(SetccOp1.getNode()))
4509  return false;
4510 
4511  SDValue N0 = SetccOp0;
4512 
4513  MVT CmpVT = N0.getSimpleValueType();
4514  MVT CmpSVT = CmpVT.getVectorElementType();
4515 
4516  // Start with both operands the same. We'll try to refine this.
4517  SDValue Src0 = N0;
4518  SDValue Src1 = N0;
4519 
4520  {
4521  // Look through single use bitcasts.
4522  SDValue N0Temp = N0;
4523  if (N0Temp.getOpcode() == ISD::BITCAST && N0Temp.hasOneUse())
4524  N0Temp = N0.getOperand(0);
4525 
4526  // Look for single use AND.
4527  if (N0Temp.getOpcode() == ISD::AND && N0Temp.hasOneUse()) {
4528  Src0 = N0Temp.getOperand(0);
4529  Src1 = N0Temp.getOperand(1);
4530  }
4531  }
4532 
4533  // Without VLX we need to widen the operation.
4534  bool Widen = !Subtarget->hasVLX() && !CmpVT.is512BitVector();
4535 
4536  auto tryFoldLoadOrBCast = [&](SDNode *Root, SDNode *P, SDValue &L,
4537  SDValue &Base, SDValue &Scale, SDValue &Index,
4538  SDValue &Disp, SDValue &Segment) {
4539  // If we need to widen, we can't fold the load.
4540  if (!Widen)
4541  if (tryFoldLoad(Root, P, L, Base, Scale, Index, Disp, Segment))
4542  return true;
4543 
4544  // If we didn't fold a load, try to match a broadcast. There is no widening
4545  // limitation for this, but only 32- and 64-bit element types are supported.
4546  if (CmpSVT != MVT::i32 && CmpSVT != MVT::i64)
4547  return false;
4548 
4549  // Look through single use bitcasts.
4550  if (L.getOpcode() == ISD::BITCAST && L.hasOneUse()) {
4551  P = L.getNode();
4552  L = L.getOperand(0);
4553  }
4554 
4555  if (L.getOpcode() != X86ISD::VBROADCAST_LOAD)
4556  return false;
4557 
4558  auto *MemIntr = cast<MemIntrinsicSDNode>(L);
4559  if (MemIntr->getMemoryVT().getSizeInBits() != CmpSVT.getSizeInBits())
4560  return false;
4561 
4562  return tryFoldBroadcast(Root, P, L, Base, Scale, Index, Disp, Segment);
4563  };
4564 
4565  // We can only fold loads if the sources are unique.
4566  bool CanFoldLoads = Src0 != Src1;
4567 
4568  bool FoldedLoad = false;
4569  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4570  if (CanFoldLoads) {
4571  FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src1, Tmp0, Tmp1, Tmp2,
4572  Tmp3, Tmp4);
4573  if (!FoldedLoad) {
4574  // AND is commutative.
4575  FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src0, Tmp0, Tmp1,
4576  Tmp2, Tmp3, Tmp4);
4577  if (FoldedLoad)
4578  std::swap(Src0, Src1);
4579  }
4580  }
4581 
4582  bool FoldedBCast = FoldedLoad && Src1.getOpcode() == X86ISD::VBROADCAST_LOAD;
4583 
4584  bool IsMasked = InMask.getNode() != nullptr;
4585 
4586  SDLoc dl(Root);
4587 
4588  MVT ResVT = Setcc.getSimpleValueType();
4589  MVT MaskVT = ResVT;
4590  if (Widen) {
4591  // Widen the inputs using insert_subreg or copy_to_regclass.
4592  unsigned Scale = CmpVT.is128BitVector() ? 4 : 2;
4593  unsigned SubReg = CmpVT.is128BitVector() ? X86::sub_xmm : X86::sub_ymm;
4594  unsigned NumElts = CmpVT.getVectorNumElements() * Scale;
4595  CmpVT = MVT::getVectorVT(CmpSVT, NumElts);
4596  MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
4597  SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, dl,
4598  CmpVT), 0);
4599  Src0 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src0);
4600 
4601  if (!FoldedBCast)
4602  Src1 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src1);
4603 
4604  if (IsMasked) {
4605  // Widen the mask.
4606  unsigned RegClass = TLI->getRegClassFor(MaskVT)->getID();
4607  SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4608  InMask = SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4609  dl, MaskVT, InMask, RC), 0);
4610  }
4611  }
4612 
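  // Illustration (not part of the original source): without VLX a v4i32
  // compare cannot use the 128-bit VPTESTM forms, so Scale is 4 and both
  // sources are inserted via sub_xmm into an IMPLICIT_DEF v16i32 register.
  // The test then runs as a 512-bit instruction producing a v16i1 mask,
  // which the COPY_TO_REGCLASS at the end of this function shrinks back to
  // the original v4i1 result type.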
4613  bool IsTestN = CC == ISD::SETEQ;
4614  unsigned Opc = getVPTESTMOpc(CmpVT, IsTestN, FoldedLoad, FoldedBCast,
4615  IsMasked);
4616 
4617  MachineSDNode *CNode;
4618  if (FoldedLoad) {
4619  SDVTList VTs = CurDAG->getVTList(MaskVT, MVT::Other);
4620 
4621  if (IsMasked) {
4622  SDValue Ops[] = { InMask, Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4623  Src1.getOperand(0) };
4624  CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4625  } else {
4626  SDValue Ops[] = { Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4627  Src1.getOperand(0) };
4628  CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4629  }
4630 
4631  // Update the chain.
4632  ReplaceUses(Src1.getValue(1), SDValue(CNode, 1));
4633  // Record the mem-refs
4634  CurDAG->setNodeMemRefs(CNode, {cast<MemSDNode>(Src1)->getMemOperand()});
4635  } else {
4636  if (IsMasked)
4637  CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, InMask, Src0, Src1);
4638  else
4639  CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, Src0, Src1);
4640  }
4641 
4642  // If we widened, we need to shrink the mask VT.
4643  if (Widen) {
4644  unsigned RegClass = TLI->getRegClassFor(ResVT)->getID();
4645  SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4646  CNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4647  dl, ResVT, SDValue(CNode, 0), RC);
4648  }
4649 
4650  ReplaceUses(SDValue(Root, 0), SDValue(CNode, 0));
4651  CurDAG->RemoveDeadNode(Root);
4652  return true;
4653 }
4654 
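// Illustration (not part of the original source): VPTESTM sets mask bit i
// when (Src0[i] & Src1[i]) != 0 and VPTESTNM when the AND is zero, so for
// i32 elements the function above selects roughly
//   setcc (and X, Y), <all zeros>, setne  ->  vptestmd  k, X, Y
//   setcc (and X, Y), <all zeros>, seteq  ->  vptestnmd k, X, Y
// which is why IsTestN is simply (CC == ISD::SETEQ).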
4655 // Try to match the bitselect pattern (or (and A, B), (andn A, C)). Turn it
4656 // into vpternlog.
4657 bool X86DAGToDAGISel::tryMatchBitSelect(SDNode *N) {
4658  assert(N->getOpcode() == ISD::OR && "Unexpected opcode!");
4659 
4660  MVT NVT = N->getSimpleValueType(0);
4661 
4662  // Make sure we support VPTERNLOG.
4663  if (!NVT.isVector() || !Subtarget->hasAVX512())
4664  return false;
4665 
4666  // We need VLX for 128/256-bit.
4667  if (!(Subtarget->hasVLX() || NVT.is512BitVector()))
4668  return false;
4669 
4670  SDValue N0 = N->getOperand(0);
4671  SDValue N1 = N->getOperand(1);
4672 
4673  // Canonicalize AND to LHS.
4674  if (N1.getOpcode() == ISD::AND)
4675  std::swap(N0, N1);
4676 
4677  if (N0.getOpcode() != ISD::AND ||
4678  N1.getOpcode() != X86ISD::ANDNP ||
4679  !N0.hasOneUse() || !N1.hasOneUse())
4680  return false;
4681 
4682  // ANDN is not commutative, so use it to pin down A and C.
4683  SDValue A = N1.getOperand(0);
4684  SDValue C = N1.getOperand(1);
4685 
4686  // AND is commutative: if one operand matches A, the other operand is B.
4687  // Otherwise this isn't a match.
4688  SDValue B;
4689  if (N0.getOperand(0) == A)
4690  B = N0.getOperand(1);
4691  else if (N0.getOperand(1) == A)
4692  B = N0.getOperand(0);
4693  else
4694  return false;
4695 
4696  SDLoc dl(N);
4697  SDValue Imm = CurDAG->getTargetConstant(0xCA, dl, MVT::i8);
4698  SDValue Ternlog = CurDAG->getNode(X86ISD::VPTERNLOG, dl, NVT, A, B, C, Imm);
4699  ReplaceNode(N, Ternlog.getNode());
4700 
4701  return matchVPTERNLOG(Ternlog.getNode(), Ternlog.getNode(), Ternlog.getNode(),
4702  Ternlog.getNode(), A, B, C, 0xCA);
4703 }
4704 
4705 void X86DAGToDAGISel::Select(SDNode *Node) {
4706  MVT NVT = Node->getSimpleValueType(0);
4707  unsigned Opcode = Node->getOpcode();
4708  SDLoc dl(Node);
4709 
4710  if (Node->isMachineOpcode()) {
4711  LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
4712  Node->setNodeId(-1);
4713  return; // Already selected.
4714  }
4715 
4716  switch (Opcode) {
4717  default: break;
4718  case ISD::INTRINSIC_W_CHAIN: {
4719  unsigned IntNo = Node->getConstantOperandVal(1);
4720  switch (IntNo) {
4721  default: break;
4722  case Intrinsic::x86_encodekey128:
4723  case Intrinsic::x86_encodekey256: {
4724  if (!Subtarget->hasKL())
4725  break;
4726 
4727  unsigned Opcode;
4728  switch (IntNo) {
4729  default: llvm_unreachable("Impossible intrinsic");
4730  case Intrinsic::x86_encodekey128: Opcode = X86::ENCODEKEY128; break;
4731  case Intrinsic::x86_encodekey256: Opcode = X86::ENCODEKEY256; break;
4732  }
4733 
4734  SDValue Chain = Node->getOperand(0);
4735  Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM0, Node->getOperand(3),
4736  SDValue());
4737  if (Opcode == X86::ENCODEKEY256)
4738  Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM1, Node->getOperand(4),
4739  Chain.getValue(1));
4740 
4741  MachineSDNode *Res = CurDAG->getMachineNode(
4742  Opcode, dl, Node->getVTList(),
4743  {Node->getOperand(2), Chain, Chain.getValue(1)});
4744  ReplaceNode(Node, Res);
4745  return;
4746  }
4747  case Intrinsic::x86_tileloadd64_internal:
4748  case Intrinsic::x86_tileloaddt164_internal: {
4749  if (!Subtarget->hasAMXTILE())
4750  break;
4751  unsigned Opc = IntNo == Intrinsic::x86_tileloadd64_internal
4752  ? X86::PTILELOADDV
4753  : X86::PTILELOADDT1V;
4754  // _tile_loadd_internal(row, col, buf, STRIDE)
4755  SDValue Base = Node->getOperand(4);
4756  SDValue Scale = getI8Imm(1, dl);
4757  SDValue Index = Node->getOperand(5);
4758  SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
4759  SDValue Segment = CurDAG->getRegister(0, MVT::i16);
4760  SDValue Chain = Node->getOperand(0);
4761  MachineSDNode *CNode;
4762  SDValue Ops[] = {Node->getOperand(2),
4763  Node->getOperand(3),
4764  Base,
4765  Scale,
4766  Index,
4767  Disp,
4768  Segment,
4769  Chain};
4770  CNode = CurDAG->getMachineNode(Opc, dl, {MVT::x86amx, MVT::Other}, Ops);
4771  ReplaceNode(Node, CNode);
4772  return;
4773  }
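  // Illustration (not part of the original source): the operands above form
  // the usual five-part X86 memory reference (base, scale, index, disp,
  // segment) with the buffer pointer as the base and the stride in the
  // index register at scale 1, so once the PTILELOADDV pseudo is later
  // expanded the instruction resembles
  //   tileloadd (%rdi,%rsi,1), %tmm0   // %rdi = buf, %rsi = stride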
4774  }
4775  break;
4776  }
4777  case ISD::INTRINSIC_VOID: {
4778  unsigned IntNo = Node->getConstantOperandVal(1);
4779  switch (IntNo) {
4780  default: break;
4781  case Intrinsic::x86_sse3_monitor:
4782  case Intrinsic::x86_monitorx:
4783  case Intrinsic::x86_clzero: {
4784  bool Use64BitPtr = Node->getOperand(2).getValueType() == MVT::i64;
4785 
4786  unsigned Opc = 0;
4787  switch (IntNo) {
4788  default: llvm_unreachable("Unexpected intrinsic!");
4789  case Intrinsic::x86_sse3_monitor:
4790  if (!Subtarget->hasSSE3())
4791  break;
4792  Opc = Use64BitPtr ? X86::MONITOR64rrr : X86::MONITOR32rrr;
4793  break;
4794  case Intrinsic::x86_monitorx:
4795  if (!Subtarget->hasMWAITX())
4796  break;
4797  Opc = Use64BitPtr ? X86::MONITORX64rrr : X86::MONITORX32rrr;
4798  break;
4799  case Intrinsic::x86_clzero:
4800  if (!Subtarget->hasCLZERO())
4801  break;
4802  Opc = Use64BitPtr ? X86::CLZERO64r : X86::CLZERO32r;
4803  break;
4804  }
4805 
4806  if (Opc) {
4807  unsigned PtrReg = Use64BitPtr ? X86::RAX : X86::EAX;
4808  SDValue Chain = CurDAG->getCopyToReg(Node->getOperand(0), dl, PtrReg,
4809  Node->getOperand(2), SDValue());
4810  SDValue InFlag = Chain.getValue(1);
4811 
4812  if (IntNo == Intrinsic::x86_sse3_monitor ||
4813  IntNo == Intrinsic::x86_monitorx) {
4814  // Copy the other two operands to ECX and EDX.
4815  Chain = CurDAG->getCopyToReg(Chain, dl, X86::ECX, Node->getOperand(3),
4816  InFlag);
4817  InFlag = Chain.getValue(1);
4818  Chain = CurDAG->getCopyToReg(Chain, dl, X86::EDX, Node->getOperand(4),
4819  InFlag);
4820  InFlag = Chain.getValue(1);
4821  }
4822 
4823  MachineSDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
4824  { Chain, InFlag});
4825  ReplaceNode(Node, CNode);
4826  return;
4827  }
4828 
4829  break;
4830  }
4831  case Intrinsic::x86_tilestored64_internal: {
4832  unsigned Opc = X86::PTILESTOREDV;
4833  // _tile_stored_internal(row, col, buf, STRIDE, c)
4834  SDValue Base = Node->getOperand(4);
4835  SDValue Scale = getI8Imm(1, dl);
4836  SDValue Index = Node->getOperand(5);
4837  SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
4838  SDValue Segment = CurDAG->getRegister(0, MVT::i16);
4839  SDValue Chain = Node->getOperand(0);
4840  MachineSDNode *CNode;
4841  SDValue Ops[] = {Node->getOperand(2),
4842  Node->getOperand(3),
4843  Base,
4844  Scale,
4845  Index,
4846  Disp,
4847  Segment,
4848  Node->getOperand(6),
4849  Chain};
4850  CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4851  ReplaceNode(Node, CNode);
4852  return;
4853  }
4854  case Intrinsic::x86_tileloadd64:
4855  case Intrinsic::x86_tileloaddt164:
4856  case Intrinsic::x86_tilestored64: {
4857  if (!Subtarget->hasAMXTILE())
4858  break;
4859  unsigned Opc;
4860  switch (IntNo) {
4861  default: llvm_unreachable("Unexpected intrinsic!");
4862  case Intrinsic::x86_tileloadd64: Opc = X86::PTILELOADD; break;
4863  case Intrinsic::x86_tileloaddt164: Opc = X86::PTILELOADDT1; break;
4864  case Intrinsic::x86_tilestored64: Opc = X86::PTILESTORED; break;
4865  }
4866  // FIXME: Match displacement and scale.
4867  unsigned TIndex = Node->getConstantOperandVal(2);
4868  SDValue TReg = getI8Imm(TIndex, dl);
4869  SDValue Base = Node->getOperand(3);
4870  SDValue Scale = getI8Imm(1, dl);
4871  SDValue Index = Node->getOperand(4);
4872  SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
4873  SDValue Segment = CurDAG->getRegister(0, MVT::i16);
4874  SDValue Chain = Node->getOperand(0);
4875  MachineSDNode *CNode;
4876  if (Opc == X86::PTILESTORED) {
4877  SDValue Ops[] = { Base, Scale, Index, Disp, Segment, TReg, Chain };
4878  CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4879  } else {
4880  SDValue Ops[] = { TReg, Base, Scale, Index, Disp, Segment, Chain };
4881  CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4882  }
4883  ReplaceNode(Node, CNode);
4884  return;
4885  }
4886  }
4887  break;
4888  }
4889  case ISD::BRIND:
4890  case X86ISD::NT_BRIND: {
4891  if (Subtarget->isTargetNaCl())
4892  // NaCl has its own pass where jmp %r32 is converted to jmp %r64. We
4893  // leave the instruction alone.
4894  break;
4895  if (Subtarget->isTarget64BitILP32()) {
4896  // Converts a 32-bit register to a 64-bit, zero-extended version of
4897  // it. This is needed because x86-64 can do many things, but jmp %r32
4898  // ain't one of them.
4899  SDValue Target = Node->getOperand(1);
4900  assert(Target.getValueType() == MVT::i32 && "Unexpected VT!");
4901  SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, MVT::i64);
4902  SDValue Brind = CurDAG->getNode(Opcode, dl, MVT::Other,
4903  Node->getOperand(0), ZextTarget);
4904  ReplaceNode(Node, Brind.getNode());
4905  SelectCode(ZextTarget.getNode());
4906  SelectCode(Brind.getNode());
4907  return;
4908  }
4909  break;
4910  }
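  // Illustration (not part of the original source): on x32, pointers are 32
  // bits wide but 64-bit mode has no "jmp r32", so an indirect branch through
  // %eax is selected roughly as
  //   movl %eax, %eax   // implicit zero-extension of the 32-bit target
  //   jmpq *%rax
  // with the zero extension coming from the getZExtOrTrunc node built above.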
4911  case X86ISD::GlobalBaseReg:
4912  ReplaceNode(Node, getGlobalBaseReg());
4913  return;
4914 
4915  case ISD::BITCAST:
4916  // Just drop all 128/256/512-bit bitcasts.
4917  if (NVT.is512BitVector() || NVT.is256BitVector() || NVT.is128BitVector() ||
4918  NVT == MVT::f128) {
4919  ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
4920  CurDAG->RemoveDeadNode(Node);
4921  return;
4922  }
4923  break;
4924 
4925  case ISD::SRL:
4926  if (matchBitExtract(Node))
4927  return;
4928  LLVM_FALLTHROUGH;
4929  case ISD::SRA:
4930  case ISD::SHL:
4931  if (tryShiftAmountMod(Node))
4932  return;
4933  break;
4934 
4935  case X86ISD::VPTERNLOG: {
4936  uint8_t Imm = cast<ConstantSDNode>(Node->getOperand(3))->getZExtValue();
4937  if (matchVPTERNLOG(Node, Node, Node, Node, Node->getOperand(0),
4938  Node->getOperand(1), Node->getOperand(2), Imm))
4939  return;
4940  break;
4941  }
4942 
4943  case X86ISD::ANDNP:
4944  if (tryVPTERNLOG(Node))
4945  return;
4946  break;
4947 
4948  case ISD::AND:
4949  if (NVT.isVector() && NVT.getVectorElementType() == MVT::i1) {
4950  // Try to form a masked VPTESTM. Operands can be in either order.
4951  SDValue N0 = Node->getOperand(0);
4952  SDValue N1 = Node->getOperand(1);
4953  if (N0.getOpcode() == ISD::SETCC && N0.hasOneUse() &&
4954  tryVPTESTM(Node, N0, N1))
4955  return;
4956  if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse() &&
4957  tryVPTESTM(Node, N1, N0))
4958  return;
4959  }
4960 
4961  if (MachineSDNode *NewNode = matchBEXTRFromAndImm(Node)) {
4962  ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
4963  CurDAG->RemoveDeadNode(Node);
4964  return;
4965  }
4966  if (matchBitExtract(Node))
4967  return;
4968  if (AndImmShrink && shrinkAndImmediate(Node))
4969  return;
4970 
4971  LLVM_FALLTHROUGH;
4972  case ISD::OR:
4973  case ISD::XOR:
4974  if (tryShrinkShlLogicImm(Node))
4975  return;
4976  if (Opcode == ISD::OR && tryMatchBitSelect(Node))
4977  return;
4978  if (tryVPTERNLOG(Node))
4979  return;
4980 
4981  LLVM_FALLTHROUGH;
4982  case ISD::ADD:
4983  case ISD::SUB: {
4984  // Try to avoid folding immediates with multiple uses for optsize.
4985  // This code tries to select the register form directly to avoid going
4986  // through the isel table, which might fold the immediate. We can't change
4987  // the add/sub/and/or/xor-with-immediate patterns in the tablegen files to
4988  // check the immediate use count without making the patterns unavailable to
4989  // the fast-isel table.
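  // Illustration (not part of the original source, registers hypothetical):
  // under optsize, two uses of the same imm32 such as
  //   andl $0x12345, %eax
  //   andl $0x12345, %ebx
  // encode the constant twice; materializing it once and using the register
  // forms selected below
  //   movl $0x12345, %ecx
  //   andl %ecx, %eax
  //   andl %ecx, %ebx
  // is smaller overall, which is what shouldAvoidImmediateInstFormsForSize
  // checks for.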
4990  if (!CurDAG->shouldOptForSize())
4991  break;
4992 
4993  // Only handle i8/i16/i32/i64.
4994  if (NVT != MVT::i8 && NVT != MVT::i16 && NVT != MVT::i32 && NVT != MVT::i64)
4995  break;
4996 
4997  SDValue N0 = Node->getOperand(0);
4998  SDValue N1 = Node->getOperand(1);
4999 
5000  ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
5001  if (!Cst)
5002  break;
5003 
5004  int64_t Val = Cst->getSExtValue();
5005 
5006  // Make sure it's an immediate that is considered foldable.
5007  // FIXME: Handle unsigned 32-bit immediates for 64-bit AND.
5008  if (!isInt<8>(Val) && !isInt<32>(Val))
5009  break;
5010 
5011  // If this can match to INC/DEC, let it go.
5012  if (Opcode == ISD::ADD && (Val == 1 || Val == -1))
5013  break;
5014 
5015  // Check if we should avoid folding this immediate.
5016  if (!shouldAvoidImmediateInstFormsForSize(N1.getNode()))
5017  break;
5018 
5019  // We should not fold the immediate. So we need a register form instead.
5020  unsigned ROpc, MOpc;
5021  switch (NVT.SimpleTy) {
5022  default: llvm_unreachable("Unexpected VT!");
5023  case MVT::i8:
5024  switch (Opcode) {
5025  default: llvm_unreachable("Unexpected opcode!");
5026  case ISD::ADD: ROpc = X86::ADD8rr; MOpc = X86::ADD8rm; break;
5027  case ISD::SUB: ROpc = X86::SUB8rr; MOpc = X86::SUB8rm; break;
5028  case ISD::AND: ROpc = X86::AND8rr; MOpc = X86::AND8rm; break;
5029  case ISD::OR: ROpc = X86::OR8rr; MOpc = X86::OR8rm; break;
5030  case ISD::XOR: ROpc = X86::XOR8rr; MOpc = X86::XOR8rm; break;
5031  }
5032  break;
5033  case MVT::i16:
5034  switch (Opcode) {
5035  default: llvm_unreachable("Unexpected opcode!");
5036  case ISD::ADD: ROpc = X86::ADD16rr; MOpc = X86::ADD16rm; break;
5037  case ISD::SUB: ROpc = X86::SUB16rr; MOpc = X86::SUB16rm; break;
5038  case ISD::AND: ROpc = X86::AND16rr; MOpc = X86::AND16rm; break;
5039  case ISD::OR: ROpc = X86::OR16rr; MOpc = X86::OR16rm; break;
5040  case ISD::XOR: ROpc = X86::XOR16rr; MOpc = X86::XOR16rm; break;
5041  }
5042  break;
5043  case MVT::i32:
5044  switch (Opcode) {
5045  default: llvm_unreachable("Unexpected opcode!");
5046  case ISD::ADD: ROpc = X86::ADD32rr; MOpc = X86::ADD32rm; break;
5047  case ISD::SUB: ROpc = X86::SUB32rr; MOpc = X86::SUB32rm; break;
5048  case ISD::AND: ROpc = X86::AND32rr; MOpc = X86::AND32rm; break;
5049  case ISD::OR: ROpc = X86::OR32rr; MOpc = X86::OR32rm; break;
5050  case ISD::XOR: ROpc = X86::XOR32rr; MOpc = X86::XOR32rm; break;
5051  }
5052  break;
5053  case MVT::i64:
5054  switch (Opcode) {
5055  default: llvm_unreachable("Unexpected opcode!");
5056  case ISD::ADD: ROpc = X86::ADD64rr; MOpc = X86::ADD64rm; break;
5057  case ISD::SUB: ROpc = X86::SUB64rr; MOpc = X86::SUB64rm; break;
5058  case ISD::AND: ROpc = X86::AND64rr; MOpc = X86::AND64rm; break;
5059  case ISD::OR: ROpc = X86::OR64rr; MOpc = X86::OR64rm; break;
5060  case ISD::XOR: ROpc = X86::XOR64rr; MOpc = X86::XOR64rm; break;
5061  }
5062  break;
5063  }
5064 
5065  // OK, this is an AND/OR/XOR/ADD/SUB with a constant.
5066 
5067  // If this is not a subtract, we can still try to fold a load.
5068  if (Opcode != ISD::SUB) {
5069  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
5070  if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
5071  SDValue Ops[] = { N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
5072  SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
5073  MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
5074  // Update the chain.
5075  ReplaceUses(N0.getValue(1), SDValue(CNode, 2));
5076  // Record the mem-refs
5077  CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N0)->getMemOperand()});
5078  ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
5079  CurDAG->RemoveDeadNode(Node);
5080  return;
5081  }
5082  }
5083 
5084  CurDAG->SelectNodeTo(Node, ROpc, NVT, MVT::i32, N0, N1);
5085  return;
5086  }
5087 
5088  case X86ISD::SMUL:
5089  // i16/i32/i64 are handled with isel patterns.
5090  if (NVT != MVT::i8)
5091  break;
5092  LLVM_FALLTHROUGH;
5093  case X86ISD::UMUL: {
5094  SDValue N0 = Node->getOperand(0);
5095  SDValue N1 = Node->getOperand(1);
5096 
5097  unsigned LoReg, ROpc, MOpc;
5098  switch (NVT.SimpleTy) {
5099  default: llvm_unreachable("Unsupported VT!");
5100  case MVT::i8:
5101  LoReg = X86::AL;
5102  ROpc = Opcode == X86ISD::SMUL ? X86::IMUL8r : X86::MUL8r;
5103  MOpc = Opcode == X86ISD::SMUL ? X86::IMUL8m : X86::MUL8m;
5104  break;
5105  case MVT::i16:
5106  LoReg = X86::AX;
5107  ROpc = X86::MUL16r;
5108  MOpc = X86::MUL16m;
5109  break;
5110  case MVT::i32:
5111  LoReg = X86::EAX;
5112  ROpc = X86::MUL32r;
5113  MOpc = X86::MUL32m;
5114  break;
5115  case MVT::i64:
5116  LoReg = X86::RAX;
5117  ROpc = X86::MUL64r;
5118  MOpc = X86::MUL64m;
5119  break;
5120  }
5121 
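  // Illustration (not part of the original source): these are the one-operand
  // x86 multiplies, which take one source implicitly in AL/AX/EAX/RAX, e.g.
  //   mulb %cl    // AX      <- AL  * CL
  //   mull %ecx   // EDX:EAX <- EAX * ECX
  // hence the CopyToReg of N0 into LoReg below; for i16/i32/i64 the implicit
  // high half of the result is simply left unused.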
5122  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
5123  bool FoldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
5124  // Multiply is commutative.
5125  if (!FoldedLoad) {
5126  FoldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
5127  if (FoldedLoad)
5128  std::swap(N0, N1);
5129  }
5130 
5131  SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
5132  N0, SDValue()).getValue(1);
5133 
5134  MachineSDNode *CNode;
5135  if (FoldedLoad) {
5136  // i16/i32/i64 use an instruction that produces a low and high result even
5137  // though only the low result is used.
5138  SDVTList VTs;
5139  if (NVT == MVT::i8)
5140  VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
5141  else
5142  VTs = CurDAG->getVTList(NVT, NVT, MVT::i32, MVT::Other);
5143 
5144  SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
5145  InFlag };
5146  CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
5147 
5148  // Update the chain.
5149  ReplaceUses(N1.getValue(1), SDValue(CNode, NVT == MVT::i8 ? 2 : 3));
5150  // Record the mem-refs
5151  CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
5152  } else {
5153  // i16/i32/i64 use an instruction that produces a low and high result even
5154  // though only the low result is used.
5155  SDVTList VTs;
5156  if (NVT == MVT::i8)
5157  VTs = CurDAG->getVTList(NVT, MVT::i32);
5158  else
5159  VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
516