//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized DAG to an X86 DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cstdint>

using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

static cl::opt<bool> AndImmShrink("x86-and-imm-shrink", cl::init(true),
    cl::desc("Enable setting constant bits to reduce size of mask immediates"),
    cl::Hidden);

static cl::opt<bool> PromoteAnyextLoad(
    "x86-promote-anyext-load", cl::init(true),
    cl::desc("Enable promoting aligned anyext load to wider load"), cl::Hidden);

extern cl::opt<bool> IndirectBranchTracking;

//===----------------------------------------------------------------------===//
// Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// This corresponds to X86AddressMode, but uses SDValues instead of register
  /// numbers for the leaves of the matched tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    MCSymbol *MCSym;
    int JT;
    Align Alignment;            // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*
    bool NegateIndex = false;

    X86ISelAddressMode()
        : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
          Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
          MCSym(nullptr), JT(-1), SymbolFlags(X86II::MO_NO_FLAG) {}

    bool hasSymbolicDisplacement() const {
      return GV != nullptr || CP != nullptr || ES != nullptr ||
             MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
    }

    bool hasBaseOrIndexReg() const {
      return BaseType == FrameIndexBase ||
             IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
    }

    /// Return true if this addressing mode is already RIP-relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump(SelectionDAG *DAG = nullptr) {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode())
        Base_Reg.getNode()->dump(DAG);
      else
        dbgs() << "nul\n";
      if (BaseType == FrameIndexBase)
        dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n';
      dbgs() << " Scale " << Scale << '\n'
             << "IndexReg ";
      if (NegateIndex)
        dbgs() << "negate ";
      if (IndexReg.getNode())
        IndexReg.getNode()->dump(DAG);
      else
        dbgs() << "nul\n";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " MCSym ";
      if (MCSym)
        dbgs() << MCSym;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Alignment.value() << '\n';
    }
#endif
  };
} // end anonymous namespace
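
// Illustrative example: for the memory operand of
//   movl 4(%rdi,%rcx,8), %eax
// a successful match populates X86ISelAddressMode roughly as
//   BaseType = RegBase, Base_Reg = %rdi, Scale = 8, IndexReg = %rcx, Disp = 4
// with the symbolic fields (GV, CP, ES, MCSym, JT, BlockAddr) left empty.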

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86-specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel final : public SelectionDAGISel {
    /// Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// If true, selector should try to optimize for minimum code size.
    bool OptForMinSize;

    /// Disable direct TLS access through segment registers.
    bool IndirectTlsSegRefs;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
        : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr),
          OptForMinSize(false), IndirectTlsSegRefs(false) {}

    StringRef getPassName() const override {
      return "X86 DAG->DAG Instruction Selection";
    }

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Reset the subtarget each time through.
      Subtarget = &MF.getSubtarget<X86Subtarget>();
      IndirectTlsSegRefs = MF.getFunction().hasFnAttribute(
                             "indirect-tls-seg-refs");

      // OptFor[Min]Size are used in pattern predicates that isel is matching.
      OptForMinSize = MF.getFunction().hasMinSize();
      assert((!OptForMinSize || MF.getFunction().hasOptSize()) &&
             "OptForMinSize implies OptForSize");

      SelectionDAGISel::runOnMachineFunction(MF);
      return true;
    }

    void emitFunctionEntryCode() override;

    bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;

    void PreprocessISelDAG() override;
    void PostprocessISelDAG() override;

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    void Select(SDNode *N) override;

    bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM,
                            bool AllowSegmentRegForX32 = false);
    bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool matchAddress(SDValue N, X86ISelAddressMode &AM);
    bool matchVectorAddress(SDValue N, X86ISelAddressMode &AM);
    bool matchAdd(SDValue &N, X86ISelAddressMode &AM, unsigned Depth);
    bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool selectVectorAddr(MemSDNode *Parent, SDValue BasePtr, SDValue IndexOp,
                          SDValue ScaleOp, SDValue &Base, SDValue &Scale,
                          SDValue &Index, SDValue &Disp, SDValue &Segment);
    bool selectMOV64Imm32(SDValue N, SDValue &Imm);
    bool selectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool selectLEA64_32Addr(SDValue N, SDValue &Base,
                            SDValue &Scale, SDValue &Index, SDValue &Disp,
                            SDValue &Segment);
    bool selectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool selectRelocImm(SDValue N, SDValue &Op);

    bool tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    // Convenience method where P is also root.
    bool tryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment) {
      return tryFoldLoad(P, P, N, Base, Scale, Index, Disp, Segment);
    }

    bool tryFoldBroadcast(SDNode *Root, SDNode *P, SDValue N,
                          SDValue &Base, SDValue &Scale,
                          SDValue &Index, SDValue &Disp,
                          SDValue &Segment);

    bool isProfitableToFormMaskedOp(SDNode *N) const;

    /// Implement addressing mode selection for inline asm expressions.
    bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                      unsigned ConstraintID,
                                      std::vector<SDValue> &OutOps) override;

    void emitSpecialCodeForMain();

    inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
                                   MVT VT, SDValue &Base, SDValue &Scale,
                                   SDValue &Index, SDValue &Disp,
                                   SDValue &Segment) {
      if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
        Base = CurDAG->getTargetFrameIndex(
            AM.Base_FrameIndex, TLI->getPointerTy(CurDAG->getDataLayout()));
      else if (AM.Base_Reg.getNode())
        Base = AM.Base_Reg;
      else
        Base = CurDAG->getRegister(0, VT);

      Scale = getI8Imm(AM.Scale, DL);

      // Negate the index if needed.
      if (AM.NegateIndex) {
        unsigned NegOpc = VT == MVT::i64 ? X86::NEG64r : X86::NEG32r;
        SDValue Neg = SDValue(CurDAG->getMachineNode(NegOpc, DL, VT, MVT::i32,
                                                     AM.IndexReg), 0);
        AM.IndexReg = Neg;
      }

      if (AM.IndexReg.getNode())
        Index = AM.IndexReg;
      else
        Index = CurDAG->getRegister(0, VT);

      // These are 32-bit even in 64-bit mode since RIP-relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Alignment,
                                             AM.Disp, AM.SymbolFlags);
      else if (AM.ES) {
        assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      } else if (AM.MCSym) {
        assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
        assert(AM.SymbolFlags == 0 && "Unexpected symbol flags with MCSym.");
        Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
      } else if (AM.JT != -1) {
        assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      } else if (AM.BlockAddr)
        Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                             AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i16);
    }
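
    // Illustrative example: for an access like
    //   movl foo+8(,%rcx,4), %eax
    // this produces Base = register 0 (no base), Scale = 4, Index = %rcx,
    // Disp = a TargetGlobalAddress of 'foo' with offset 8, and
    // Segment = register 0. The displacement is always MVT::i32 because
    // RIP-relative offsets are 32-bit even in 64-bit mode.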

    // Utility function to determine whether we should avoid selecting
    // immediate forms of instructions for better code size. At a high
    // level, we'd like to avoid such instructions when we have similar
    // constants used within the same basic block that can be kept in a
    // register.
    //
    bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
      uint32_t UseCount = 0;

      // Do not want to hoist if we're not optimizing for size.
      // TODO: We'd like to remove this restriction.
      // See the comment in X86InstrInfo.td for more info.
      if (!CurDAG->shouldOptForSize())
        return false;

      // Walk all the users of the immediate.
      for (SDNode::use_iterator UI = N->use_begin(),
           UE = N->use_end(); (UI != UE) && (UseCount < 2); ++UI) {

        SDNode *User = *UI;

        // This user is already selected. Count it as a legitimate use and
        // move on.
        if (User->isMachineOpcode()) {
          UseCount++;
          continue;
        }

        // We want to count stores of immediates as real uses.
        if (User->getOpcode() == ISD::STORE &&
            User->getOperand(1).getNode() == N) {
          UseCount++;
          continue;
        }

        // We don't currently match users that have > 2 operands (except
        // for stores, which are handled above). Those instructions won't
        // match in ISel, for now, and would be counted incorrectly.
        // This may change in the future as we add additional instruction
        // types.
        if (User->getNumOperands() != 2)
          continue;

        // If this is a sign-extended 8-bit integer immediate used in an ALU
        // instruction, there is probably an opcode encoding to save space.
        auto *C = dyn_cast<ConstantSDNode>(N);
        if (C && isInt<8>(C->getSExtValue()))
          continue;

        // Immediates that are used for offsets as part of stack
        // manipulation should be left alone. These are typically
        // used to indicate SP offsets for argument passing and
        // will get pulled into stores/pushes (implicitly).
        if (User->getOpcode() == X86ISD::ADD ||
            User->getOpcode() == ISD::ADD    ||
            User->getOpcode() == X86ISD::SUB ||
            User->getOpcode() == ISD::SUB) {

          // Find the other operand of the add/sub.
          SDValue OtherOp = User->getOperand(0);
          if (OtherOp.getNode() == N)
            OtherOp = User->getOperand(1);

          // Don't count if the other operand is SP.
          RegisterSDNode *RegNode;
          if (OtherOp->getOpcode() == ISD::CopyFromReg &&
              (RegNode = dyn_cast_or_null<RegisterSDNode>(
                   OtherOp->getOperand(1).getNode())))
            if ((RegNode->getReg() == X86::ESP) ||
                (RegNode->getReg() == X86::RSP))
              continue;
        }

        // ... otherwise, count this and move on.
        UseCount++;
      }

      // If we have more than 1 use, then recommend for hoisting.
      return (UseCount > 1);
    }
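
    // Illustrative example: under optsize, a 4-byte immediate such as
    // 0x12345678 with two ALU users in one block is counted twice above,
    // so the selector prefers
    //   movl $0x12345678, %ecx
    //   addl %ecx, %eax
    //   orl  %ecx, %edx
    // over encoding the immediate in both instructions. Sign-extended 8-bit
    // immediates are skipped because their encodings are already compact.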

    /// Return a target constant with the specified value of type i8.
    inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
    }

    /// Return a target constant with the specified value, of type i32.
    inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
    }

    /// Return a target constant with the specified value, of type i64.
    inline SDValue getI64Imm(uint64_t Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i64);
    }

    SDValue getExtractVEXTRACTImmediate(SDNode *N, unsigned VecWidth,
                                        const SDLoc &DL) {
      assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(1);
      MVT VecVT = N->getOperand(0).getSimpleValueType();
      return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
    }

    SDValue getInsertVINSERTImmediate(SDNode *N, unsigned VecWidth,
                                      const SDLoc &DL) {
      assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(2);
      MVT VecVT = N->getSimpleValueType(0);
      return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
    }
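
    // Illustrative arithmetic: extracting a subvector at element index 4
    // from a v8i32 source with VecWidth = 128 yields (4 * 32) / 128 = 1,
    // i.e. the upper 128-bit lane, which is exactly the immediate that
    // VEXTRACTI128-style instructions expect.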

    // Helper to detect unneeded 'and' instructions on shift amounts. Called
    // from PatFrags in tablegen.
    bool isUnneededShiftMask(SDNode *N, unsigned Width) const {
      assert(N->getOpcode() == ISD::AND && "Unexpected opcode");
      const APInt &Val = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();

      if (Val.countTrailingOnes() >= Width)
        return true;

      APInt Mask = Val | CurDAG->computeKnownBits(N->getOperand(0)).Zero;
      return Mask.countTrailingOnes() >= Width;
    }
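
    // Illustrative example: 32-bit shifts only use the low 5 bits of the
    // amount, so for (shl X, (and Y, 31)) this is queried with Width = 5;
    // the mask 31 (0b11111) has five trailing ones and the 'and' can be
    // dropped. A mask of 15 is kept, unless known bits prove the bits it
    // would clear are already zero.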

    /// Return an SDNode that returns the value of the global base register.
    /// Output instructions required to initialize the global base register,
    /// if necessary.
    SDNode *getGlobalBaseReg();

    /// Return a reference to the TargetMachine, casted to the target-specific
    /// type.
    const X86TargetMachine &getTargetMachine() const {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// Return a reference to the TargetInstrInfo, casted to the target-specific
    /// type.
    const X86InstrInfo *getInstrInfo() const {
      return Subtarget->getInstrInfo();
    }

    /// Address-mode matching performs shift-of-and to and-of-shift
    /// reassociation in order to expose more scaled addressing
    /// opportunities.
    bool ComplexPatternFuncMutatesDAG() const override {
      return true;
    }

    bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;

    // Indicates we should prefer to use a non-temporal load for this load.
    bool useNonTemporalLoad(LoadSDNode *N) const {
      if (!N->isNonTemporal())
        return false;

      unsigned StoreSize = N->getMemoryVT().getStoreSize();

      if (N->getAlignment() < StoreSize)
        return false;

      switch (StoreSize) {
      default: llvm_unreachable("Unsupported store size");
      case 4:
      case 8:
        return false;
      case 16:
        return Subtarget->hasSSE41();
      case 32:
        return Subtarget->hasAVX2();
      case 64:
        return Subtarget->hasAVX512();
      }
    }
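
    // Illustrative mapping: the cases above track the available MOVNTDQA
    // forms - 16-byte non-temporal loads need SSE4.1, 32-byte loads need
    // the AVX2 ymm form, and 64-byte loads need the AVX-512 zmm form;
    // 4- and 8-byte accesses have no non-temporal load instruction at all.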

    bool foldLoadStoreIntoMemOperand(SDNode *Node);
    MachineSDNode *matchBEXTRFromAndImm(SDNode *Node);
    bool matchBitExtract(SDNode *Node);
    bool shrinkAndImmediate(SDNode *N);
    bool isMaskZeroExtended(SDNode *N) const;
    bool tryShiftAmountMod(SDNode *N);
    bool tryShrinkShlLogicImm(SDNode *N);
    bool tryVPTERNLOG(SDNode *N);
    bool matchVPTERNLOG(SDNode *Root, SDNode *ParentA, SDNode *ParentBC,
                        SDValue A, SDValue B, SDValue C, uint8_t Imm);
    bool tryVPTESTM(SDNode *Root, SDValue Setcc, SDValue Mask);
    bool tryMatchBitSelect(SDNode *N);

    MachineSDNode *emitPCMPISTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
                                const SDLoc &dl, MVT VT, SDNode *Node);
    MachineSDNode *emitPCMPESTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
                                const SDLoc &dl, MVT VT, SDNode *Node,
                                SDValue &InFlag);

    bool tryOptimizeRem8Extend(SDNode *N);

    bool onlyUsesZeroFlag(SDValue Flags) const;
    bool hasNoSignFlagUses(SDValue Flags) const;
    bool hasNoCarryFlagUses(SDValue Flags) const;
  };
} // end anonymous namespace


// Returns true if this masked compare can be implemented legally with this
// type.
static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == X86ISD::CMPM || Opcode == X86ISD::CMPMM ||
      Opcode == X86ISD::STRICT_CMPM || Opcode == ISD::SETCC ||
      Opcode == X86ISD::CMPMM_SAE || Opcode == X86ISD::VFPCLASS) {
    // We can get 256-bit 8 element types here without VLX being enabled. When
    // this happens we will use 512-bit operations and the mask will not be
    // zero extended.
    EVT OpVT = N->getOperand(0).getValueType();
    // The first operand of X86ISD::STRICT_CMPM is chain, so we need to get the
    // second operand.
    if (Opcode == X86ISD::STRICT_CMPM)
      OpVT = N->getOperand(1).getValueType();
    if (OpVT.is256BitVector() || OpVT.is128BitVector())
      return Subtarget->hasVLX();

    return true;
  }
  // Scalar opcodes use 128 bit registers, but aren't subject to the VLX check.
  if (Opcode == X86ISD::VFPCLASSS || Opcode == X86ISD::FSETCCM ||
      Opcode == X86ISD::FSETCCM_SAE)
    return true;

  return false;
}
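
// Illustrative example: a v8i32 compare produces a v8i1 mask. With VLX the
// 256-bit compare writes a mask register whose upper bits are zeroed;
// without VLX the operation is widened to 512 bits and the high mask bits
// reflect the widened lanes, which is why the 128/256-bit cases above
// require hasVLX().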

// Returns true if we can assume the writer of the mask has zero extended it
// for us.
bool X86DAGToDAGISel::isMaskZeroExtended(SDNode *N) const {
  // If this is an AND, check if we have a compare on either side. As long as
  // one side guarantees the mask is zero extended, the AND will preserve those
  // zeros.
  if (N->getOpcode() == ISD::AND)
    return isLegalMaskCompare(N->getOperand(0).getNode(), Subtarget) ||
           isLegalMaskCompare(N->getOperand(1).getNode(), Subtarget);

  return isLegalMaskCompare(N, Subtarget);
}

bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // Don't fold non-temporal loads if we have an instruction for them.
  if (useNonTemporalLoad(cast<LoadSDNode>(N)))
    return false;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::ADC:
    case X86ISD::SUB:
    case X86ISD::SBB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDCARRY:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      //   movl 4(%esp), %eax
      //   addl $4, %eax
      // vs.
      //   movl $4, %eax
      //   addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1)) {
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

        // If this is a 64-bit AND with an immediate that fits in 32-bits,
        // prefer using the smaller and over folding the load. This is needed
        // to make sure immediates created by shrinkAndImmediate are always
        // folded. Ideally we would narrow the load during DAG combine and get
        // the best of both worlds.
        if (U->getOpcode() == ISD::AND &&
            Imm->getAPIntValue().getBitWidth() == 64 &&
            Imm->getAPIntValue().isIntN(32))
          return false;

        // If this is really a zext_inreg that can be represented with a movzx
        // instruction, prefer that.
        // TODO: We could shrink the load and fold if it is non-volatile.
        if (U->getOpcode() == ISD::AND &&
            (Imm->getAPIntValue() == UINT8_MAX ||
             Imm->getAPIntValue() == UINT16_MAX ||
             Imm->getAPIntValue() == UINT32_MAX))
          return false;

        // ADD/SUB can negate the immediate and use the opposite operation
        // to fit 128 into a sign extended 8 bit immediate.
        if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB) &&
            (-Imm->getAPIntValue()).isSignedIntN(8))
          return false;

        if ((U->getOpcode() == X86ISD::ADD || U->getOpcode() == X86ISD::SUB) &&
            (-Imm->getAPIntValue()).isSignedIntN(8) &&
            hasNoCarryFlagUses(SDValue(U, 1)))
          return false;
      }

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      //   movl %gs:0, %eax
      //   leal i@NTPOFF(%eax), %eax
      // instead of
      //   movl $i@NTPOFF, %eax
      //   addl %gs:0, %eax
      // If the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }

      // Don't fold load if this matches the BTS/BTR/BTC patterns.
      // BTS: (or X, (shl 1, n))
      // BTR: (and X, (rotl -2, n))
      // BTC: (xor X, (shl 1, n))
      if (U->getOpcode() == ISD::OR || U->getOpcode() == ISD::XOR) {
        if (U->getOperand(0).getOpcode() == ISD::SHL &&
            isOneConstant(U->getOperand(0).getOperand(0)))
          return false;

        if (U->getOperand(1).getOpcode() == ISD::SHL &&
            isOneConstant(U->getOperand(1).getOperand(0)))
          return false;
      }
      if (U->getOpcode() == ISD::AND) {
        SDValue U0 = U->getOperand(0);
        SDValue U1 = U->getOperand(1);
        if (U0.getOpcode() == ISD::ROTL) {
          auto *C = dyn_cast<ConstantSDNode>(U0.getOperand(0));
          if (C && C->getSExtValue() == -2)
            return false;
        }

        if (U1.getOpcode() == ISD::ROTL) {
          auto *C = dyn_cast<ConstantSDNode>(U1.getOperand(0));
          if (C && C->getSExtValue() == -2)
            return false;
        }
      }

      break;
    }
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
      // Don't fold a load into a shift by immediate. The BMI2 instructions
      // support folding a load, but not an immediate. The legacy instructions
      // support folding an immediate, but can't fold a load. Folding an
      // immediate is preferable to folding a load.
      if (isa<ConstantSDNode>(U->getOperand(1)))
        return false;

      break;
    }
  }

  // Prevent folding a load if this can be implemented with an insert_subreg or
  // a move that implicitly zeroes.
  if (Root->getOpcode() == ISD::INSERT_SUBVECTOR &&
      isNullConstant(Root->getOperand(2)) &&
      (Root->getOperand(0).isUndef() ||
       ISD::isBuildVectorAllZeros(Root->getOperand(0).getNode())))
    return false;

  return true;
}

// Indicates it is profitable to form an AVX512 masked operation. Returning
// false will favor a masked register-register move or vblendm, and the
// operation will be selected separately.
bool X86DAGToDAGISel::isProfitableToFormMaskedOp(SDNode *N) const {
  assert(
      (N->getOpcode() == ISD::VSELECT || N->getOpcode() == X86ISD::SELECTS) &&
      "Unexpected opcode!");

  // If the operation has additional users, the operation will be duplicated.
  // Check the use count to prevent that.
  // FIXME: Are there cheap opcodes we might want to duplicate?
  return N->getOperand(1).hasOneUse();
}
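
// Illustrative example: for
//   t0 = add v16i32 X, Y
//   t1 = vselect M, t0, PassThru
// folding t0 into a masked VPADDD is only profitable when t1 is t0's sole
// user; otherwise the add would have to be emitted a second time for the
// remaining users.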

/// Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
        CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
    Ops.clear();
    Ops.push_back(NewChain);
  }
  Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  Ops.append(Call->op_begin() + 1, Call->op_end());
  CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}

/// Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain, this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      !LD->isSimple() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

static bool isEndbrImm64(uint64_t Imm) {
  // There may be some other prefix bytes between 0xF3 and 0x0F1EFA,
  // e.g. 0xF3660F1EFA or 0xF3670F1EFA.
  if ((Imm & 0x00FFFFFF) != 0x0F1EFA)
    return false;

  uint8_t OptionalPrefixBytes[] = {0x26, 0x2e, 0x36, 0x3e, 0x64,
                                   0x65, 0x66, 0x67, 0xf0, 0xf2};
  int i = 24; // The low 24 bits (0x0F1EFA) have already been matched.
  while (i < 64) {
    uint8_t Byte = (Imm >> i) & 0xFF;
    if (Byte == 0xF3)
      return true;
    if (!llvm::is_contained(OptionalPrefixBytes, Byte))
      return false;
    i += 8;
  }

  return false;
}
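
// Illustrative trace: for Imm = 0xF3660F1EFA the low 24 bits are 0x0F1EFA,
// the byte at bit 24 is 0x66 (an allowed optional prefix), and the byte at
// bit 32 is 0xF3, so this returns true - the constant would embed a fake
// ENDBR64 and needs to be split.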

void X86DAGToDAGISel::PreprocessISelDAG() {
  bool MadeChange = false;
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // This is for CET enhancement.
    //
    // ENDBR32 and ENDBR64 have specific opcodes:
    // ENDBR32: F3 0F 1E FB
    // ENDBR64: F3 0F 1E FA
    // We do not want attackers to find unintended ENDBR32/64 opcode matches
    // in the binary. Here's an example: if the compiler had to generate asm
    // for the following code:
    //   a = 0xF30F1EFA
    // it could, for example, generate:
    //   mov dword ptr [a], 0xF30F1EFA
    // In such a case, the binary would include a gadget that starts with a
    // fake ENDBR64 opcode. Therefore, we split such generation into multiple
    // operations so that the constant does not appear verbatim in the binary.
    if (N->getOpcode() == ISD::Constant) {
      MVT VT = N->getSimpleValueType(0);
      int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();
      int32_t EndbrImm = Subtarget->is64Bit() ? 0xF30F1EFA : 0xF30F1EFB;
      if (Imm == EndbrImm || isEndbrImm64(Imm)) {
        // Check that the cf-protection-branch is enabled.
        Metadata *CFProtectionBranch =
            MF->getMMI().getModule()->getModuleFlag("cf-protection-branch");
        if (CFProtectionBranch || IndirectBranchTracking) {
          SDLoc dl(N);
          SDValue Complement = CurDAG->getConstant(~Imm, dl, VT, false, true);
          Complement = CurDAG->getNOT(dl, Complement, VT);
          --I;
          CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Complement);
          ++I;
          MadeChange = true;
          continue;
        }
      }
    }

    // If this is a target specific AND node with no flag usages, turn it back
    // into ISD::AND to enable test instruction matching.
    if (N->getOpcode() == X86ISD::AND && !N->hasAnyUseOfValue(1)) {
      SDValue Res = CurDAG->getNode(ISD::AND, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0), N->getOperand(1));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }

    /// Convert vector increment or decrement to sub/add with an all-ones
    /// constant:
    /// add X, <1, 1...> --> sub X, <-1, -1...>
    /// sub X, <1, 1...> --> add X, <-1, -1...>
    /// The all-ones vector constant can be materialized using a pcmpeq
    /// instruction that is commonly recognized as an idiom (has no register
    /// dependency), so that's better/smaller than loading a splat 1 constant.
    if ((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
        N->getSimpleValueType(0).isVector()) {

      APInt SplatVal;
      if (X86::isConstantSplat(N->getOperand(1), SplatVal) &&
          SplatVal.isOneValue()) {
        SDLoc DL(N);

        MVT VT = N->getSimpleValueType(0);
        unsigned NumElts = VT.getSizeInBits() / 32;
        SDValue AllOnes =
            CurDAG->getAllOnesConstant(DL, MVT::getVectorVT(MVT::i32, NumElts));
        AllOnes = CurDAG->getBitcast(VT, AllOnes);

        unsigned NewOpcode = N->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
        SDValue Res =
            CurDAG->getNode(NewOpcode, DL, VT, N->getOperand(0), AllOnes);
        --I;
        CurDAG->ReplaceAllUsesWith(N, Res.getNode());
        ++I;
        MadeChange = true;
        continue;
      }
    }

    switch (N->getOpcode()) {
    case X86ISD::VBROADCAST: {
      MVT VT = N->getSimpleValueType(0);
      // Emulate v32i16/v64i8 broadcast without BWI.
      if (!Subtarget->hasBWI() && (VT == MVT::v32i16 || VT == MVT::v64i8)) {
        MVT NarrowVT = VT == MVT::v32i16 ? MVT::v16i16 : MVT::v32i8;
        SDLoc dl(N);
        SDValue NarrowBCast =
            CurDAG->getNode(X86ISD::VBROADCAST, dl, NarrowVT, N->getOperand(0));
        SDValue Res =
            CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, CurDAG->getUNDEF(VT),
                            NarrowBCast, CurDAG->getIntPtrConstant(0, dl));
        unsigned Index = VT == MVT::v32i16 ? 16 : 32;
        Res = CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, NarrowBCast,
                              CurDAG->getIntPtrConstant(Index, dl));

        --I;
        CurDAG->ReplaceAllUsesWith(N, Res.getNode());
        ++I;
        MadeChange = true;
        continue;
      }

      break;
    }
    case X86ISD::VBROADCAST_LOAD: {
      MVT VT = N->getSimpleValueType(0);
      // Emulate v32i16/v64i8 broadcast without BWI.
      if (!Subtarget->hasBWI() && (VT == MVT::v32i16 || VT == MVT::v64i8)) {
        MVT NarrowVT = VT == MVT::v32i16 ? MVT::v16i16 : MVT::v32i8;
        auto *MemNode = cast<MemSDNode>(N);
        SDLoc dl(N);
        SDVTList VTs = CurDAG->getVTList(NarrowVT, MVT::Other);
        SDValue Ops[] = {MemNode->getChain(), MemNode->getBasePtr()};
        SDValue NarrowBCast = CurDAG->getMemIntrinsicNode(
            X86ISD::VBROADCAST_LOAD, dl, VTs, Ops, MemNode->getMemoryVT(),
            MemNode->getMemOperand());
        SDValue Res =
            CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, CurDAG->getUNDEF(VT),
                            NarrowBCast, CurDAG->getIntPtrConstant(0, dl));
        unsigned Index = VT == MVT::v32i16 ? 16 : 32;
        Res = CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, NarrowBCast,
                              CurDAG->getIntPtrConstant(Index, dl));

        --I;
        SDValue To[] = {Res, NarrowBCast.getValue(1)};
        CurDAG->ReplaceAllUsesWith(N, To);
        ++I;
        MadeChange = true;
        continue;
      }

      break;
    }
    case ISD::VSELECT: {
      // Replace VSELECT with non-mask conditions with BLENDV.
      if (N->getOperand(0).getValueType().getVectorElementType() == MVT::i1)
        break;

      assert(Subtarget->hasSSE41() && "Expected SSE4.1 support!");
      SDValue Blendv =
          CurDAG->getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0),
                          N->getOperand(0), N->getOperand(1), N->getOperand(2));
      --I;
      CurDAG->ReplaceAllUsesWith(N, Blendv.getNode());
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::FP_ROUND:
    case ISD::STRICT_FP_ROUND:
    case ISD::FP_TO_SINT:
    case ISD::FP_TO_UINT:
    case ISD::STRICT_FP_TO_SINT:
    case ISD::STRICT_FP_TO_UINT: {
      // Replace vector fp_to_s/uint with their X86 specific equivalent so we
      // don't need 2 sets of patterns.
      if (!N->getSimpleValueType(0).isVector())
        break;

      unsigned NewOpc;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::FP_ROUND:          NewOpc = X86ISD::VFPROUND;        break;
      case ISD::STRICT_FP_ROUND:   NewOpc = X86ISD::STRICT_VFPROUND; break;
      case ISD::STRICT_FP_TO_SINT: NewOpc = X86ISD::STRICT_CVTTP2SI; break;
      case ISD::FP_TO_SINT:        NewOpc = X86ISD::CVTTP2SI;        break;
      case ISD::STRICT_FP_TO_UINT: NewOpc = X86ISD::STRICT_CVTTP2UI; break;
      case ISD::FP_TO_UINT:        NewOpc = X86ISD::CVTTP2UI;        break;
      }
      SDValue Res;
      if (N->isStrictFPOpcode())
        Res =
            CurDAG->getNode(NewOpc, SDLoc(N), {N->getValueType(0), MVT::Other},
                            {N->getOperand(0), N->getOperand(1)});
      else
        Res =
            CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                            N->getOperand(0));
      --I;
      CurDAG->ReplaceAllUsesWith(N, Res.getNode());
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL: {
      // Replace vector shifts with their X86 specific equivalent so we don't
      // need 2 sets of patterns.
      if (!N->getValueType(0).isVector())
        break;

      unsigned NewOpc;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::SHL: NewOpc = X86ISD::VSHLV; break;
      case ISD::SRA: NewOpc = X86ISD::VSRAV; break;
      case ISD::SRL: NewOpc = X86ISD::VSRLV; break;
      }
      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0), N->getOperand(1));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::ANY_EXTEND:
    case ISD::ANY_EXTEND_VECTOR_INREG: {
      // Replace vector any extend with the zero extend equivalents so we don't
      // need 2 sets of patterns. Ignore vXi1 extensions.
      if (!N->getValueType(0).isVector())
        break;

      unsigned NewOpc;
      if (N->getOperand(0).getScalarValueSizeInBits() == 1) {
        assert(N->getOpcode() == ISD::ANY_EXTEND &&
               "Unexpected opcode for mask vector!");
        NewOpc = ISD::SIGN_EXTEND;
      } else {
        NewOpc = N->getOpcode() == ISD::ANY_EXTEND
                     ? ISD::ZERO_EXTEND
                     : ISD::ZERO_EXTEND_VECTOR_INREG;
      }

      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::FCEIL:
    case ISD::STRICT_FCEIL:
    case ISD::FFLOOR:
    case ISD::STRICT_FFLOOR:
    case ISD::FTRUNC:
    case ISD::STRICT_FTRUNC:
    case ISD::FROUNDEVEN:
    case ISD::STRICT_FROUNDEVEN:
    case ISD::FNEARBYINT:
    case ISD::STRICT_FNEARBYINT:
    case ISD::FRINT:
    case ISD::STRICT_FRINT: {
      // Replace fp rounding with their X86 specific equivalent so we don't
      // need 2 sets of patterns.
      unsigned Imm;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::STRICT_FCEIL:
      case ISD::FCEIL:      Imm = 0xA; break;
      case ISD::STRICT_FFLOOR:
      case ISD::FFLOOR:     Imm = 0x9; break;
      case ISD::STRICT_FTRUNC:
      case ISD::FTRUNC:     Imm = 0xB; break;
      case ISD::STRICT_FROUNDEVEN:
      case ISD::FROUNDEVEN: Imm = 0x8; break;
      case ISD::STRICT_FNEARBYINT:
      case ISD::FNEARBYINT: Imm = 0xC; break;
      case ISD::STRICT_FRINT:
      case ISD::FRINT:      Imm = 0x4; break;
      }
      SDLoc dl(N);
      bool IsStrict = N->isStrictFPOpcode();
      SDValue Res;
      if (IsStrict)
        Res = CurDAG->getNode(X86ISD::STRICT_VRNDSCALE, dl,
                              {N->getValueType(0), MVT::Other},
                              {N->getOperand(0), N->getOperand(1),
                               CurDAG->getTargetConstant(Imm, dl, MVT::i32)});
      else
        Res = CurDAG->getNode(X86ISD::VRNDSCALE, dl, N->getValueType(0),
                              N->getOperand(0),
                              CurDAG->getTargetConstant(Imm, dl, MVT::i32));
      --I;
      CurDAG->ReplaceAllUsesWith(N, Res.getNode());
      ++I;
      MadeChange = true;
      continue;
    }
    case X86ISD::FANDN:
    case X86ISD::FAND:
    case X86ISD::FOR:
    case X86ISD::FXOR: {
      // Widen scalar fp logic ops to vector to reduce isel patterns.
      // FIXME: Can we do this during lowering/combine?
      MVT VT = N->getSimpleValueType(0);
      if (VT.isVector() || VT == MVT::f128)
        break;

      MVT VecVT = VT == MVT::f64 ? MVT::v2f64 : MVT::v4f32;
      SDLoc dl(N);
      SDValue Op0 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
                                    N->getOperand(0));
      SDValue Op1 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
                                    N->getOperand(1));

      SDValue Res;
      if (Subtarget->hasSSE2()) {
        EVT IntVT = EVT(VecVT).changeVectorElementTypeToInteger();
        Op0 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op0);
        Op1 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op1);
        unsigned Opc;
        switch (N->getOpcode()) {
        default: llvm_unreachable("Unexpected opcode!");
        case X86ISD::FANDN: Opc = X86ISD::ANDNP; break;
        case X86ISD::FAND:  Opc = ISD::AND;      break;
        case X86ISD::FOR:   Opc = ISD::OR;       break;
        case X86ISD::FXOR:  Opc = ISD::XOR;      break;
        }
        Res = CurDAG->getNode(Opc, dl, IntVT, Op0, Op1);
        Res = CurDAG->getNode(ISD::BITCAST, dl, VecVT, Res);
      } else {
        Res = CurDAG->getNode(N->getOpcode(), dl, VecVT, Op0, Op1);
      }
      Res = CurDAG->getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res,
                            CurDAG->getIntPtrConstant(0, dl));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }
    }

    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target can fold the load into the call or
        // jmp.
        !Subtarget->useIndirectThunkCalls() &&
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->slowTwoMemOps()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          (Subtarget->is64Bit() ||
           !getTargetMachine().isPositionIndependent())))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      MadeChange = true;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to be a store
    // and a load through the stack. This is a gross hack. We would like to
    // simply mark these as being illegal, but when we do that, legalize
    // produces these when it expands calls, then expands these in the same
    // legalize pass. We would like dag combine to be able to hack on these
    // between the call expansion and the node legalization. As such this pass
    // basically does "really late" legalization of these inline with the X86
    // isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    switch (N->getOpcode()) {
    default: continue;
    case ISD::FP_ROUND:
    case ISD::FP_EXTEND:
    {
      MVT SrcVT = N->getOperand(0).getSimpleValueType();
      MVT DstVT = N->getSimpleValueType(0);

      // If any of the sources are vectors, no fp stack involved.
      if (SrcVT.isVector() || DstVT.isVector())
        continue;

      // If the source and destination are SSE registers, then this is a legal
      // conversion that should not be lowered.
      const X86TargetLowering *X86Lowering =
          static_cast<const X86TargetLowering *>(TLI);
      bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
      bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
      if (SrcIsSSE && DstIsSSE)
        continue;

      if (!SrcIsSSE && !DstIsSSE) {
        // If this is an FPStack extension, it is a noop.
        if (N->getOpcode() == ISD::FP_EXTEND)
          continue;
        // If this is a value-preserving FPStack truncation, it is a noop.
        if (N->getConstantOperandVal(1))
          continue;
      }

      // Here we could have an FP stack truncation or an FPStack <-> SSE
      // convert. FPStack has extload and truncstore. SSE can fold direct
      // loads into other operations. Based on this, decide what we want to do.
      MVT MemVT = (N->getOpcode() == ISD::FP_ROUND) ? DstVT : SrcVT;
      SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
      int SPFI = cast<FrameIndexSDNode>(MemTmp)->getIndex();
      MachinePointerInfo MPI =
          MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), SPFI);
      SDLoc dl(N);

      // FIXME: optimize the case where the src/dest is a load or store?

      SDValue Store = CurDAG->getTruncStore(
          CurDAG->getEntryNode(), dl, N->getOperand(0), MemTmp, MPI, MemVT);
      SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store,
                                          MemTmp, MPI, MemVT);

      // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
      // extload we created. This will cause general havoc on the dag because
      // anything below the conversion could be folded into other existing
      // nodes. To avoid invalidating 'I', back it up to the convert node.
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
      break;
    }

    // The sequence of events for lowering STRICT_FP versions of these nodes
    // requires dealing with the chain differently, as there is already a
    // preexisting chain.
    case ISD::STRICT_FP_ROUND:
    case ISD::STRICT_FP_EXTEND:
    {
      MVT SrcVT = N->getOperand(1).getSimpleValueType();
      MVT DstVT = N->getSimpleValueType(0);

      // If any of the sources are vectors, no fp stack involved.
      if (SrcVT.isVector() || DstVT.isVector())
        continue;

      // If the source and destination are SSE registers, then this is a legal
      // conversion that should not be lowered.
      const X86TargetLowering *X86Lowering =
          static_cast<const X86TargetLowering *>(TLI);
      bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
      bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
      if (SrcIsSSE && DstIsSSE)
        continue;

      if (!SrcIsSSE && !DstIsSSE) {
        // If this is an FPStack extension, it is a noop.
        if (N->getOpcode() == ISD::STRICT_FP_EXTEND)
          continue;
        // If this is a value-preserving FPStack truncation, it is a noop.
        if (N->getConstantOperandVal(2))
          continue;
      }

      // Here we could have an FP stack truncation or an FPStack <-> SSE
      // convert. FPStack has extload and truncstore. SSE can fold direct
      // loads into other operations. Based on this, decide what we want to do.
      MVT MemVT = (N->getOpcode() == ISD::STRICT_FP_ROUND) ? DstVT : SrcVT;
      SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
      int SPFI = cast<FrameIndexSDNode>(MemTmp)->getIndex();
      MachinePointerInfo MPI =
          MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), SPFI);
      SDLoc dl(N);

      // FIXME: optimize the case where the src/dest is a load or store?

      // Since the operation is StrictFP, use the preexisting chain.
      SDValue Store, Result;
      if (!SrcIsSSE) {
        SDVTList VTs = CurDAG->getVTList(MVT::Other);
        SDValue Ops[] = {N->getOperand(0), N->getOperand(1), MemTmp};
        Store = CurDAG->getMemIntrinsicNode(X86ISD::FST, dl, VTs, Ops, MemVT,
                                            MPI, /*Align*/ None,
                                            MachineMemOperand::MOStore);
        if (N->getFlags().hasNoFPExcept()) {
          SDNodeFlags Flags = Store->getFlags();
          Flags.setNoFPExcept(true);
          Store->setFlags(Flags);
        }
      } else {
        assert(SrcVT == MemVT && "Unexpected VT!");
        Store = CurDAG->getStore(N->getOperand(0), dl, N->getOperand(1), MemTmp,
                                 MPI);
      }

      if (!DstIsSSE) {
        SDVTList VTs = CurDAG->getVTList(DstVT, MVT::Other);
        SDValue Ops[] = {Store, MemTmp};
        Result = CurDAG->getMemIntrinsicNode(
            X86ISD::FLD, dl, VTs, Ops, MemVT, MPI,
            /*Align*/ None, MachineMemOperand::MOLoad);
        if (N->getFlags().hasNoFPExcept()) {
          SDNodeFlags Flags = Result->getFlags();
          Flags.setNoFPExcept(true);
          Result->setFlags(Flags);
        }
      } else {
        assert(DstVT == MemVT && "Unexpected VT!");
        Result = CurDAG->getLoad(DstVT, dl, Store, MemTmp, MPI);
      }

      // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
      // extload we created. This will cause general havoc on the dag because
      // anything below the conversion could be folded into other existing
      // nodes. To avoid invalidating 'I', back it up to the convert node.
      --I;
      CurDAG->ReplaceAllUsesWith(N, Result.getNode());
      break;
    }
    }

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    MadeChange = true;
  }

  // Remove any dead nodes that may have been left behind.
  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}

// Look for a redundant movzx/movsx that can occur after an 8-bit divrem.
bool X86DAGToDAGISel::tryOptimizeRem8Extend(SDNode *N) {
  unsigned Opc = N->getMachineOpcode();
  if (Opc != X86::MOVZX32rr8 && Opc != X86::MOVSX32rr8 &&
      Opc != X86::MOVSX64rr8)
    return false;

  SDValue N0 = N->getOperand(0);

  // We need to be extracting the low byte of an extend.
  if (!N0.isMachineOpcode() ||
      N0.getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG ||
      N0.getConstantOperandVal(1) != X86::sub_8bit)
    return false;

  // We're looking for either a movsx or movzx to match the original opcode.
  unsigned ExpectedOpc = Opc == X86::MOVZX32rr8 ? X86::MOVZX32rr8_NOREX
                                                : X86::MOVSX32rr8_NOREX;
  SDValue N00 = N0.getOperand(0);
  if (!N00.isMachineOpcode() || N00.getMachineOpcode() != ExpectedOpc)
    return false;

  if (Opc == X86::MOVSX64rr8) {
    // If we had a sign extend from 8 to 64 bits, we still need to go from 32
    // to 64.
    MachineSDNode *Extend = CurDAG->getMachineNode(X86::MOVSX64rr32, SDLoc(N),
                                                   MVT::i64, N00);
    ReplaceUses(N, Extend);
  } else {
    // Ok we can drop this extend and just use the original extend.
    ReplaceUses(N, N00.getNode());
  }

  return true;
}
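
// Illustrative pattern: after an 8-bit divrem the remainder is extracted
// from AH roughly as
//   t0 = MOVZX32rr8_NOREX <ah-copy>
//   t1 = EXTRACT_SUBREG t0, sub_8bit
//   t2 = MOVZX32rr8 t1
// t2 merely re-extends what t0 already produced, so uses of t2 can be
// rewritten to t0 directly (via an extra MOVSX64rr32 in the sign-extending
// i64 case).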

void X86DAGToDAGISel::PostprocessISelDAG() {
  // Skip peepholes at -O0.
  if (TM.getOptLevel() == CodeGenOpt::None)
    return;

  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    if (tryOptimizeRem8Extend(N)) {
      MadeChange = true;
      continue;
    }

    // Look for a TESTrr+ANDrr pattern where both operands of the test are
    // the same. Rewrite to remove the AND.
    unsigned Opc = N->getMachineOpcode();
    if ((Opc == X86::TEST8rr || Opc == X86::TEST16rr ||
         Opc == X86::TEST32rr || Opc == X86::TEST64rr) &&
        N->getOperand(0) == N->getOperand(1) &&
        N->isOnlyUserOf(N->getOperand(0).getNode()) &&
        N->getOperand(0).isMachineOpcode()) {
      SDValue And = N->getOperand(0);
      unsigned N0Opc = And.getMachineOpcode();
      if (N0Opc == X86::AND8rr || N0Opc == X86::AND16rr ||
          N0Opc == X86::AND32rr || N0Opc == X86::AND64rr) {
        MachineSDNode *Test = CurDAG->getMachineNode(Opc, SDLoc(N),
                                                     MVT::i32,
                                                     And.getOperand(0),
                                                     And.getOperand(1));
        ReplaceUses(N, Test);
        MadeChange = true;
        continue;
      }
      if (N0Opc == X86::AND8rm || N0Opc == X86::AND16rm ||
          N0Opc == X86::AND32rm || N0Opc == X86::AND64rm) {
        unsigned NewOpc;
        switch (N0Opc) {
        case X86::AND8rm:  NewOpc = X86::TEST8mr;  break;
        case X86::AND16rm: NewOpc = X86::TEST16mr; break;
        case X86::AND32rm: NewOpc = X86::TEST32mr; break;
        case X86::AND64rm: NewOpc = X86::TEST64mr; break;
        }

        // Need to swap the memory and register operand.
        SDValue Ops[] = { And.getOperand(1),
                          And.getOperand(2),
                          And.getOperand(3),
                          And.getOperand(4),
                          And.getOperand(5),
                          And.getOperand(0),
                          And.getOperand(6)  /* Chain */ };
        MachineSDNode *Test = CurDAG->getMachineNode(NewOpc, SDLoc(N),
                                                     MVT::i32, MVT::Other, Ops);
        CurDAG->setNodeMemRefs(
            Test, cast<MachineSDNode>(And.getNode())->memoperands());
        ReplaceUses(N, Test);
        MadeChange = true;
        continue;
      }
    }

    // Look for a KAND+KORTEST and turn it into KTEST if only the zero flag is
    // used. We're doing this late so we can prefer to fold the AND into masked
    // comparisons. Doing that can be better for the live range of the mask
    // register.
    if ((Opc == X86::KORTESTBrr || Opc == X86::KORTESTWrr ||
         Opc == X86::KORTESTDrr || Opc == X86::KORTESTQrr) &&
        N->getOperand(0) == N->getOperand(1) &&
        N->isOnlyUserOf(N->getOperand(0).getNode()) &&
        N->getOperand(0).isMachineOpcode() &&
        onlyUsesZeroFlag(SDValue(N, 0))) {
      SDValue And = N->getOperand(0);
      unsigned N0Opc = And.getMachineOpcode();
      // KANDW is legal with AVX512F, but KTESTW requires AVX512DQ. The other
      // KAND instructions and KTEST use the same ISA feature.
      if (N0Opc == X86::KANDBrr ||
          (N0Opc == X86::KANDWrr && Subtarget->hasDQI()) ||
          N0Opc == X86::KANDDrr || N0Opc == X86::KANDQrr) {
        unsigned NewOpc;
        switch (Opc) {
        default: llvm_unreachable("Unexpected opcode!");
        case X86::KORTESTBrr: NewOpc = X86::KTESTBrr; break;
        case X86::KORTESTWrr: NewOpc = X86::KTESTWrr; break;
        case X86::KORTESTDrr: NewOpc = X86::KTESTDrr; break;
        case X86::KORTESTQrr: NewOpc = X86::KTESTQrr; break;
        }
        MachineSDNode *KTest = CurDAG->getMachineNode(NewOpc, SDLoc(N),
                                                      MVT::i32,
                                                      And.getOperand(0),
                                                      And.getOperand(1));
        ReplaceUses(N, KTest);
        MadeChange = true;
        continue;
      }
    }

    // Attempt to remove vector moves that were inserted to zero upper bits.
    if (Opc != TargetOpcode::SUBREG_TO_REG)
      continue;

    unsigned SubRegIdx = N->getConstantOperandVal(2);
    if (SubRegIdx != X86::sub_xmm && SubRegIdx != X86::sub_ymm)
      continue;

    SDValue Move = N->getOperand(1);
    if (!Move.isMachineOpcode())
      continue;

    // Make sure it's one of the move opcodes we recognize.
    switch (Move.getMachineOpcode()) {
    default:
      continue;
    case X86::VMOVAPDrr:       case X86::VMOVUPDrr:
    case X86::VMOVAPSrr:       case X86::VMOVUPSrr:
    case X86::VMOVDQArr:       case X86::VMOVDQUrr:
    case X86::VMOVAPDYrr:      case X86::VMOVUPDYrr:
    case X86::VMOVAPSYrr:      case X86::VMOVUPSYrr:
    case X86::VMOVDQAYrr:      case X86::VMOVDQUYrr:
    case X86::VMOVAPDZ128rr:   case X86::VMOVUPDZ128rr:
    case X86::VMOVAPSZ128rr:   case X86::VMOVUPSZ128rr:
    case X86::VMOVDQA32Z128rr: case X86::VMOVDQU32Z128rr:
    case X86::VMOVDQA64Z128rr: case X86::VMOVDQU64Z128rr:
    case X86::VMOVAPDZ256rr:   case X86::VMOVUPDZ256rr:
    case X86::VMOVAPSZ256rr:   case X86::VMOVUPSZ256rr:
    case X86::VMOVDQA32Z256rr: case X86::VMOVDQU32Z256rr:
    case X86::VMOVDQA64Z256rr: case X86::VMOVDQU64Z256rr:
      break;
    }

    SDValue In = Move.getOperand(0);
    if (!In.isMachineOpcode() ||
        In.getMachineOpcode() <= TargetOpcode::GENERIC_OP_END)
      continue;

    // Make sure the instruction has a VEX, XOP, or EVEX prefix. This covers
    // the SHA instructions which use a legacy encoding.
    uint64_t TSFlags = getInstrInfo()->get(In.getMachineOpcode()).TSFlags;
    if ((TSFlags & X86II::EncodingMask) != X86II::VEX &&
        (TSFlags & X86II::EncodingMask) != X86II::EVEX &&
        (TSFlags & X86II::EncodingMask) != X86II::XOP)
      continue;

    // Producing instruction is another vector instruction. We can drop the
    // move.
    CurDAG->UpdateNodeOperands(N, N->getOperand(0), In, N->getOperand(2));
    MadeChange = true;
  }

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}


/// Emit any code that needs to be executed only in the main function.
void X86DAGToDAGISel::emitSpecialCodeForMain() {
  if (Subtarget->isTargetCygMing()) {
    TargetLowering::ArgListTy Args;
    auto &DL = CurDAG->getDataLayout();

    TargetLowering::CallLoweringInfo CLI(*CurDAG);
    CLI.setChain(CurDAG->getRoot())
        .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
                   CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
                   std::move(Args));
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    CurDAG->setRoot(Result.second);
  }
}
1571 
1572 void X86DAGToDAGISel::emitFunctionEntryCode() {
1573  // If this is main, emit special code for main.
1574  const Function &F = MF->getFunction();
1575  if (F.hasExternalLinkage() && F.getName() == "main")
1576  emitSpecialCodeForMain();
1577 }
1578 
1579 static bool isDispSafeForFrameIndex(int64_t Val) {
1580  // On 64-bit platforms, we can run into an issue where a frame index
1581  // includes a displacement that, when added to the explicit displacement,
1582  // will overflow the displacement field. Assuming that the frame index
1583  // displacement fits into a 31-bit integer (which is only slightly more
1584  // aggressive than the current fundamental assumption that it fits into
1585  // a 32-bit integer), a 31-bit disp should always be safe.
1586  return isInt<31>(Val);
1587 }
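// An illustrative (hypothetical) overflow: if an instruction already carries
// an explicit displacement near INT32_MAX, adding the frame object's offset
// during frame lowering could wrap the 32-bit displacement field. Capping the
// pre-layout displacement at 31 bits (isInt<31>) leaves headroom for that
// later addition.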
1588 
1589 bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
1590  X86ISelAddressMode &AM) {
1591  // We may have already matched a displacement and the caller just added the
1592  // symbolic displacement. So we still need to do the checks even if Offset
1593  // is zero.
1594 
1595  int64_t Val = AM.Disp + Offset;
1596 
1597  // Cannot combine ExternalSymbol displacements with integer offsets.
1598  if (Val != 0 && (AM.ES || AM.MCSym))
1599  return true;
1600 
1601  CodeModel::Model M = TM.getCodeModel();
1602  if (Subtarget->is64Bit()) {
1603  if (Val != 0 &&
1604  !X86::isOffsetSuitableForCodeModel(M, Val,
1605  AM.hasSymbolicDisplacement()))
1606  return true;
1607  // In addition to the checks required for a register base, check that
1608  // we do not try to use an unsafe Disp with a frame index.
1609  if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
1610  !isDispSafeForFrameIndex(Val))
1611  return true;
1612  }
1613  AM.Disp = Val;
1614  return false;
1615 
1616 }
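// For illustration (added commentary, not part of the original source): if
// AM.Disp is already 0x10 and a symbol offset of 0x20 is folded in, Val
// becomes 0x30; on 64-bit targets the fold is then rejected unless the
// combined value is still legal for the code model, and a frame-index base
// additionally requires the result to pass isDispSafeForFrameIndex.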
1617 
1618 bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM,
1619  bool AllowSegmentRegForX32) {
1620  SDValue Address = N->getOperand(1);
1621 
1622  // load gs:0 -> GS segment register.
1623  // load fs:0 -> FS segment register.
1624  //
1625  // This optimization is generally valid because the GNU TLS model defines that
1626  // gs:0 (or fs:0 on X86-64) contains its own address. However, for X86-64 mode
1627  // with 32-bit registers, as we get in ILP32 mode, those registers are first
1628  // zero-extended to 64 bits and then added to the base address, which gives
1629  // unwanted results when the register holds a negative value.
1630  // For more information see http://people.redhat.com/drepper/tls.pdf
1631  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address)) {
1632  if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
1633  !IndirectTlsSegRefs &&
1634  (Subtarget->isTargetGlibc() || Subtarget->isTargetAndroid() ||
1635  Subtarget->isTargetFuchsia())) {
1636  if (Subtarget->isTarget64BitILP32() && !AllowSegmentRegForX32)
1637  return true;
1638  switch (N->getPointerInfo().getAddrSpace()) {
1639  case X86AS::GS:
1640  AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
1641  return false;
1642  case X86AS::FS:
1643  AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
1644  return false;
1645  // Address space X86AS::SS is not handled here, because it is not used to
1646  // address TLS areas.
1647  }
1648  }
1649  }
1650 
1651  return true;
1652 }
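// For illustration (added commentary, not part of the original source): on a
// glibc x86-64 target this lets a load of the thread pointer be selected
// directly as
//   movq %fs:0, %rax
// instead of first materializing the fs-relative address in a register.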
1653 
1654 /// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an addressing
1655 /// mode. These wrap things that will resolve down into a symbol reference.
1656 /// If no match is possible, this returns true, otherwise it returns false.
1657 bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
1658  // If the addressing mode already has a symbol as the displacement, we can
1659  // never match another symbol.
1660  if (AM.hasSymbolicDisplacement())
1661  return true;
1662 
1663  bool IsRIPRelTLS = false;
1664  bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;
1665  if (IsRIPRel) {
1666  SDValue Val = N.getOperand(0);
1667  if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
1668  IsRIPRelTLS = true;
1669  }
1670 
1671  // We can't use an addressing mode in the 64-bit large code model.
1672  // Global TLS addressing is an exception. In the medium code model,
1673  // we can use such a mode when RIP wrappers are present.
1674  // That signifies access to globals that are known to be "near",
1675  // such as the GOT itself.
1676  CodeModel::Model M = TM.getCodeModel();
1677  if (Subtarget->is64Bit() &&
1678  ((M == CodeModel::Large && !IsRIPRelTLS) ||
1679  (M == CodeModel::Medium && !IsRIPRel)))
1680  return true;
1681 
1682  // Base and index reg must be 0 in order to use %rip as base.
1683  if (IsRIPRel && AM.hasBaseOrIndexReg())
1684  return true;
1685 
1686  // Make a local copy in case we can't do this fold.
1687  X86ISelAddressMode Backup = AM;
1688 
1689  int64_t Offset = 0;
1690  SDValue N0 = N.getOperand(0);
1691  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
1692  AM.GV = G->getGlobal();
1693  AM.SymbolFlags = G->getTargetFlags();
1694  Offset = G->getOffset();
1695  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
1696  AM.CP = CP->getConstVal();
1697  AM.Alignment = CP->getAlign();
1698  AM.SymbolFlags = CP->getTargetFlags();
1699  Offset = CP->getOffset();
1700  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
1701  AM.ES = S->getSymbol();
1702  AM.SymbolFlags = S->getTargetFlags();
1703  } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
1704  AM.MCSym = S->getMCSymbol();
1705  } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
1706  AM.JT = J->getIndex();
1707  AM.SymbolFlags = J->getTargetFlags();
1708  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
1709  AM.BlockAddr = BA->getBlockAddress();
1710  AM.SymbolFlags = BA->getTargetFlags();
1711  Offset = BA->getOffset();
1712  } else
1713  llvm_unreachable("Unhandled symbol reference node.");
1714 
1715  if (foldOffsetIntoAddress(Offset, AM)) {
1716  AM = Backup;
1717  return true;
1718  }
1719 
1720  if (IsRIPRel)
1721  AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
1722 
1723  // Commit the changes now that we know this fold is safe.
1724  return false;
1725 }
1726 
1727 /// Add the specified node to the specified addressing mode, returning true if
1728 /// it cannot be done. This just pattern matches for the addressing mode.
1729 bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) {
1730  if (matchAddressRecursively(N, AM, 0))
1731  return true;
1732 
1733  // Post-processing: Make a second attempt to fold a load, if we now know
1734  // that there will not be any other register. This is only performed for
1735  // 64-bit ILP32 mode since 32-bit mode and 64-bit LP64 mode will have folded
1736  // any foldable load the first time.
1737  if (Subtarget->isTarget64BitILP32() &&
1738  AM.BaseType == X86ISelAddressMode::RegBase &&
1739  AM.Base_Reg.getNode() != nullptr && AM.IndexReg.getNode() == nullptr) {
1740  SDValue Save_Base_Reg = AM.Base_Reg;
1741  if (auto *LoadN = dyn_cast<LoadSDNode>(Save_Base_Reg)) {
1742  AM.Base_Reg = SDValue();
1743  if (matchLoadInAddress(LoadN, AM, /*AllowSegmentRegForX32=*/true))
1744  AM.Base_Reg = Save_Base_Reg;
1745  }
1746  }
1747 
1748  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
1749  // a smaller encoding and avoids a scaled-index.
1750  if (AM.Scale == 2 &&
1751  AM.BaseType == X86ISelAddressMode::RegBase &&
1752  AM.Base_Reg.getNode() == nullptr) {
1753  AM.Base_Reg = AM.IndexReg;
1754  AM.Scale = 1;
1755  }
1756 
1757  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
1758  // because it has a smaller encoding.
1759  // TODO: Which other code models can use this?
1760  switch (TM.getCodeModel()) {
1761  default: break;
1762  case CodeModel::Small:
1763  case CodeModel::Kernel:
1764  if (Subtarget->is64Bit() &&
1765  AM.Scale == 1 &&
1766  AM.BaseType == X86ISelAddressMode::RegBase &&
1767  AM.Base_Reg.getNode() == nullptr &&
1768  AM.IndexReg.getNode() == nullptr &&
1769  AM.SymbolFlags == X86II::MO_NO_FLAG &&
1770  AM.hasSymbolicDisplacement())
1771  AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
1772  break;
1773  }
1774 
1775  return false;
1776 }
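// For illustration (added commentary, not part of the original source): the
// post-processing in matchAddress rewrites, for example,
//   leal (,%reg,2), %eax  ->  leal (%reg,%reg), %eax  // drops the SIB scale
//   movl foo, %eax        ->  movl foo(%rip), %eax    // smaller encoding
// where the second rewrite applies only to 64-bit small/kernel code models,
// as the switch above shows.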
1777 
1778 bool X86DAGToDAGISel::matchAdd(SDValue &N, X86ISelAddressMode &AM,
1779  unsigned Depth) {
1780  // Add an artificial use to this node so that we can keep track of
1781  // it if it gets CSE'd with a different node.
1782  HandleSDNode Handle(N);
1783 
1784  X86ISelAddressMode Backup = AM;
1785  if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
1786  !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
1787  return false;
1788  AM = Backup;
1789 
1790  // Try again after commuting the operands.
1791  if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM,
1792  Depth + 1) &&
1793  !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth + 1))
1794  return false;
1795  AM = Backup;
1796 
1797  // If we couldn't fold both operands into the address at the same time,
1798  // see if we can just put each operand into a register and fold at least
1799  // the add.
1800  if (AM.BaseType == X86ISelAddressMode::RegBase &&
1801  !AM.Base_Reg.getNode() &&
1802  !AM.IndexReg.getNode()) {
1803  N = Handle.getValue();
1804  AM.Base_Reg = N.getOperand(0);
1805  AM.IndexReg = N.getOperand(1);
1806  AM.Scale = 1;
1807  return false;
1808  }
1809  N = Handle.getValue();
1810  return true;
1811 }
1812 
1813 // Insert a node into the DAG at least before the Pos node's position. This
1814 // will reposition the node as needed, and will assign it a node ID that is <=
1815 // the Pos node's ID. Note that this does *not* preserve the uniqueness of node
1816 // IDs! The selection DAG must no longer depend on their uniqueness when this
1817 // is used.
1818 static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
1819  if (N->getNodeId() == -1 ||
1820  (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
1821  SelectionDAGISel::getUninvalidatedNodeId(Pos.getNode()))) {
1822  DAG.RepositionNode(Pos->getIterator(), N.getNode());
1823  // Mark Node as invalid for pruning as after this it may be a successor to a
1824  // selected node but otherwise be in the same position of Pos.
1825  // Conservatively mark it with the same -abs(Id) to assure node id
1826  // invariant is preserved.
1827  N->setNodeId(Pos->getNodeId());
1828  SelectionDAGISel::InvalidateNodeId(N.getNode());
1829  }
1830 }
1831 
1832 // Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
1833 // safe. This allows us to convert the shift and and into an h-register
1834 // extract and a scaled index. Returns false if the simplification is
1835 // performed.
1836 static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
1837  uint64_t Mask,
1838  SDValue Shift, SDValue X,
1839  X86ISelAddressMode &AM) {
1840  if (Shift.getOpcode() != ISD::SRL ||
1841  !isa<ConstantSDNode>(Shift.getOperand(1)) ||
1842  !Shift.hasOneUse())
1843  return true;
1844 
1845  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
1846  if (ScaleLog <= 0 || ScaleLog >= 4 ||
1847  Mask != (0xffu << ScaleLog))
1848  return true;
1849 
1850  MVT VT = N.getSimpleValueType();
1851  SDLoc DL(N);
1852  SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
1853  SDValue NewMask = DAG.getConstant(0xff, DL, VT);
1854  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
1855  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
1856  SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
1857  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);
1858 
1859  // Insert the new nodes into the topological ordering. We must do this in
1860  // a valid topological ordering as nothing is going to go back and re-sort
1861  // these nodes. We continually insert before 'N' in sequence as this is
1862  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1863  // hierarchy left to express.
1864  insertDAGNode(DAG, N, Eight);
1865  insertDAGNode(DAG, N, Srl);
1866  insertDAGNode(DAG, N, NewMask);
1867  insertDAGNode(DAG, N, And);
1868  insertDAGNode(DAG, N, ShlCount);
1869  insertDAGNode(DAG, N, Shl);
1870  DAG.ReplaceAllUsesWith(N, Shl);
1871  DAG.RemoveDeadNode(N.getNode());
1872  AM.IndexReg = And;
1873  AM.Scale = (1 << ScaleLog);
1874  return false;
1875 }
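// For illustration (added commentary, not part of the original source): with
// C1 = 2 the function rewrites (X >> 6) & 0x3fc into ((X >> 8) & 0xff) << 2,
// i.e. AM.IndexReg = (X >> 8) & 0xff and AM.Scale = 4, which can be selected
// as an h-register extract plus a scaled index:
//   movzbl %ah, %ecx
//   ... (%rdi,%rcx,4) ...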
1876 
1877 // Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
1878 // allows us to fold the shift into this addressing mode. Returns false if the
1879 // transform succeeded.
1880 static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
1881  X86ISelAddressMode &AM) {
1882  SDValue Shift = N.getOperand(0);
1883 
1884  // Use a signed mask so that shifting right will insert sign bits. These
1885  // bits will be removed when we shift the result left so it doesn't matter
1886  // what we use. This might allow a smaller immediate encoding.
1887  int64_t Mask = cast<ConstantSDNode>(N->getOperand(1))->getSExtValue();
1888 
1889  // If we have an any_extend feeding the AND, look through it to see if there
1890  // is a shift behind it. But only if the AND doesn't use the extended bits.
1891  // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
1892  bool FoundAnyExtend = false;
1893  if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
1894  Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
1895  isUInt<32>(Mask)) {
1896  FoundAnyExtend = true;
1897  Shift = Shift.getOperand(0);
1898  }
1899 
1900  if (Shift.getOpcode() != ISD::SHL ||
1901  !isa<ConstantSDNode>(Shift.getOperand(1)))
1902  return true;
1903 
1904  SDValue X = Shift.getOperand(0);
1905 
1906  // Not likely to be profitable if either the AND or SHIFT node has more
1907  // than one use (unless all uses are for address computation). Besides,
1908  // isel mechanism requires their node ids to be reused.
1909  if (!N.hasOneUse() || !Shift.hasOneUse())
1910  return true;
1911 
1912  // Verify that the shift amount is something we can fold.
1913  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1914  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
1915  return true;
1916 
1917  MVT VT = N.getSimpleValueType();
1918  SDLoc DL(N);
1919  if (FoundAnyExtend) {
1920  SDValue NewX = DAG.getNode(ISD::ANY_EXTEND, DL, VT, X);
1921  insertDAGNode(DAG, N, NewX);
1922  X = NewX;
1923  }
1924 
1925  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
1926  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
1927  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
1928 
1929  // Insert the new nodes into the topological ordering. We must do this in
1930  // a valid topological ordering as nothing is going to go back and re-sort
1931  // these nodes. We continually insert before 'N' in sequence as this is
1932  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1933  // hierarchy left to express.
1934  insertDAGNode(DAG, N, NewMask);
1935  insertDAGNode(DAG, N, NewAnd);
1936  insertDAGNode(DAG, N, NewShift);
1937  DAG.ReplaceAllUsesWith(N, NewShift);
1938  DAG.RemoveDeadNode(N.getNode());
1939 
1940  AM.Scale = 1 << ShiftAmt;
1941  AM.IndexReg = NewAnd;
1942  return false;
1943 }
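// For illustration (added commentary, not part of the original source): with
// C1 = 2 and C2 = 0x3fc this rewrites (X << 2) & 0x3fc into (X & 0xff) << 2;
// the shift then folds into the addressing mode as Scale = 4, and the AND
// keeps a compact 8-bit immediate.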
1944 
1945 // Implement some heroics to detect shifts of masked values where the mask can
1946 // be replaced by extending the shift and undoing that in the addressing mode
1947 // scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
1948 // (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
1949 // the addressing mode. This results in code such as:
1950 //
1951 // int f(short *y, int *lookup_table) {
1952 // ...
1953 // return *y + lookup_table[*y >> 11];
1954 // }
1955 //
1956 // Turning into:
1957 // movzwl (%rdi), %eax
1958 // movl %eax, %ecx
1959 // shrl $11, %ecx
1960 // addl (%rsi,%rcx,4), %eax
1961 //
1962 // Instead of:
1963 // movzwl (%rdi), %eax
1964 // movl %eax, %ecx
1965 // shrl $9, %ecx
1966 // andl $124, %rcx
1967 // addl (%rsi,%rcx), %eax
1968 //
1969 // Note that this function assumes the mask is provided as a mask *after* the
1970 // value is shifted. The input chain may or may not match that, but computing
1971 // such a mask is trivial.
1972 static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
1973  uint64_t Mask,
1974  SDValue Shift, SDValue X,
1975  X86ISelAddressMode &AM) {
1976  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
1977  !isa<ConstantSDNode>(Shift.getOperand(1)))
1978  return true;
1979 
1980  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1981  unsigned MaskLZ = countLeadingZeros(Mask);
1982  unsigned MaskTZ = countTrailingZeros(Mask);
1983 
1984  // The amount of shift we're trying to fit into the addressing mode is taken
1985  // from the trailing zeros of the mask.
1986  unsigned AMShiftAmt = MaskTZ;
1987 
1988  // There is nothing we can do here unless the mask is removing some bits.
1989  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
1990  if (AMShiftAmt == 0 || AMShiftAmt > 3) return true;
1991 
1992  // We also need to ensure that mask is a continuous run of bits.
1993  if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
1994 
1995  // Scale the leading zero count down based on the actual size of the value.
1996  // Also scale it down based on the size of the shift.
1997  unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
1998  if (MaskLZ < ScaleDown)
1999  return true;
2000  MaskLZ -= ScaleDown;
2001 
2002  // The final check is to ensure that any masked out high bits of X are
2003  // already known to be zero. Otherwise, the mask has a semantic impact
2004  // other than masking out a couple of low bits. Unfortunately, because of
2005  // the mask, zero extensions will be removed from operands in some cases.
2006  // This code works extra hard to look through extensions because we can
2007  // replace them with zero extensions cheaply if necessary.
2008  bool ReplacingAnyExtend = false;
2009  if (X.getOpcode() == ISD::ANY_EXTEND) {
2010  unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
2011  X.getOperand(0).getSimpleValueType().getSizeInBits();
2012  // Assume that we'll replace the any-extend with a zero-extend, and
2013  // narrow the search to the extended value.
2014  X = X.getOperand(0);
2015  MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
2016  ReplacingAnyExtend = true;
2017  }
2018  APInt MaskedHighBits =
2019  APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
2020  KnownBits Known = DAG.computeKnownBits(X);
2021  if (MaskedHighBits != Known.Zero) return true;
2022 
2023  // We've identified a pattern that can be transformed into a single shift
2024  // and an addressing mode. Make it so.
2025  MVT VT = N.getSimpleValueType();
2026  if (ReplacingAnyExtend) {
2027  assert(X.getValueType() != VT);
2028  // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
2029  SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
2030  insertDAGNode(DAG, N, NewX);
2031  X = NewX;
2032  }
2033  SDLoc DL(N);
2034  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
2035  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
2036  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
2037  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
2038 
2039  // Insert the new nodes into the topological ordering. We must do this in
2040  // a valid topological ordering as nothing is going to go back and re-sort
2041  // these nodes. We continually insert before 'N' in sequence as this is
2042  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
2043  // hierarchy left to express.
2044  insertDAGNode(DAG, N, NewSRLAmt);
2045  insertDAGNode(DAG, N, NewSRL);
2046  insertDAGNode(DAG, N, NewSHLAmt);
2047  insertDAGNode(DAG, N, NewSHL);
2048  DAG.ReplaceAllUsesWith(N, NewSHL);
2049  DAG.RemoveDeadNode(N.getNode());
2050 
2051  AM.Scale = 1 << AMShiftAmt;
2052  AM.IndexReg = NewSRL;
2053  return false;
2054 }
2055 
2056 // Transform "(X >> SHIFT) & (MASK << C1)" to
2057 // "((X >> (SHIFT + C1)) & (MASK)) << C1". Everything before the SHL will be
2058 // matched to a BEXTR later. Returns false if the simplification is performed.
2059 static bool foldMaskedShiftToBEXTR(SelectionDAG &DAG, SDValue N,
2060  uint64_t Mask,
2061  SDValue Shift, SDValue X,
2062  X86ISelAddressMode &AM,
2063  const X86Subtarget &Subtarget) {
2064  if (Shift.getOpcode() != ISD::SRL ||
2065  !isa<ConstantSDNode>(Shift.getOperand(1)) ||
2066  !Shift.hasOneUse() || !N.hasOneUse())
2067  return true;
2068 
2069  // Only do this if BEXTR will be matched by matchBEXTRFromAndImm.
2070  if (!Subtarget.hasTBM() &&
2071  !(Subtarget.hasBMI() && Subtarget.hasFastBEXTR()))
2072  return true;
2073 
2074  // We need to ensure that mask is a continuous run of bits.
2075  if (!isShiftedMask_64(Mask)) return true;
2076 
2077  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
2078 
2079  // The amount of shift we're trying to fit into the addressing mode is taken
2080  // from the trailing zeros of the mask.
2081  unsigned AMShiftAmt = countTrailingZeros(Mask);
2082 
2083  // There is nothing we can do here unless the mask is removing some bits.
2084  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
2085  if (AMShiftAmt == 0 || AMShiftAmt > 3) return true;
2086 
2087  MVT VT = N.getSimpleValueType();
2088  SDLoc DL(N);
2089  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
2090  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
2091  SDValue NewMask = DAG.getConstant(Mask >> AMShiftAmt, DL, VT);
2092  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, NewSRL, NewMask);
2093  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
2094  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewAnd, NewSHLAmt);
2095 
2096  // Insert the new nodes into the topological ordering. We must do this in
2097  // a valid topological ordering as nothing is going to go back and re-sort
2098  // these nodes. We continually insert before 'N' in sequence as this is
2099  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
2100  // hierarchy left to express.
2101  insertDAGNode(DAG, N, NewSRLAmt);
2102  insertDAGNode(DAG, N, NewSRL);
2103  insertDAGNode(DAG, N, NewMask);
2104  insertDAGNode(DAG, N, NewAnd);
2105  insertDAGNode(DAG, N, NewSHLAmt);
2106  insertDAGNode(DAG, N, NewSHL);
2107  DAG.ReplaceAllUsesWith(N, NewSHL);
2108  DAG.RemoveDeadNode(N.getNode());
2109 
2110  AM.Scale = 1 << AMShiftAmt;
2111  AM.IndexReg = NewAnd;
2112  return false;
2113 }
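// For illustration (added commentary, not part of the original source): with
// SHIFT = 5 and Mask = 0x7f8 (0xff << 3) this rewrites (X >> 5) & 0x7f8 into
// ((X >> 8) & 0xff) << 3; the inner shift-and-mask is a BEXTR candidate on
// TBM or fast-BEXTR BMI targets, and the outer << 3 becomes Scale = 8.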
2114 
2115 bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
2116  unsigned Depth) {
2117  SDLoc dl(N);
2118  LLVM_DEBUG({
2119  dbgs() << "MatchAddress: ";
2120  AM.dump(CurDAG);
2121  });
2122  // Limit recursion.
2123  if (Depth > 5)
2124  return matchAddressBase(N, AM);
2125 
2126  // If this is already a %rip relative address, we can only merge immediates
2127  // into it. Instead of handling this in every case, we handle it here.
2128  // RIP relative addressing: %rip + 32-bit displacement!
2129  if (AM.isRIPRelative()) {
2130  // FIXME: JumpTable and ExternalSymbol address currently don't like
2131  // displacements. It isn't very important, but this should be fixed for
2132  // consistency.
2133  if (!(AM.ES || AM.MCSym) && AM.JT != -1)
2134  return true;
2135 
2136  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
2137  if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))
2138  return false;
2139  return true;
2140  }
2141 
2142  switch (N.getOpcode()) {
2143  default: break;
2144  case ISD::LOCAL_RECOVER: {
2145  if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
2146  if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
2147  // Use the symbol and don't prefix it.
2148  AM.MCSym = ESNode->getMCSymbol();
2149  return false;
2150  }
2151  break;
2152  }
2153  case ISD::Constant: {
2154  uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
2155  if (!foldOffsetIntoAddress(Val, AM))
2156  return false;
2157  break;
2158  }
2159 
2160  case X86ISD::Wrapper:
2161  case X86ISD::WrapperRIP:
2162  if (!matchWrapper(N, AM))
2163  return false;
2164  break;
2165 
2166  case ISD::LOAD:
2167  if (!matchLoadInAddress(cast<LoadSDNode>(N), AM))
2168  return false;
2169  break;
2170 
2171  case ISD::FrameIndex:
2172  if (AM.BaseType == X86ISelAddressMode::RegBase &&
2173  AM.Base_Reg.getNode() == nullptr &&
2174  (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
2175  AM.BaseType = X86ISelAddressMode::FrameIndexBase;
2176  AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
2177  return false;
2178  }
2179  break;
2180 
2181  case ISD::SHL:
2182  if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
2183  break;
2184 
2185  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
2186  unsigned Val = CN->getZExtValue();
2187  // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
2188  // that the base operand remains free for further matching. If
2189  // the base doesn't end up getting used, a post-processing step
2190  // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
2191  if (Val == 1 || Val == 2 || Val == 3) {
2192  AM.Scale = 1 << Val;
2193  SDValue ShVal = N.getOperand(0);
2194 
2195  // Okay, we know that we have a scale by now. However, if the scaled
2196  // value is an add of something and a constant, we can fold the
2197  // constant into the disp field here.
2198  if (CurDAG->isBaseWithConstantOffset(ShVal)) {
2199  AM.IndexReg = ShVal.getOperand(0);
2200  ConstantSDNode *AddVal = cast<ConstantSDNode>(ShVal.getOperand(1));
2201  uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
2202  if (!foldOffsetIntoAddress(Disp, AM))
2203  return false;
2204  }
2205 
2206  AM.IndexReg = ShVal;
2207  return false;
2208  }
2209  }
2210  break;
2211 
2212  case ISD::SRL: {
2213  // Scale must not be used already.
2214  if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
2215 
2216  // We only handle up to 64-bit values here as those are what matter for
2217  // addressing mode optimizations.
2218  assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
2219  "Unexpected value size!");
2220 
2221  SDValue And = N.getOperand(0);
2222  if (And.getOpcode() != ISD::AND) break;
2223  SDValue X = And.getOperand(0);
2224 
2225  // The mask used for the transform is expected to be post-shift, but we
2226  // found the shift first so just apply the shift to the mask before passing
2227  // it down.
2228  if (!isa<ConstantSDNode>(N.getOperand(1)) ||
2229  !isa<ConstantSDNode>(And.getOperand(1)))
2230  break;
2231  uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);
2232 
2233  // Try to fold the mask and shift into the scale, and return false if we
2234  // succeed.
2235  if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
2236  return false;
2237  break;
2238  }
2239 
2240  case ISD::SMUL_LOHI:
2241  case ISD::UMUL_LOHI:
2242  // A mul_lohi where we need the low part can be folded as a plain multiply.
2243  if (N.getResNo() != 0) break;
2244  LLVM_FALLTHROUGH;
2245  case ISD::MUL:
2246  case X86ISD::MUL_IMM:
2247  // X*[3,5,9] -> X+X*[2,4,8]
2248  if (AM.BaseType == X86ISelAddressMode::RegBase &&
2249  AM.Base_Reg.getNode() == nullptr &&
2250  AM.IndexReg.getNode() == nullptr) {
2251  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
2252  if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
2253  CN->getZExtValue() == 9) {
2254  AM.Scale = unsigned(CN->getZExtValue())-1;
2255 
2256  SDValue MulVal = N.getOperand(0);
2257  SDValue Reg;
2258 
2259  // Okay, we know that we have a scale by now. However, if the scaled
2260  // value is an add of something and a constant, we can fold the
2261  // constant into the disp field here.
2262  if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
2263  isa<ConstantSDNode>(MulVal.getOperand(1))) {
2264  Reg = MulVal.getOperand(0);
2265  ConstantSDNode *AddVal =
2266  cast<ConstantSDNode>(MulVal.getOperand(1));
2267  uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
2268  if (foldOffsetIntoAddress(Disp, AM))
2269  Reg = N.getOperand(0);
2270  } else {
2271  Reg = N.getOperand(0);
2272  }
2273 
2274  AM.IndexReg = AM.Base_Reg = Reg;
2275  return false;
2276  }
2277  }
2278  break;
2279 
2280  case ISD::SUB: {
2281  // Given A-B, if A can be completely folded into the address while
2282  // leaving the index field unused, use -B as the index.
2283  // This is a win if A has multiple parts that can be folded into
2284  // the address. Also, this saves a mov if the base register has
2285  // other uses, since it avoids a two-address sub instruction, however
2286  // it costs an additional mov if the index register has other uses.
2287 
2288  // Add an artificial use to this node so that we can keep track of
2289  // it if it gets CSE'd with a different node.
2290  HandleSDNode Handle(N);
2291 
2292  // Test if the LHS of the sub can be folded.
2293  X86ISelAddressMode Backup = AM;
2294  if (matchAddressRecursively(N.getOperand(0), AM, Depth+1)) {
2295  N = Handle.getValue();
2296  AM = Backup;
2297  break;
2298  }
2299  N = Handle.getValue();
2300  // Test if the index field is free for use.
2301  if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
2302  AM = Backup;
2303  break;
2304  }
2305 
2306  int Cost = 0;
2307  SDValue RHS = N.getOperand(1);
2308  // If the RHS involves a register with multiple uses, this
2309  // transformation incurs an extra mov, due to the neg instruction
2310  // clobbering its operand.
2311  if (!RHS.getNode()->hasOneUse() ||
2312  RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
2313  RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
2314  RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
2315  (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
2316  RHS.getOperand(0).getValueType() == MVT::i32))
2317  ++Cost;
2318  // If the base is a register with multiple uses, this
2319  // transformation may save a mov.
2320  if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() &&
2321  !AM.Base_Reg.getNode()->hasOneUse()) ||
2322  AM.BaseType == X86ISelAddressMode::FrameIndexBase)
2323  --Cost;
2324  // If the folded LHS was interesting, this transformation saves
2325  // address arithmetic.
2326  if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
2327  ((AM.Disp != 0) && (Backup.Disp == 0)) +
2328  (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
2329  --Cost;
2330  // If it doesn't look like it may be an overall win, don't do it.
2331  if (Cost >= 0) {
2332  AM = Backup;
2333  break;
2334  }
2335 
2336  // Ok, the transformation is legal and appears profitable. Go for it.
2337  // Negation will be emitted later to avoid creating dangling nodes if this
2338  // was an unprofitable LEA.
2339  AM.IndexReg = RHS;
2340  AM.NegateIndex = true;
2341  AM.Scale = 1;
2342  return false;
2343  }
2344 
2345  case ISD::ADD:
2346  if (!matchAdd(N, AM, Depth))
2347  return false;
2348  break;
2349 
2350  case ISD::OR:
2351  // We want to look through a transform in InstCombine and DAGCombiner that
2352  // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
2353  // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3))
2354  // An 'lea' can then be used to match the shift (multiply) and add:
2355  // and $1, %esi
2356  // lea (%rsi, %rdi, 8), %rax
2357  if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) &&
2358  !matchAdd(N, AM, Depth))
2359  return false;
2360  break;
2361 
2362  case ISD::AND: {
2363  // Perform some heroic transforms on an and of a constant-count shift
2364  // with a constant to enable use of the scaled offset field.
2365 
2366  // Scale must not be used already.
2367  if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
2368 
2369  // We only handle up to 64-bit values here as those are what matter for
2370  // addressing mode optimizations.
2371  assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
2372  "Unexpected value size!");
2373 
2374  if (!isa<ConstantSDNode>(N.getOperand(1)))
2375  break;
2376 
2377  if (N.getOperand(0).getOpcode() == ISD::SRL) {
2378  SDValue Shift = N.getOperand(0);
2379  SDValue X = Shift.getOperand(0);
2380 
2381  uint64_t Mask = N.getConstantOperandVal(1);
2382 
2383  // Try to fold the mask and shift into an extract and scale.
2384  if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
2385  return false;
2386 
2387  // Try to fold the mask and shift directly into the scale.
2388  if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
2389  return false;
2390 
2391  // Try to fold the mask and shift into BEXTR and scale.
2392  if (!foldMaskedShiftToBEXTR(*CurDAG, N, Mask, Shift, X, AM, *Subtarget))
2393  return false;
2394  }
2395 
2396  // Try to swap the mask and shift to place shifts which can be done as
2397  // a scale on the outside of the mask.
2398  if (!foldMaskedShiftToScaledMask(*CurDAG, N, AM))
2399  return false;
2400 
2401  break;
2402  }
2403  case ISD::ZERO_EXTEND: {
2404  // Try to widen a zexted shift left to the same size as its use, so we can
2405  // match the shift as a scale factor.
2406  if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
2407  break;
2408  if (N.getOperand(0).getOpcode() != ISD::SHL || !N.getOperand(0).hasOneUse())
2409  break;
2410 
2411  // Give up if the shift is not a valid scale factor [1,2,3].
2412  SDValue Shl = N.getOperand(0);
2413  auto *ShAmtC = dyn_cast<ConstantSDNode>(Shl.getOperand(1));
2414  if (!ShAmtC || ShAmtC->getZExtValue() > 3)
2415  break;
2416 
2417  // The narrow shift must only shift out zero bits (it must be 'nuw').
2418  // That makes it safe to widen to the destination type.
2419  APInt HighZeros = APInt::getHighBitsSet(Shl.getValueSizeInBits(),
2420  ShAmtC->getZExtValue());
2421  if (!CurDAG->MaskedValueIsZero(Shl.getOperand(0), HighZeros))
2422  break;
2423 
2424  // zext (shl nuw i8 %x, C) to i32 --> shl (zext i8 %x to i32), (zext C)
2425  MVT VT = N.getSimpleValueType();
2426  SDLoc DL(N);
2427  SDValue Zext = CurDAG->getNode(ISD::ZERO_EXTEND, DL, VT, Shl.getOperand(0));
2428  SDValue NewShl = CurDAG->getNode(ISD::SHL, DL, VT, Zext, Shl.getOperand(1));
2429 
2430  // Convert the shift to scale factor.
2431  AM.Scale = 1 << ShAmtC->getZExtValue();
2432  AM.IndexReg = Zext;
2433 
2434  insertDAGNode(*CurDAG, N, Zext);
2435  insertDAGNode(*CurDAG, N, NewShl);
2436  CurDAG->ReplaceAllUsesWith(N, NewShl);
2437  CurDAG->RemoveDeadNode(N.getNode());
2438  return false;
2439  }
2440  }
2441 
2442  return matchAddressBase(N, AM);
2443 }
2444 
2445 /// Helper for MatchAddress. Add the specified node to the
2446 /// specified addressing mode without any further recursion.
2447 bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) {
2448  // Is the base register already occupied?
2449  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
2450  // If so, check to see if the scale index register is set.
2451  if (!AM.IndexReg.getNode()) {
2452  AM.IndexReg = N;
2453  AM.Scale = 1;
2454  return false;
2455  }
2456 
2457  // Otherwise, we cannot select it.
2458  return true;
2459  }
2460 
2461  // Default, generate it as a register.
2462  AM.BaseType = X86ISelAddressMode::RegBase;
2463  AM.Base_Reg = N;
2464  return false;
2465 }
2466 
2467 /// Helper for selectVectorAddr. Handles things that can be folded into a
2468 /// gather scatter address. The index register and scale should have already
2469 /// been handled.
2470 bool X86DAGToDAGISel::matchVectorAddress(SDValue N, X86ISelAddressMode &AM) {
2471  // TODO: Support other operations.
2472  switch (N.getOpcode()) {
2473  case ISD::Constant: {
2474  uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
2475  if (!foldOffsetIntoAddress(Val, AM))
2476  return false;
2477  break;
2478  }
2479  case X86ISD::Wrapper:
2480  if (!matchWrapper(N, AM))
2481  return false;
2482  break;
2483  }
2484 
2485  return matchAddressBase(N, AM);
2486 }
2487 
2488 bool X86DAGToDAGISel::selectVectorAddr(MemSDNode *Parent, SDValue BasePtr,
2489  SDValue IndexOp, SDValue ScaleOp,
2490  SDValue &Base, SDValue &Scale,
2491  SDValue &Index, SDValue &Disp,
2492  SDValue &Segment) {
2493  X86ISelAddressMode AM;
2494  AM.IndexReg = IndexOp;
2495  AM.Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
2496 
2497  unsigned AddrSpace = Parent->getPointerInfo().getAddrSpace();
2498  if (AddrSpace == X86AS::GS)
2499  AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2500  if (AddrSpace == X86AS::FS)
2501  AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2502  if (AddrSpace == X86AS::SS)
2503  AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2504 
2505  SDLoc DL(BasePtr);
2506  MVT VT = BasePtr.getSimpleValueType();
2507 
2508  // Try to match into the base and displacement fields.
2509  if (matchVectorAddress(BasePtr, AM))
2510  return false;
2511 
2512  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2513  return true;
2514 }
2515 
2516 /// Returns true if it is able to pattern match an addressing mode.
2517 /// It returns the operands which make up the maximal addressing mode it can
2518 /// match by reference.
2519 ///
2520 /// Parent is the parent node of the addr operand that is being matched. It
2521 /// is always a load, store, atomic node, or null. It is only null when
2522 /// checking memory operands for inline asm nodes.
2523 bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
2524  SDValue &Scale, SDValue &Index,
2525  SDValue &Disp, SDValue &Segment) {
2526  X86ISelAddressMode AM;
2527 
2528  if (Parent &&
2529  // This list of opcodes are all the nodes that have an "addr:$ptr" operand
2530  // that are not a MemSDNode, and thus don't have proper addrspace info.
2531  Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
2532  Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
2533  Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
2534  Parent->getOpcode() != X86ISD::ENQCMD && // Fixme
2535  Parent->getOpcode() != X86ISD::ENQCMDS && // Fixme
2536  Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
2537  Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
2538  unsigned AddrSpace =
2539  cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
2540  if (AddrSpace == X86AS::GS)
2541  AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2542  if (AddrSpace == X86AS::FS)
2543  AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2544  if (AddrSpace == X86AS::SS)
2545  AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2546  }
2547 
2548  // Save the DL and VT before calling matchAddress, it can invalidate N.
2549  SDLoc DL(N);
2550  MVT VT = N.getSimpleValueType();
2551 
2552  if (matchAddress(N, AM))
2553  return false;
2554 
2555  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2556  return true;
2557 }
2558 
2559 bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
2560  // In static codegen with small code model, we can get the address of a label
2561  // into a register with 'movl'
2562  if (N->getOpcode() != X86ISD::Wrapper)
2563  return false;
2564 
2565  N = N.getOperand(0);
2566 
2567  // At least GNU as does not accept 'movl' for TPOFF relocations.
2568  // FIXME: We could use 'movl' when we know we are targeting MC.
2569  if (N->getOpcode() == ISD::TargetGlobalTLSAddress)
2570  return false;
2571 
2572  Imm = N;
2573  if (N->getOpcode() != ISD::TargetGlobalAddress)
2574  return TM.getCodeModel() == CodeModel::Small;
2575 
2576  Optional<ConstantRange> CR =
2577  cast<GlobalAddressSDNode>(N)->getGlobal()->getAbsoluteSymbolRange();
2578  if (!CR)
2579  return TM.getCodeModel() == CodeModel::Small;
2580 
2581  return CR->getUnsignedMax().ult(1ull << 32);
2582 }
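// For illustration (added commentary, not part of the original source): in
// the small code model this lets
//   movabsq $foo, %rax    // 10-byte encoding
// be shrunk to
//   movl $foo, %eax       // 5-byte encoding; upper 32 bits implicitly zero
// and, for absolute symbols, the same holds whenever the attached range fits
// in 32 bits.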
2583 
2584 bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
2585  SDValue &Scale, SDValue &Index,
2586  SDValue &Disp, SDValue &Segment) {
2587  // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
2588  SDLoc DL(N);
2589 
2590  if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
2591  return false;
2592 
2593  RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
2594  if (RN && RN->getReg() == 0)
2595  Base = CurDAG->getRegister(0, MVT::i64);
2596  else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(Base)) {
2597  // Base could already be %rip, particularly in the x32 ABI.
2598  SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2599  MVT::i64), 0);
2600  Base = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2601  Base);
2602  }
2603 
2604  RN = dyn_cast<RegisterSDNode>(Index);
2605  if (RN && RN->getReg() == 0)
2606  Index = CurDAG->getRegister(0, MVT::i64);
2607  else {
2608  assert(Index.getValueType() == MVT::i32 &&
2609  "Expect to be extending 32-bit registers for use in LEA");
2610  SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2611  MVT::i64), 0);
2612  Index = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2613  Index);
2614  }
2615 
2616  return true;
2617 }
2618 
2619 /// Calls SelectAddr and determines if the maximal addressing
2620 /// mode it matches can be cost effectively emitted as an LEA instruction.
2621 bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
2622  SDValue &Base, SDValue &Scale,
2623  SDValue &Index, SDValue &Disp,
2624  SDValue &Segment) {
2625  X86ISelAddressMode AM;
2626 
2627  // Save the DL and VT before calling matchAddress, it can invalidate N.
2628  SDLoc DL(N);
2629  MVT VT = N.getSimpleValueType();
2630 
2631  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
2632  // segments.
2633  SDValue Copy = AM.Segment;
2634  SDValue T = CurDAG->getRegister(0, MVT::i32);
2635  AM.Segment = T;
2636  if (matchAddress(N, AM))
2637  return false;
2638  assert (T == AM.Segment);
2639  AM.Segment = Copy;
2640 
2641  unsigned Complexity = 0;
2642  if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode())
2643  Complexity = 1;
2644  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
2645  Complexity = 4;
2646 
2647  if (AM.IndexReg.getNode())
2648  Complexity++;
2649 
2650  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
2651  // a simple shift.
2652  if (AM.Scale > 1)
2653  Complexity++;
2654 
2655  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
2656  // to a LEA. This is determined with some experimentation but is by no means
2657  // optimal (especially for code size consideration). LEA is nice because of
2658  // its three-address nature. Tweak the cost function again when we can run
2659  // convertToThreeAddress() at register allocation time.
2660  if (AM.hasSymbolicDisplacement()) {
2661  // For X86-64, always use LEA to materialize RIP-relative addresses.
2662  if (Subtarget->is64Bit())
2663  Complexity = 4;
2664  else
2665  Complexity += 2;
2666  }
2667 
2668  // Heuristic: try harder to form an LEA from ADD if the operands set flags.
2669  // Unlike ADD, LEA does not affect flags, so we will be less likely to require
2670  // duplicating flag-producing instructions later in the pipeline.
2671  if (N.getOpcode() == ISD::ADD) {
2672  auto isMathWithFlags = [](SDValue V) {
2673  switch (V.getOpcode()) {
2674  case X86ISD::ADD:
2675  case X86ISD::SUB:
2676  case X86ISD::ADC:
2677  case X86ISD::SBB:
2678  /* TODO: These opcodes can be added safely, but we may want to justify
2679  their inclusion for different reasons (better for reg-alloc).
2680  case X86ISD::SMUL:
2681  case X86ISD::UMUL:
2682  case X86ISD::OR:
2683  case X86ISD::XOR:
2684  case X86ISD::AND:
2685  */
2686  // Value 1 is the flag output of the node - verify it's not dead.
2687  return !SDValue(V.getNode(), 1).use_empty();
2688  default:
2689  return false;
2690  }
2691  };
2692  // TODO: This could be an 'or' rather than 'and' to make the transform more
2693  // likely to happen. We might want to factor in whether there's a
2694  // load folding opportunity for the math op that disappears with LEA.
2695  if (isMathWithFlags(N.getOperand(0)) && isMathWithFlags(N.getOperand(1)))
2696  Complexity++;
2697  }
2698 
2699  if (AM.Disp)
2700  Complexity++;
2701 
2702  // If it isn't worth using an LEA, reject it.
2703  if (Complexity <= 2)
2704  return false;
2705 
2706  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2707  return true;
2708 }
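// For illustration (added commentary, not part of the original source): under
// the heuristic above, (add %r1, %r2) alone scores Complexity = 2 and stays
// an ADD, while (add (add %r1, %r2), 8) scores 3 (base + index +
// displacement) and is emitted as
//   leaq 8(%r1,%r2), %rax
// which keeps all inputs live without clobbering a source register.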
2709 
2710 /// This is only run on TargetGlobalTLSAddress nodes.
2711 bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base,
2712  SDValue &Scale, SDValue &Index,
2713  SDValue &Disp, SDValue &Segment) {
2714  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
2715  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
2716 
2717  X86ISelAddressMode AM;
2718  AM.GV = GA->getGlobal();
2719  AM.Disp += GA->getOffset();
2720  AM.SymbolFlags = GA->getTargetFlags();
2721 
2722  if (Subtarget->is32Bit()) {
2723  AM.Scale = 1;
2724  AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
2725  }
2726 
2727  MVT VT = N.getSimpleValueType();
2728  getAddressOperands(AM, SDLoc(N), VT, Base, Scale, Index, Disp, Segment);
2729  return true;
2730 }
2731 
2732 bool X86DAGToDAGISel::selectRelocImm(SDValue N, SDValue &Op) {
2733  // Keep track of the original value type and whether this value was
2734  // truncated. If we see a truncation from pointer type to VT that truncates
2735  // bits that are known to be zero, we can use a narrow reference.
2736  EVT VT = N.getValueType();
2737  bool WasTruncated = false;
2738  if (N.getOpcode() == ISD::TRUNCATE) {
2739  WasTruncated = true;
2740  N = N.getOperand(0);
2741  }
2742 
2743  if (N.getOpcode() != X86ISD::Wrapper)
2744  return false;
2745 
2746  // We can only use non-GlobalValues as immediates if they were not truncated,
2747  // as we do not have any range information. If we have a GlobalValue and the
2748  // address was not truncated, we can select it as an operand directly.
2749  unsigned Opc = N.getOperand(0)->getOpcode();
2750  if (Opc != ISD::TargetGlobalAddress || !WasTruncated) {
2751  Op = N.getOperand(0);
2752  // We can only select the operand directly if we didn't have to look past a
2753  // truncate.
2754  return !WasTruncated;
2755  }
2756 
2757  // Check that the global's range fits into VT.
2758  auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0));
2759  Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2760  if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits()))
2761  return false;
2762 
2763  // Okay, we can use a narrow reference.
2764  Op = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(N), VT,
2765  GA->getOffset(), GA->getTargetFlags());
2766  return true;
2767 }
2768 
2769 bool X86DAGToDAGISel::tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
2770  SDValue &Base, SDValue &Scale,
2771  SDValue &Index, SDValue &Disp,
2772  SDValue &Segment) {
2773  assert(Root && P && "Unknown root/parent nodes");
2774  if (!ISD::isNON_EXTLoad(N.getNode()) ||
2775  !IsProfitableToFold(N, P, Root) ||
2776  !IsLegalToFold(N, P, Root, OptLevel))
2777  return false;
2778 
2779  return selectAddr(N.getNode(),
2780  N.getOperand(1), Base, Scale, Index, Disp, Segment);
2781 }
2782 
2783 bool X86DAGToDAGISel::tryFoldBroadcast(SDNode *Root, SDNode *P, SDValue N,
2784  SDValue &Base, SDValue &Scale,
2785  SDValue &Index, SDValue &Disp,
2786  SDValue &Segment) {
2787  assert(Root && P && "Unknown root/parent nodes");
2788  if (N->getOpcode() != X86ISD::VBROADCAST_LOAD ||
2789  !IsProfitableToFold(N, P, Root) ||
2790  !IsLegalToFold(N, P, Root, OptLevel))
2791  return false;
2792 
2793  return selectAddr(N.getNode(),
2794  N.getOperand(1), Base, Scale, Index, Disp, Segment);
2795 }
2796 
2797 /// Return an SDNode that returns the value of the global base register.
2798 /// Output instructions required to initialize the global base register,
2799 /// if necessary.
2800 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
2801  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
2802  auto &DL = MF->getDataLayout();
2803  return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
2804 }
2805 
2806 bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const {
2807  if (N->getOpcode() == ISD::TRUNCATE)
2808  N = N->getOperand(0).getNode();
2809  if (N->getOpcode() != X86ISD::Wrapper)
2810  return false;
2811 
2812  auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0));
2813  if (!GA)
2814  return false;
2815 
2816  Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2817  if (!CR)
2818  return Width == 32 && TM.getCodeModel() == CodeModel::Small;
2819 
2820  return CR->getSignedMin().sge(-1ull << Width) &&
2821  CR->getSignedMax().slt(1ull << Width);
2822 }
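// For illustration (added commentary, not part of the original source;
// metadata values are hypothetical): a global annotated with absolute_symbol
// range metadata, e.g. !absolute_symbol !{i64 -128, i64 128}, advertises an
// address range that fits a sign-extended 8-bit value, so
// isSExtAbsoluteSymbolRef(8, N) can approve folding it as a narrow immediate.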
2823 
2824 static X86::CondCode getCondFromNode(SDNode *N) {
2825  assert(N->isMachineOpcode() && "Unexpected node");
2826  X86::CondCode CC = X86::COND_INVALID;
2827  unsigned Opc = N->getMachineOpcode();
2828  if (Opc == X86::JCC_1)
2829  CC = static_cast<X86::CondCode>(N->getConstantOperandVal(1));
2830  else if (Opc == X86::SETCCr)
2831  CC = static_cast<X86::CondCode>(N->getConstantOperandVal(0));
2832  else if (Opc == X86::SETCCm)
2833  CC = static_cast<X86::CondCode>(N->getConstantOperandVal(5));
2834  else if (Opc == X86::CMOV16rr || Opc == X86::CMOV32rr ||
2835  Opc == X86::CMOV64rr)
2836  CC = static_cast<X86::CondCode>(N->getConstantOperandVal(2));
2837  else if (Opc == X86::CMOV16rm || Opc == X86::CMOV32rm ||
2838  Opc == X86::CMOV64rm)
2839  CC = static_cast<X86::CondCode>(N->getConstantOperandVal(6));
2840 
2841  return CC;
2842 }
2843 
2844 /// Test whether the given X86ISD::CMP node has any users that use a flag
2845 /// other than ZF.
2846 bool X86DAGToDAGISel::onlyUsesZeroFlag(SDValue Flags) const {
2847  // Examine each user of the node.
2848  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2849  UI != UE; ++UI) {
2850  // Only check things that use the flags.
2851  if (UI.getUse().getResNo() != Flags.getResNo())
2852  continue;
2853  // Only examine CopyToReg uses that copy to EFLAGS.
2854  if (UI->getOpcode() != ISD::CopyToReg ||
2855  cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2856  return false;
2857  // Examine each user of the CopyToReg use.
2858  for (SDNode::use_iterator FlagUI = UI->use_begin(),
2859  FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
2860  // Only examine the Flag result.
2861  if (FlagUI.getUse().getResNo() != 1) continue;
2862  // Anything unusual: assume conservatively.
2863  if (!FlagUI->isMachineOpcode()) return false;
2864  // Examine the condition code of the user.
2865  X86::CondCode CC = getCondFromNode(*FlagUI);
2866 
2867  switch (CC) {
2868  // Comparisons which only use the zero flag.
2869  case X86::COND_E: case X86::COND_NE:
2870  continue;
2871  // Anything else: assume conservatively.
2872  default:
2873  return false;
2874  }
2875  }
2876  }
2877  return true;
2878 }
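// For illustration (added commentary, not part of the original source): given
//   %flags = (X86ISD::CMP %x, 0) copied into EFLAGS and consumed only by
//   JCC_1 with COND_E / COND_NE,
// onlyUsesZeroFlag returns true, so peepholes that preserve ZF but may
// clobber SF or CF remain legal on this node.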
2879 
2880 /// Test whether the given X86ISD::CMP node has any uses which require the SF
2881 /// flag to be accurate.
2882 bool X86DAGToDAGISel::hasNoSignFlagUses(SDValue Flags) const {
2883  // Examine each user of the node.
2884  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2885  UI != UE; ++UI) {
2886  // Only check things that use the flags.
2887  if (UI.getUse().getResNo() != Flags.getResNo())
2888  continue;
2889  // Only examine CopyToReg uses that copy to EFLAGS.
2890  if (UI->getOpcode() != ISD::CopyToReg ||
2891  cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2892  return false;
2893  // Examine each user of the CopyToReg use.
2894  for (SDNode::use_iterator FlagUI = UI->use_begin(),
2895  FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
2896  // Only examine the Flag result.
2897  if (FlagUI.getUse().getResNo() != 1) continue;
2898  // Anything unusual: assume conservatively.
2899  if (!FlagUI->isMachineOpcode()) return false;
2900  // Examine the condition code of the user.
2901  X86::CondCode CC = getCondFromNode(*FlagUI);
2902 
2903  switch (CC) {
2904  // Comparisons which don't examine the SF flag.
2905  case X86::COND_A: case X86::COND_AE:
2906  case X86::COND_B: case X86::COND_BE:
2907  case X86::COND_E: case X86::COND_NE:
2908  case X86::COND_O: case X86::COND_NO:
2909  case X86::COND_P: case X86::COND_NP:
2910  continue;
2911  // Anything else: assume conservatively.
2912  default:
2913  return false;
2914  }
2915  }
2916  }
2917  return true;
2918 }
2919 
2920 static bool mayUseCarryFlag(X86::CondCode CC) {
2921  switch (CC) {
2922  // Comparisons which don't examine the CF flag.
2923  case X86::COND_O: case X86::COND_NO:
2924  case X86::COND_E: case X86::COND_NE:
2925  case X86::COND_S: case X86::COND_NS:
2926  case X86::COND_P: case X86::COND_NP:
2927  case X86::COND_L: case X86::COND_GE:
2928  case X86::COND_G: case X86::COND_LE:
2929  return false;
2930  // Anything else: assume conservatively.
2931  default:
2932  return true;
2933  }
2934 }
2935 
2936 /// Test whether the given node which sets flags has any uses which require the
2937 /// CF flag to be accurate.
2938 bool X86DAGToDAGISel::hasNoCarryFlagUses(SDValue Flags) const {
2939  // Examine each user of the node.
2940  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2941  UI != UE; ++UI) {
2942  // Only check things that use the flags.
2943  if (UI.getUse().getResNo() != Flags.getResNo())
2944  continue;
2945 
2946  unsigned UIOpc = UI->getOpcode();
2947 
2948  if (UIOpc == ISD::CopyToReg) {
2949  // Only examine CopyToReg uses that copy to EFLAGS.
2950  if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2951  return false;
2952  // Examine each user of the CopyToReg use.
2953  for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end();
2954  FlagUI != FlagUE; ++FlagUI) {
2955  // Only examine the Flag result.
2956  if (FlagUI.getUse().getResNo() != 1)
2957  continue;
2958  // Anything unusual: assume conservatively.
2959  if (!FlagUI->isMachineOpcode())
2960  return false;
2961  // Examine the condition code of the user.
2962  X86::CondCode CC = getCondFromNode(*FlagUI);
2963 
2964  if (mayUseCarryFlag(CC))
2965  return false;
2966  }
2967 
2968  // This CopyToReg is ok. Move on to the next user.
2969  continue;
2970  }
2971 
2972  // This might be an unselected node. So look for the pre-isel opcodes that
2973  // use flags.
2974  unsigned CCOpNo;
2975  switch (UIOpc) {
2976  default:
2977  // Something unusual. Be conservative.
2978  return false;
2979  case X86ISD::SETCC: CCOpNo = 0; break;
2980  case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
2981  case X86ISD::CMOV: CCOpNo = 2; break;
2982  case X86ISD::BRCOND: CCOpNo = 2; break;
2983  }
2984 
2985  X86::CondCode CC = (X86::CondCode)UI->getConstantOperandVal(CCOpNo);
2986  if (mayUseCarryFlag(CC))
2987  return false;
2988  }
2989  return true;
2990 }
2991 
2992 /// Check whether or not the chain ending in StoreNode is suitable for doing
2993 /// the {load; op; store} to modify transformation.
2994 static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
2995  SDValue StoredVal, SelectionDAG *CurDAG,
2996  unsigned LoadOpNo,
2997  LoadSDNode *&LoadNode,
2998  SDValue &InputChain) {
2999  // Is the stored value result 0 of the operation?
3000  if (StoredVal.getResNo() != 0) return false;
3001 
3002  // Are there other uses of the operation other than the store?
3003  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
3004 
3005  // Is the store non-extending and non-indexed?
3006  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
3007  return false;
3008 
3009  SDValue Load = StoredVal->getOperand(LoadOpNo);
3010  // Is the stored value a non-extending and non-indexed load?
3011  if (!ISD::isNormalLoad(Load.getNode())) return false;
3012 
3013  // Return LoadNode by reference.
3014  LoadNode = cast<LoadSDNode>(Load);
3015 
3016  // Is store the only read of the loaded value?
3017  if (!Load.hasOneUse())
3018  return false;
3019 
3020  // Is the address of the store the same as the load?
3021  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
3022  LoadNode->getOffset() != StoreNode->getOffset())
3023  return false;
3024 
3025  bool FoundLoad = false;
3026  SmallVector<SDValue, 4> ChainOps;
3027  SmallVector<const SDNode *, 4> LoopWorklist;
3028  SmallPtrSet<const SDNode *, 16> Visited;
3029  const unsigned int Max = 1024;
3030 
3031  // Visualization of Load-Op-Store fusion:
3032  // -------------------------
3033  // Legend:
3034  // *-lines = Chain operand dependencies.
3035  // |-lines = Normal operand dependencies.
3036  // Dependencies flow down and right. n-suffix references multiple nodes.
3037  //
3038  // C Xn C
3039  // * * *
3040  // * * *
3041  // Xn A-LD Yn TF Yn
3042  // * * \ | * |
3043  // * * \ | * |
3044  // * * \ | => A--LD_OP_ST
3045  // * * \| \
3046  // TF OP \
3047  // * | \ Zn
3048  // * | \
3049  // A-ST Zn
3050  //
3051 
3052  // This merge induced dependences from: #1: Xn -> LD, OP, Zn
3053  // #2: Yn -> LD
3054  // #3: ST -> Zn
3055 
3056  // Ensure the transform is safe by checking for the dual
3057  // dependencies to make sure we do not induce a loop.
3058 
3059  // As LD is a predecessor to both OP and ST we can do this by checking:
3060  // a). if LD is a predecessor to a member of Xn or Yn.
3061  // b). if a Zn is a predecessor to ST.
3062 
3063  // However, (b) can only occur through being a chain predecessor to
3064  // ST, which is the same as Zn being a member or predecessor of Xn,
3065  // which is a subset of LD being a predecessor of Xn. So it's
3066  // subsumed by check (a).
3067 
3068  SDValue Chain = StoreNode->getChain();
3069 
3070  // Gather X elements in ChainOps.
3071  if (Chain == Load.getValue(1)) {
3072  FoundLoad = true;
3073  ChainOps.push_back(Load.getOperand(0));
3074  } else if (Chain.getOpcode() == ISD::TokenFactor) {
3075  for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
3076  SDValue Op = Chain.getOperand(i);
3077  if (Op == Load.getValue(1)) {
3078  FoundLoad = true;
3079  // Drop Load, but keep its chain. No cycle check necessary.
3080  ChainOps.push_back(Load.getOperand(0));
3081  continue;
3082  }
3083  LoopWorklist.push_back(Op.getNode());
3084  ChainOps.push_back(Op);
3085  }
3086  }
3087 
3088  if (!FoundLoad)
3089  return false;
3090 
3091  // Worklist is currently Xn. Add Yn to worklist.
3092  for (SDValue Op : StoredVal->ops())
3093  if (Op.getNode() != LoadNode)
3094  LoopWorklist.push_back(Op.getNode());
3095 
3096  // Check (a) if Load is a predecessor to Xn + Yn
3097  if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max,
3098  true))
3099  return false;
3100 
3101  InputChain =
3102  CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ChainOps);
3103  return true;
3104 }
3105 
3106 // Change a chain of {load; op; store} of the same value into a simple op
3107 // through memory of that value, if the uses of the modified value and its
3108 // address are suitable.
3109 //
3110 // The tablegen memory operand pattern is currently not able to match the
3111 // case where the EFLAGS of the original operation are used.
3112 //
3113 // To move this to tablegen, we'll need to improve tablegen to allow flags to
3114 // be transferred from a node in the pattern to the result node, probably with
3115 // a new keyword. For example, we have this
3116 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
3117 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
3118 // (implicit EFLAGS)]>;
3119 // but maybe need something like this
3120 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
3121 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
3122 // (transferrable EFLAGS)]>;
3123 //
3124 // Until then, we manually fold these and instruction select the operation
3125 // here.
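//
// A small worked example (illustrative; the pointer register depends on how
// the address lowers): for IR like
//   %v = load i64, i64* %p
//   %d = add i64 %v, -1
//   store i64 %d, i64* %p
// this fold selects a single read-modify-write instruction such as
//   decq (%rdi)
// provided the EFLAGS produced by the add have no other users.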
3126 bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
3127  StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
3128  SDValue StoredVal = StoreNode->getOperand(1);
3129  unsigned Opc = StoredVal->getOpcode();
3130 
3131  // Before we try to select anything, make sure this is a memory operand size
3132  // and an opcode we can handle. Note that this must match the code below that
3133  // actually lowers the opcodes.
3134  EVT MemVT = StoreNode->getMemoryVT();
3135  if (MemVT != MVT::i64 && MemVT != MVT::i32 && MemVT != MVT::i16 &&
3136  MemVT != MVT::i8)
3137  return false;
3138 
3139  bool IsCommutable = false;
3140  bool IsNegate = false;
3141  switch (Opc) {
3142  default:
3143  return false;
3144  case X86ISD::SUB:
3145  IsNegate = isNullConstant(StoredVal.getOperand(0));
3146  break;
3147  case X86ISD::SBB:
3148  break;
3149  case X86ISD::ADD:
3150  case X86ISD::ADC:
3151  case X86ISD::AND:
3152  case X86ISD::OR:
3153  case X86ISD::XOR:
3154  IsCommutable = true;
3155  break;
3156  }
3157 
3158  unsigned LoadOpNo = IsNegate ? 1 : 0;
3159  LoadSDNode *LoadNode = nullptr;
3160  SDValue InputChain;
3161  if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
3162  LoadNode, InputChain)) {
3163  if (!IsCommutable)
3164  return false;
3165 
3166  // This operation is commutable, try the other operand.
3167  LoadOpNo = 1;
3168  if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
3169  LoadNode, InputChain))
3170  return false;
3171  }
3172 
3173  SDValue Base, Scale, Index, Disp, Segment;
3174  if (!selectAddr(LoadNode, LoadNode->getBasePtr(), Base, Scale, Index, Disp,
3175  Segment))
3176  return false;
3177 
3178  auto SelectOpcode = [&](unsigned Opc64, unsigned Opc32, unsigned Opc16,
3179  unsigned Opc8) {
3180  switch (MemVT.getSimpleVT().SimpleTy) {
3181  case MVT::i64:
3182  return Opc64;
3183  case MVT::i32:
3184  return Opc32;
3185  case MVT::i16:
3186  return Opc16;
3187  case MVT::i8:
3188  return Opc8;
3189  default:
3190  llvm_unreachable("Invalid size!");
3191  }
3192  };
3193 
3194  MachineSDNode *Result;
3195  switch (Opc) {
3196  case X86ISD::SUB:
3197  // Handle negate.
3198  if (IsNegate) {
3199  unsigned NewOpc = SelectOpcode(X86::NEG64m, X86::NEG32m, X86::NEG16m,
3200  X86::NEG8m);
3201  const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
3202  Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
3203  MVT::Other, Ops);
3204  break;
3205  }
3206  LLVM_FALLTHROUGH;
3207  case X86ISD::ADD:
3208  // Try to match inc/dec.
3209  if (!Subtarget->slowIncDec() || CurDAG->shouldOptForSize()) {
3210  bool IsOne = isOneConstant(StoredVal.getOperand(1));
3211  bool IsNegOne = isAllOnesConstant(StoredVal.getOperand(1));
3212  // An ADD/SUB with 1/-1 whose carry flag isn't used can use inc/dec.
3213  if ((IsOne || IsNegOne) && hasNoCarryFlagUses(StoredVal.getValue(1))) {
3214  unsigned NewOpc =
3215  ((Opc == X86ISD::ADD) == IsOne)
3216  ? SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m, X86::INC8m)
3217  : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m, X86::DEC8m);
3218  const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
3219  Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
3220  MVT::Other, Ops);
3221  break;
3222  }
3223  }
3224  LLVM_FALLTHROUGH;
3225  case X86ISD::ADC:
3226  case X86ISD::SBB:
3227  case X86ISD::AND:
3228  case X86ISD::OR:
3229  case X86ISD::XOR: {
3230  auto SelectRegOpcode = [SelectOpcode](unsigned Opc) {
3231  switch (Opc) {
3232  case X86ISD::ADD:
3233  return SelectOpcode(X86::ADD64mr, X86::ADD32mr, X86::ADD16mr,
3234  X86::ADD8mr);
3235  case X86ISD::ADC:
3236  return SelectOpcode(X86::ADC64mr, X86::ADC32mr, X86::ADC16mr,
3237  X86::ADC8mr);
3238  case X86ISD::SUB:
3239  return SelectOpcode(X86::SUB64mr, X86::SUB32mr, X86::SUB16mr,
3240  X86::SUB8mr);
3241  case X86ISD::SBB:
3242  return SelectOpcode(X86::SBB64mr, X86::SBB32mr, X86::SBB16mr,
3243  X86::SBB8mr);
3244  case X86ISD::AND:
3245  return SelectOpcode(X86::AND64mr, X86::AND32mr, X86::AND16mr,
3246  X86::AND8mr);
3247  case X86ISD::OR:
3248  return SelectOpcode(X86::OR64mr, X86::OR32mr, X86::OR16mr, X86::OR8mr);
3249  case X86ISD::XOR:
3250  return SelectOpcode(X86::XOR64mr, X86::XOR32mr, X86::XOR16mr,
3251  X86::XOR8mr);
3252  default:
3253  llvm_unreachable("Invalid opcode!");
3254  }
3255  };
3256  auto SelectImm8Opcode = [SelectOpcode](unsigned Opc) {
3257  switch (Opc) {
3258  case X86ISD::ADD:
3259  return SelectOpcode(X86::ADD64mi8, X86::ADD32mi8, X86::ADD16mi8, 0);
3260  case X86ISD::ADC:
3261  return SelectOpcode(X86::ADC64mi8, X86::ADC32mi8, X86::ADC16mi8, 0);
3262  case X86ISD::SUB:
3263  return SelectOpcode(X86::SUB64mi8, X86::SUB32mi8, X86::SUB16mi8, 0);
3264  case X86ISD::SBB:
3265  return SelectOpcode(X86::SBB64mi8, X86::SBB32mi8, X86::SBB16mi8, 0);
3266  case X86ISD::AND:
3267  return SelectOpcode(X86::AND64mi8, X86::AND32mi8, X86::AND16mi8, 0);
3268  case X86ISD::OR:
3269  return SelectOpcode(X86::OR64mi8, X86::OR32mi8, X86::OR16mi8, 0);
3270  case X86ISD::XOR:
3271  return SelectOpcode(X86::XOR64mi8, X86::XOR32mi8, X86::XOR16mi8, 0);
3272  default:
3273  llvm_unreachable("Invalid opcode!");
3274  }
3275  };
3276  auto SelectImmOpcode = [SelectOpcode](unsigned Opc) {
3277  switch (Opc) {
3278  case X86ISD::ADD:
3279  return SelectOpcode(X86::ADD64mi32, X86::ADD32mi, X86::ADD16mi,
3280  X86::ADD8mi);
3281  case X86ISD::ADC:
3282  return SelectOpcode(X86::ADC64mi32, X86::ADC32mi, X86::ADC16mi,
3283  X86::ADC8mi);
3284  case X86ISD::SUB:
3285  return SelectOpcode(X86::SUB64mi32, X86::SUB32mi, X86::SUB16mi,
3286  X86::SUB8mi);
3287  case X86ISD::SBB:
3288  return SelectOpcode(X86::SBB64mi32, X86::SBB32mi, X86::SBB16mi,
3289  X86::SBB8mi);
3290  case X86ISD::AND:
3291  return SelectOpcode(X86::AND64mi32, X86::AND32mi, X86::AND16mi,
3292  X86::AND8mi);
3293  case X86ISD::OR:
3294  return SelectOpcode(X86::OR64mi32, X86::OR32mi, X86::OR16mi,
3295  X86::OR8mi);
3296  case X86ISD::XOR:
3297  return SelectOpcode(X86::XOR64mi32, X86::XOR32mi, X86::XOR16mi,
3298  X86::XOR8mi);
3299  default:
3300  llvm_unreachable("Invalid opcode!");
3301  }
3302  };
3303 
3304  unsigned NewOpc = SelectRegOpcode(Opc);
3305  SDValue Operand = StoredVal->getOperand(1-LoadOpNo);
3306 
3307  // See if the operand is a constant that we can fold into an immediate
3308  // operand.
3309  if (auto *OperandC = dyn_cast<ConstantSDNode>(Operand)) {
3310  int64_t OperandV = OperandC->getSExtValue();
3311 
3312  // Check if we can shrink the operand enough to fit in an immediate (or
3313  // fit into a smaller immediate) by negating it and switching the
3314  // operation.
3315  if ((Opc == X86ISD::ADD || Opc == X86ISD::SUB) &&
3316  ((MemVT != MVT::i8 && !isInt<8>(OperandV) && isInt<8>(-OperandV)) ||
3317  (MemVT == MVT::i64 && !isInt<32>(OperandV) &&
3318  isInt<32>(-OperandV))) &&
3319  hasNoCarryFlagUses(StoredVal.getValue(1))) {
3320  OperandV = -OperandV;
3321  Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD;
3322  }
3323 
3324  // First try to fit this into an Imm8 operand. If it doesn't fit, then try
3325  // the larger immediate operand.
3326  if (MemVT != MVT::i8 && isInt<8>(OperandV)) {
3327  Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
3328  NewOpc = SelectImm8Opcode(Opc);
3329  } else if (MemVT != MVT::i64 || isInt<32>(OperandV)) {
3330  Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
3331  NewOpc = SelectImmOpcode(Opc);
3332  }
3333  }
3334 
3335  if (Opc == X86ISD::ADC || Opc == X86ISD::SBB) {
3336  SDValue CopyTo =
3337  CurDAG->getCopyToReg(InputChain, SDLoc(Node), X86::EFLAGS,
3338  StoredVal.getOperand(2), SDValue());
3339 
3340  const SDValue Ops[] = {Base, Scale, Index, Disp,
3341  Segment, Operand, CopyTo, CopyTo.getValue(1)};
3342  Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3343  Ops);
3344  } else {
3345  const SDValue Ops[] = {Base, Scale, Index, Disp,
3346  Segment, Operand, InputChain};
3347  Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3348  Ops);
3349  }
3350  break;
3351  }
3352  default:
3353  llvm_unreachable("Invalid opcode!");
3354  }
3355 
3356  MachineMemOperand *MemOps[] = {StoreNode->getMemOperand(),
3357  LoadNode->getMemOperand()};
3358  CurDAG->setNodeMemRefs(Result, MemOps);
3359 
3360  // Update Load Chain uses as well.
3361  ReplaceUses(SDValue(LoadNode, 1), SDValue(Result, 1));
3362  ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
3363  ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
3364  CurDAG->RemoveDeadNode(Node);
3365  return true;
3366 }
3367 
3368 // See if this is an X & Mask that we can match to BEXTR/BZHI.
3369 // Where Mask is one of the following patterns:
3370 // a) x & (1 << nbits) - 1
3371 // b) x & ~(-1 << nbits)
3372 // c) x & (-1 >> (32 - y))
3373 // d) x << (32 - y) >> (32 - y)
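//
// All four shapes compute the same low-bit mask. E.g. with nbits == 5 on
// i32, (a) gives (1 << 5) - 1 == 0x1f and (b) gives ~(-1 << 5) == 0x1f, so
// the 'and' extracts the low 5 bits of x -- exactly what BZHI computes.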
3374 bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
3375  assert(
3376  (Node->getOpcode() == ISD::AND || Node->getOpcode() == ISD::SRL) &&
3377  "Should be either an and-mask, or right-shift after clearing high bits.");
3378 
3379  // BEXTR is a BMI instruction and BZHI is a BMI2 instruction. We need at least one.
3380  if (!Subtarget->hasBMI() && !Subtarget->hasBMI2())
3381  return false;
3382 
3383  MVT NVT = Node->getSimpleValueType(0);
3384 
3385  // Only supported for 32 and 64 bits.
3386  if (NVT != MVT::i32 && NVT != MVT::i64)
3387  return false;
3388 
3389  SDValue NBits;
3390 
3391  // If we have BMI2's BZHI, we are ok with multi-use patterns.
3392  // Else, if we only have BMI1's BEXTR, we require one-use.
3393  const bool CanHaveExtraUses = Subtarget->hasBMI2();
3394  auto checkUses = [CanHaveExtraUses](SDValue Op, unsigned NUses) {
3395  return CanHaveExtraUses ||
3396  Op.getNode()->hasNUsesOfValue(NUses, Op.getResNo());
3397  };
3398  auto checkOneUse = [checkUses](SDValue Op) { return checkUses(Op, 1); };
3399  auto checkTwoUse = [checkUses](SDValue Op) { return checkUses(Op, 2); };
3400 
3401  auto peekThroughOneUseTruncation = [checkOneUse](SDValue V) {
3402  if (V->getOpcode() == ISD::TRUNCATE && checkOneUse(V)) {
3403  assert(V.getSimpleValueType() == MVT::i32 &&
3404  V.getOperand(0).getSimpleValueType() == MVT::i64 &&
3405  "Expected i64 -> i32 truncation");
3406  V = V.getOperand(0);
3407  }
3408  return V;
3409  };
3410 
3411  // a) x & ((1 << nbits) + (-1))
3412  auto matchPatternA = [checkOneUse, peekThroughOneUseTruncation,
3413  &NBits](SDValue Mask) -> bool {
3414  // Match `add`. Must only have one use!
3415  if (Mask->getOpcode() != ISD::ADD || !checkOneUse(Mask))
3416  return false;
3417  // We should be adding an all-ones constant (i.e. subtracting one).
3418  if (!isAllOnesConstant(Mask->getOperand(1)))
3419  return false;
3420  // Match `1 << nbits`. Might be truncated. Must only have one use!
3421  SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
3422  if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
3423  return false;
3424  if (!isOneConstant(M0->getOperand(0)))
3425  return false;
3426  NBits = M0->getOperand(1);
3427  return true;
3428  };
3429 
3430  auto isAllOnes = [this, peekThroughOneUseTruncation, NVT](SDValue V) {
3431  V = peekThroughOneUseTruncation(V);
3432  return CurDAG->MaskedValueIsAllOnes(
3433  V, APInt::getLowBitsSet(V.getSimpleValueType().getSizeInBits(),
3434  NVT.getSizeInBits()));
3435  };
3436 
3437  // b) x & ~(-1 << nbits)
3438  auto matchPatternB = [checkOneUse, isAllOnes, peekThroughOneUseTruncation,
3439  &NBits](SDValue Mask) -> bool {
3440  // Match `~()`. Must only have one use!
3441  if (Mask.getOpcode() != ISD::XOR || !checkOneUse(Mask))
3442  return false;
3443  // The -1 only has to be all-ones for the final Node's NVT.
3444  if (!isAllOnes(Mask->getOperand(1)))
3445  return false;
3446  // Match `-1 << nbits`. Might be truncated. Must only have one use!
3447  SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
3448  if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
3449  return false;
3450  // The -1 only has to be all-ones for the final Node's NVT.
3451  if (!isAllOnes(M0->getOperand(0)))
3452  return false;
3453  NBits = M0->getOperand(1);
3454  return true;
3455  };
3456 
3457  // Match potentially-truncated (bitwidth - y)
3458  auto matchShiftAmt = [checkOneUse, &NBits](SDValue ShiftAmt,
3459  unsigned Bitwidth) {
3460  // Skip over a truncate of the shift amount.
3461  if (ShiftAmt.getOpcode() == ISD::TRUNCATE) {
3462  ShiftAmt = ShiftAmt.getOperand(0);
3463  // The trunc should have been the only user of the real shift amount.
3464  if (!checkOneUse(ShiftAmt))
3465  return false;
3466  }
3467  // Match the shift amount as: (bitwidth - y). It should go away, too.
3468  if (ShiftAmt.getOpcode() != ISD::SUB)
3469  return false;
3470  auto *V0 = dyn_cast<ConstantSDNode>(ShiftAmt.getOperand(0));
3471  if (!V0 || V0->getZExtValue() != Bitwidth)
3472  return false;
3473  NBits = ShiftAmt.getOperand(1);
3474  return true;
3475  };
3476 
3477  // c) x & (-1 >> (32 - y))
3478  auto matchPatternC = [checkOneUse, peekThroughOneUseTruncation,
3479  matchShiftAmt](SDValue Mask) -> bool {
3480  // The mask itself may be truncated.
3481  Mask = peekThroughOneUseTruncation(Mask);
3482  unsigned Bitwidth = Mask.getSimpleValueType().getSizeInBits();
3483  // Match `l>>`. Must only have one use!
3484  if (Mask.getOpcode() != ISD::SRL || !checkOneUse(Mask))
3485  return false;
3486  // We should be shifting a truly all-ones constant.
3487  if (!isAllOnesConstant(Mask.getOperand(0)))
3488  return false;
3489  SDValue M1 = Mask.getOperand(1);
3490  // The shift amount should not be used externally.
3491  if (!checkOneUse(M1))
3492  return false;
3493  return matchShiftAmt(M1, Bitwidth);
3494  };
3495 
3496  SDValue X;
3497 
3498  // d) x << (32 - y) >> (32 - y)
3499  auto matchPatternD = [checkOneUse, checkTwoUse, matchShiftAmt,
3500  &X](SDNode *Node) -> bool {
3501  if (Node->getOpcode() != ISD::SRL)
3502  return false;
3503  SDValue N0 = Node->getOperand(0);
3504  if (N0->getOpcode() != ISD::SHL || !checkOneUse(N0))
3505  return false;
3506  unsigned Bitwidth = N0.getSimpleValueType().getSizeInBits();
3507  SDValue N1 = Node->getOperand(1);
3508  SDValue N01 = N0->getOperand(1);
3509  // Both of the shifts must be by the exact same value.
3510  // There should not be any uses of the shift amount outside of the pattern.
3511  if (N1 != N01 || !checkTwoUse(N1))
3512  return false;
3513  if (!matchShiftAmt(N1, Bitwidth))
3514  return false;
3515  X = N0->getOperand(0);
3516  return true;
3517  };
3518 
3519  auto matchLowBitMask = [matchPatternA, matchPatternB,
3520  matchPatternC](SDValue Mask) -> bool {
3521  return matchPatternA(Mask) || matchPatternB(Mask) || matchPatternC(Mask);
3522  };
3523 
3524  if (Node->getOpcode() == ISD::AND) {
3525  X = Node->getOperand(0);
3526  SDValue Mask = Node->getOperand(1);
3527 
3528  if (matchLowBitMask(Mask)) {
3529  // Great.
3530  } else {
3531  std::swap(X, Mask);
3532  if (!matchLowBitMask(Mask))
3533  return false;
3534  }
3535  } else if (!matchPatternD(Node))
3536  return false;
3537 
3538  SDLoc DL(Node);
3539 
3540  // Truncate the shift amount.
3541  NBits = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NBits);
3542  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3543 
3544  // Insert 8-bit NBits into lowest 8 bits of 32-bit register.
3545  // All the other bits are undefined, we do not care about them.
3546  SDValue ImplDef = SDValue(
3547  CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i32), 0);
3548  insertDAGNode(*CurDAG, SDValue(Node, 0), ImplDef);
3549 
3550  SDValue SRIdxVal = CurDAG->getTargetConstant(X86::sub_8bit, DL, MVT::i32);
3551  insertDAGNode(*CurDAG, SDValue(Node, 0), SRIdxVal);
3552  NBits = SDValue(
3553  CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::i32, ImplDef,
3554  NBits, SRIdxVal), 0);
3555  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3556 
3557  if (Subtarget->hasBMI2()) {
3558  // Great, just emit the BZHI.
3559  if (NVT != MVT::i32) {
3560  // But have to place the bit count into the wide-enough register first.
3561  NBits = CurDAG->getNode(ISD::ANY_EXTEND, DL, NVT, NBits);
3562  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3563  }
3564 
3565  SDValue Extract = CurDAG->getNode(X86ISD::BZHI, DL, NVT, X, NBits);
3566  ReplaceNode(Node, Extract.getNode());
3567  SelectCode(Extract.getNode());
3568  return true;
3569  }
3570 
3571  // Else, if we do *NOT* have BMI2, let's find out if 'X' is *logically*
3572  // shifted (potentially with a one-use trunc in between), where the
3573  // truncation was the only use of the shift, and if so look past the
3574  // one-use truncation.
3575  {
3576  SDValue RealX = peekThroughOneUseTruncation(X);
3577  // FIXME: only if the shift is one-use?
3578  if (RealX != X && RealX.getOpcode() == ISD::SRL)
3579  X = RealX;
3580  }
3581 
3582  MVT XVT = X.getSimpleValueType();
3583 
3584  // Else, emitting BEXTR requires one more step.
3585  // The 'control' of BEXTR has the pattern of:
3586  // [15...8 bit][ 7...0 bit] location
3587  // [ bit count][ shift] name
3588  // I.e. 0b00000011'00000001 means (x >> 0b1) & 0b11
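 // For example, extracting 4 bits starting at bit 3 uses the control
 // (4 << 8) | 3 == 0x0403, computing (x >> 3) & 0b1111.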
3589 
3590  // Shift NBits left by 8 bits, thus producing 'control'.
3591  // This makes the low 8 bits zero.
3592  SDValue C8 = CurDAG->getConstant(8, DL, MVT::i8);
3593  insertDAGNode(*CurDAG, SDValue(Node, 0), C8);
3594  SDValue Control = CurDAG->getNode(ISD::SHL, DL, MVT::i32, NBits, C8);
3595  insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3596 
3597  // If the 'X' is *logically* shifted, we can fold that shift into 'control'.
3598  // FIXME: only if the shift is one-use?
3599  if (X.getOpcode() == ISD::SRL) {
3600  SDValue ShiftAmt = X.getOperand(1);
3601  X = X.getOperand(0);
3602 
3603  assert(ShiftAmt.getValueType() == MVT::i8 &&
3604  "Expected shift amount to be i8");
3605 
3606  // Now, *zero*-extend the shift amount. The bits 8...15 *must* be zero!
3607  // We could zext to i16 in some form, but we intentionally don't do that.
3608  SDValue OrigShiftAmt = ShiftAmt;
3609  ShiftAmt = CurDAG->getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShiftAmt);
3610  insertDAGNode(*CurDAG, OrigShiftAmt, ShiftAmt);
3611 
3612  // And now 'or' these low 8 bits of shift amount into the 'control'.
3613  Control = CurDAG->getNode(ISD::OR, DL, MVT::i32, Control, ShiftAmt);
3614  insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3615  }
3616 
3617  // But have to place the 'control' into the wide-enough register first.
3618  if (XVT != MVT::i32) {
3619  Control = CurDAG->getNode(ISD::ANY_EXTEND, DL, XVT, Control);
3620  insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3621  }
3622 
3623  // And finally, form the BEXTR itself.
3624  SDValue Extract = CurDAG->getNode(X86ISD::BEXTR, DL, XVT, X, Control);
3625 
3626  // The 'X' was originally truncated. Do that now.
3627  if (XVT != NVT) {
3628  insertDAGNode(*CurDAG, SDValue(Node, 0), Extract);
3629  Extract = CurDAG->getNode(ISD::TRUNCATE, DL, NVT, Extract);
3630  }
3631 
3632  ReplaceNode(Node, Extract.getNode());
3633  SelectCode(Extract.getNode());
3634 
3635  return true;
3636 }
3637 
3638 // See if this is an (X >> C1) & C2 that we can match to BEXTR/BEXTRI.
3639 MachineSDNode *X86DAGToDAGISel::matchBEXTRFromAndImm(SDNode *Node) {
3640  MVT NVT = Node->getSimpleValueType(0);
3641  SDLoc dl(Node);
3642 
3643  SDValue N0 = Node->getOperand(0);
3644  SDValue N1 = Node->getOperand(1);
3645 
3646  // If we have TBM we can use an immediate for the control. If we have BMI
3647  // we should only do this if the BEXTR instruction is implemented well.
3648  // Otherwise moving the control into a register makes this more costly.
3649  // TODO: Maybe load folding, greater than 32-bit masks, or a guarantee of LICM
3650  // hoisting the move immediate would make it worthwhile with a less optimal
3651  // BEXTR?
3652  bool PreferBEXTR =
3653  Subtarget->hasTBM() || (Subtarget->hasBMI() && Subtarget->hasFastBEXTR());
3654  if (!PreferBEXTR && !Subtarget->hasBMI2())
3655  return nullptr;
3656 
3657  // Must have a shift right.
3658  if (N0->getOpcode() != ISD::SRL && N0->getOpcode() != ISD::SRA)
3659  return nullptr;
3660 
3661  // Shift can't have additional users.
3662  if (!N0->hasOneUse())
3663  return nullptr;
3664 
3665  // Only supported for 32 and 64 bits.
3666  if (NVT != MVT::i32 && NVT != MVT::i64)
3667  return nullptr;
3668 
3669  // Shift amount and RHS of and must be constant.
3670  ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(N1);
3671  ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
3672  if (!MaskCst || !ShiftCst)
3673  return nullptr;
3674 
3675  // And RHS must be a mask.
3676  uint64_t Mask = MaskCst->getZExtValue();
3677  if (!isMask_64(Mask))
3678  return nullptr;
3679 
3680  uint64_t Shift = ShiftCst->getZExtValue();
3681  uint64_t MaskSize = countPopulation(Mask);
3682 
3683  // Don't interfere with something that can be handled by extracting AH.
3684  // TODO: If we are able to fold a load, BEXTR might still be better than AH.
3685  if (Shift == 8 && MaskSize == 8)
3686  return nullptr;
3687 
3688  // Make sure we are only using bits that were in the original value, not
3689  // shifted in.
3690  if (Shift + MaskSize > NVT.getSizeInBits())
3691  return nullptr;
3692 
3693  // BZHI, if available, is always fast, unlike BEXTR. But even if we decide
3694  // that we can't use BEXTR, it is only worthwhile using BZHI if the mask
3695  // does not fit into 32 bits. Load folding is not a sufficient reason.
3696  if (!PreferBEXTR && MaskSize <= 32)
3697  return nullptr;
3698 
3699  SDValue Control;
3700  unsigned ROpc, MOpc;
3701 
3702  if (!PreferBEXTR) {
3703  assert(Subtarget->hasBMI2() && "We must have BMI2's BZHI then.");
3704  // If we can't make use of BEXTR then we can't fuse shift+mask stages.
3705  // Let's perform the mask first and apply the shift later. Note that we need
3706  // to widen the mask to account for the fact that we'll apply the shift afterwards!
3707  Control = CurDAG->getTargetConstant(Shift + MaskSize, dl, NVT);
3708  ROpc = NVT == MVT::i64 ? X86::BZHI64rr : X86::BZHI32rr;
3709  MOpc = NVT == MVT::i64 ? X86::BZHI64rm : X86::BZHI32rm;
3710  unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
3711  Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
3712  } else {
3713  // The 'control' of BEXTR has the pattern of:
3714  // [15...8 bit][ 7...0 bit] location
3715  // [ bit count][ shift] name
3716  // I.e. 0b00000011'00000001 means (x >> 0b1) & 0b11
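 // E.g. for (x >> 8) & 0xffff we have Shift == 8 and MaskSize == 16,
 // giving a control of (16 << 8) | 8 == 0x1008.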
3717  Control = CurDAG->getTargetConstant(Shift | (MaskSize << 8), dl, NVT);
3718  if (Subtarget->hasTBM()) {
3719  ROpc = NVT == MVT::i64 ? X86::BEXTRI64ri : X86::BEXTRI32ri;
3720  MOpc = NVT == MVT::i64 ? X86::BEXTRI64mi : X86::BEXTRI32mi;
3721  } else {
3722  assert(Subtarget->hasBMI() && "We must have BMI1's BEXTR then.");
3723  // BMI requires the immediate to be placed in a register.
3724  ROpc = NVT == MVT::i64 ? X86::BEXTR64rr : X86::BEXTR32rr;
3725  MOpc = NVT == MVT::i64 ? X86::BEXTR64rm : X86::BEXTR32rm;
3726  unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
3727  Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
3728  }
3729  }
3730 
3731  MachineSDNode *NewNode;
3732  SDValue Input = N0->getOperand(0);
3733  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3734  if (tryFoldLoad(Node, N0.getNode(), Input, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3735  SDValue Ops[] = {
3736  Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Control, Input.getOperand(0)};
3737  SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
3738  NewNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3739  // Update the chain.
3740  ReplaceUses(Input.getValue(1), SDValue(NewNode, 2));
3741  // Record the mem-refs
3742  CurDAG->setNodeMemRefs(NewNode, {cast<LoadSDNode>(Input)->getMemOperand()});
3743  } else {
3744  NewNode = CurDAG->getMachineNode(ROpc, dl, NVT, MVT::i32, Input, Control);
3745  }
3746 
3747  if (!PreferBEXTR) {
3748  // We still need to apply the shift.
3749  SDValue ShAmt = CurDAG->getTargetConstant(Shift, dl, NVT);
3750  unsigned NewOpc = NVT == MVT::i64 ? X86::SHR64ri : X86::SHR32ri;
3751  NewNode =
3752  CurDAG->getMachineNode(NewOpc, dl, NVT, SDValue(NewNode, 0), ShAmt);
3753  }
3754 
3755  return NewNode;
3756 }
3757 
3758 // Emit a PCMPISTR(I/M) instruction.
3759 MachineSDNode *X86DAGToDAGISel::emitPCMPISTR(unsigned ROpc, unsigned MOpc,
3760  bool MayFoldLoad, const SDLoc &dl,
3761  MVT VT, SDNode *Node) {
3762  SDValue N0 = Node->getOperand(0);
3763  SDValue N1 = Node->getOperand(1);
3764  SDValue Imm = Node->getOperand(2);
3765  const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3766  Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3767 
3768  // Try to fold a load. No need to check alignment.
3769  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3770  if (MayFoldLoad && tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3771  SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3772  N1.getOperand(0) };
3773  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other);
3774  MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3775  // Update the chain.
3776  ReplaceUses(N1.getValue(1), SDValue(CNode, 2));
3777  // Record the mem-refs
3778  CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
3779  return CNode;
3780  }
3781 
3782  SDValue Ops[] = { N0, N1, Imm };
3783  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32);
3784  MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3785  return CNode;
3786 }
3787 
3788 // Emit a PCMPESTR(I/M) instruction. Also return the Glue result in case we need
3789 // to emit a second instruction after this one. This is needed since we have two
3790 // CopyToReg nodes glued before this and we need to continue that glue through.
3791 MachineSDNode *X86DAGToDAGISel::emitPCMPESTR(unsigned ROpc, unsigned MOpc,
3792  bool MayFoldLoad, const SDLoc &dl,
3793  MVT VT, SDNode *Node,
3794  SDValue &InFlag) {
3795  SDValue N0 = Node->getOperand(0);
3796  SDValue N2 = Node->getOperand(2);
3797  SDValue Imm = Node->getOperand(4);
3798  const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3799  Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3800 
3801  // Try to fold a load. No need to check alignment.
3802  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3803  if (MayFoldLoad && tryFoldLoad(Node, N2, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3804  SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3805  N2.getOperand(0), InFlag };
3806  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other, MVT::Glue);
3807  MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3808  InFlag = SDValue(CNode, 3);
3809  // Update the chain.
3810  ReplaceUses(N2.getValue(1), SDValue(CNode, 2));
3811  // Record the mem-refs
3812  CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N2)->getMemOperand()});
3813  return CNode;
3814  }
3815 
3816  SDValue Ops[] = { N0, N2, Imm, InFlag };
3817  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Glue);
3818  MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3819  InFlag = SDValue(CNode, 2);
3820  return CNode;
3821 }
3822 
3823 bool X86DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
3824  EVT VT = N->getValueType(0);
3825 
3826  // Only handle scalar shifts.
3827  if (VT.isVector())
3828  return false;
3829 
3830  // Narrower shifts only mask to 5 bits in hardware.
3831  unsigned Size = VT == MVT::i64 ? 64 : 32;
3832 
3833  SDValue OrigShiftAmt = N->getOperand(1);
3834  SDValue ShiftAmt = OrigShiftAmt;
3835  SDLoc DL(N);
3836 
3837  // Skip over a truncate of the shift amount.
3838  if (ShiftAmt->getOpcode() == ISD::TRUNCATE)
3839  ShiftAmt = ShiftAmt->getOperand(0);
3840 
3841  // This function is called after X86DAGToDAGISel::matchBitExtract(),
3842  // so we are not afraid that we might mess up BZHI/BEXTR pattern.
3843 
3844  SDValue NewShiftAmt;
3845  if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
3846  SDValue Add0 = ShiftAmt->getOperand(0);
3847  SDValue Add1 = ShiftAmt->getOperand(1);
3848  // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
3849  // to avoid the ADD/SUB.
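 // E.g. a 32-bit shift by (x + 32) equals a shift by x alone, since the
 // hardware masks the amount to the low 5 bits anyway.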
3850  if (isa<ConstantSDNode>(Add1) &&
3851  cast<ConstantSDNode>(Add1)->getZExtValue() % Size == 0) {
3852  NewShiftAmt = Add0;
3853  // If we are shifting by N-X where N == 0 mod Size, then just shift by -X to
3854  // generate a NEG instead of a SUB of a constant.
3855  } else if (ShiftAmt->getOpcode() == ISD::SUB &&
3856  isa<ConstantSDNode>(Add0) &&
3857  cast<ConstantSDNode>(Add0)->getZExtValue() != 0 &&
3858  cast<ConstantSDNode>(Add0)->getZExtValue() % Size == 0) {
3859  // Insert a negate op.
3860  // TODO: This isn't guaranteed to replace the sub if there is a logic cone
3861  // that uses it that's not a shift.
3862  EVT SubVT = ShiftAmt.getValueType();
3863  SDValue Zero = CurDAG->getConstant(0, DL, SubVT);
3864  SDValue Neg = CurDAG->getNode(ISD::SUB, DL, SubVT, Zero, Add1);
3865  NewShiftAmt = Neg;
3866 
3867  // Insert these operands into a valid topological order so they can
3868  // get selected independently.
3869  insertDAGNode(*CurDAG, OrigShiftAmt, Zero);
3870  insertDAGNode(*CurDAG, OrigShiftAmt, Neg);
3871  } else
3872  return false;
3873  } else
3874  return false;
3875 
3876  if (NewShiftAmt.getValueType() != MVT::i8) {
3877  // Need to truncate the shift amount.
3878  NewShiftAmt = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NewShiftAmt);
3879  // Add to a correct topological ordering.
3880  insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
3881  }
3882 
3883  // Insert a new mask to keep the shift amount legal. This should be removed
3884  // by isel patterns.
3885  NewShiftAmt = CurDAG->getNode(ISD::AND, DL, MVT::i8, NewShiftAmt,
3886  CurDAG->getConstant(Size - 1, DL, MVT::i8));
3887  // Place in a correct topological ordering.
3888  insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
3889 
3890  SDNode *UpdatedNode = CurDAG->UpdateNodeOperands(N, N->getOperand(0),
3891  NewShiftAmt);
3892  if (UpdatedNode != N) {
3893  // If we found an existing node, we should replace ourselves with that node
3894  // and wait for it to be selected after its other users.
3895  ReplaceNode(N, UpdatedNode);
3896  return true;
3897  }
3898 
3899  // If the original shift amount is now dead, delete it so that we don't run
3900  // it through isel.
3901  if (OrigShiftAmt.getNode()->use_empty())
3902  CurDAG->RemoveDeadNode(OrigShiftAmt.getNode());
3903 
3904  // Now that we've optimized the shift amount, defer to normal isel to get
3905  // load folding and legacy vs BMI2 selection without repeating it here.
3906  SelectCode(N);
3907  return true;
3908 }
3909 
3910 bool X86DAGToDAGISel::tryShrinkShlLogicImm(SDNode *N) {
3911  MVT NVT = N->getSimpleValueType(0);
3912  unsigned Opcode = N->getOpcode();
3913  SDLoc dl(N);
3914 
3915  // For operations of the form (x << C1) op C2, check if we can use a smaller
3916  // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
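 // E.g. (x << 8) | 0x4000 becomes ((x | 0x40) << 8): 0x4000 needs a 4-byte
 // immediate, while 0x40 fits in a sign-extended 1-byte immediate.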
3917  SDValue Shift = N->getOperand(0);
3918  SDValue N1 = N->getOperand(1);
3919 
3920  ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
3921  if (!Cst)
3922  return false;
3923 
3924  int64_t Val = Cst->getSExtValue();
3925 
3926  // If we have an any_extend feeding the AND, look through it to see if there
3927  // is a shift behind it. But only if the AND doesn't use the extended bits.
3928  // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
3929  bool FoundAnyExtend = false;
3930  if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
3931  Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
3932  isUInt<32>(Val)) {
3933  FoundAnyExtend = true;
3934  Shift = Shift.getOperand(0);
3935  }
3936 
3937  if (Shift.getOpcode() != ISD::SHL || !Shift.hasOneUse())
3938  return false;
3939 
3940  // i8 is unshrinkable, i16 should be promoted to i32.
3941  if (NVT != MVT::i32 && NVT != MVT::i64)
3942  return false;
3943 
3944  ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
3945  if (!ShlCst)
3946  return false;
3947 
3948  uint64_t ShAmt = ShlCst->getZExtValue();
3949 
3950  // Make sure that we don't change the operation by removing bits.
3951  // This only matters for OR and XOR; AND is unaffected.
3952  uint64_t RemovedBitsMask = (1ULL << ShAmt) - 1;
3953  if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
3954  return false;
3955 
3956  // Check the minimum bitwidth for the new constant.
3957  // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
3958  auto CanShrinkImmediate = [&](int64_t &ShiftedVal) {
3959  if (Opcode == ISD::AND) {
3960  // AND32ri is the same as AND64ri32 with zext imm.
3961  // Try this before sign extended immediates below.
3962  ShiftedVal = (uint64_t)Val >> ShAmt;
3963  if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
3964  return true;
3965  // Also swap order when the AND can become MOVZX.
3966  if (ShiftedVal == UINT8_MAX || ShiftedVal == UINT16_MAX)
3967  return true;
3968  }
3969  ShiftedVal = Val >> ShAmt;
3970  if ((!isInt<8>(Val) && isInt<8>(ShiftedVal)) ||
3971  (!isInt<32>(Val) && isInt<32>(ShiftedVal)))
3972  return true;
3973  if (Opcode != ISD::AND) {
3974  // MOV32ri+OR64r/XOR64r is cheaper than MOV64ri64+OR64rr/XOR64rr
3975  ShiftedVal = (uint64_t)Val >> ShAmt;
3976  if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
3977  return true;
3978  }
3979  return false;
3980  };
3981 
3982  int64_t ShiftedVal;
3983  if (!CanShrinkImmediate(ShiftedVal))
3984  return false;
3985 
3986  // Ok, we can reorder to get a smaller immediate.
3987 
3988  // But it's possible the original immediate allowed an AND to become MOVZX.
3989  // We do this check late to defer the MaskedValueIsZero call as long as
3990  // possible.
3991  if (Opcode == ISD::AND) {
3992  // Find the smallest zext this could possibly be.
3993  unsigned ZExtWidth = Cst->getAPIntValue().getActiveBits();
3994  ZExtWidth = PowerOf2Ceil(std::max(ZExtWidth, 8U));
3995 
3996  // Figure out which bits need to be zero to achieve that mask.
3997  APInt NeededMask = APInt::getLowBitsSet(NVT.getSizeInBits(),
3998  ZExtWidth);
3999  NeededMask &= ~Cst->getAPIntValue();
4000 
4001  if (CurDAG->MaskedValueIsZero(N->getOperand(0), NeededMask))
4002  return false;
4003  }
4004 
4005  SDValue X = Shift.getOperand(0);
4006  if (FoundAnyExtend) {
4007  SDValue NewX = CurDAG->getNode(ISD::ANY_EXTEND, dl, NVT, X);
4008  insertDAGNode(*CurDAG, SDValue(N, 0), NewX);
4009  X = NewX;
4010  }
4011 
4012  SDValue NewCst = CurDAG->getConstant(ShiftedVal, dl, NVT);
4013  insertDAGNode(*CurDAG, SDValue(N, 0), NewCst);
4014  SDValue NewBinOp = CurDAG->getNode(Opcode, dl, NVT, X, NewCst);
4015  insertDAGNode(*CurDAG, SDValue(N, 0), NewBinOp);
4016  SDValue NewSHL = CurDAG->getNode(ISD::SHL, dl, NVT, NewBinOp,
4017  Shift.getOperand(1));
4018  ReplaceNode(N, NewSHL.getNode());
4019  SelectCode(NewSHL.getNode());
4020  return true;
4021 }
4022 
4023 bool X86DAGToDAGISel::matchVPTERNLOG(SDNode *Root, SDNode *ParentA,
4024  SDNode *ParentBC, SDValue A, SDValue B,
4025  SDValue C, uint8_t Imm) {
4026  assert(A.isOperandOf(ParentA));
4027  assert(B.isOperandOf(ParentBC));
4028  assert(C.isOperandOf(ParentBC));
4029 
4030  auto tryFoldLoadOrBCast =
4031  [this](SDNode *Root, SDNode *P, SDValue &L, SDValue &Base, SDValue &Scale,
4032  SDValue &Index, SDValue &Disp, SDValue &Segment) {
4033  if (tryFoldLoad(Root, P, L, Base, Scale, Index, Disp, Segment))
4034  return true;
4035 
4036  // Not a load, check for broadcast which may be behind a bitcast.
4037  if (L.getOpcode() == ISD::BITCAST && L.hasOneUse()) {
4038  P = L.getNode();
4039  L = L.getOperand(0);
4040  }
4041 
4042  if (L.getOpcode() != X86ISD::VBROADCAST_LOAD)
4043  return false;
4044 
4045  // Only 32 and 64 bit broadcasts are supported.
4046  auto *MemIntr = cast<MemIntrinsicSDNode>(L);
4047  unsigned Size = MemIntr->getMemoryVT().getSizeInBits();
4048  if (Size != 32 && Size != 64)
4049  return false;
4050 
4051  return tryFoldBroadcast(Root, P, L, Base, Scale, Index, Disp, Segment);
4052  };
4053 
4054  bool FoldedLoad = false;
4055  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4056  if (tryFoldLoadOrBCast(Root, ParentBC, C, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4057  FoldedLoad = true;
4058  } else if (tryFoldLoadOrBCast(Root, ParentA, A, Tmp0, Tmp1, Tmp2, Tmp3,
4059  Tmp4)) {
4060  FoldedLoad = true;
4061  std::swap(A, C);
4062  // Swap bits 1/4 and 3/6.
4063  uint8_t OldImm = Imm;
4064  Imm = OldImm & 0xa5;
4065  if (OldImm & 0x02) Imm |= 0x10;
4066  if (OldImm & 0x10) Imm |= 0x02;
4067  if (OldImm & 0x08) Imm |= 0x40;
4068  if (OldImm & 0x40) Imm |= 0x08;
4069  } else if (tryFoldLoadOrBCast(Root, ParentBC, B, Tmp0, Tmp1, Tmp2, Tmp3,
4070  Tmp4)) {
4071  FoldedLoad = true;
4072  std::swap(B, C);
4073  // Swap bits 1/2 and 5/6.
4074  uint8_t OldImm = Imm;
4075  Imm = OldImm & 0x99;
4076  if (OldImm & 0x02) Imm |= 0x04;
4077  if (OldImm & 0x04) Imm |= 0x02;
4078  if (OldImm & 0x20) Imm |= 0x40;
4079  if (OldImm & 0x40) Imm |= 0x20;
4080  }
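 // Rationale for the bit swaps above: the VPTERNLOG immediate is an 8-entry
 // truth table where bit (a<<2 | b<<1 | c) of Imm is the output for input
 // bits a (from A), b (from B), c (from C). Swapping two inputs permutes the
 // entries whose bits for those inputs differ; e.g. swapping A and C
 // exchanges entries 1 (0b001) <-> 4 (0b100) and 3 (0b011) <-> 6 (0b110),
 // while entries 0, 2, 5 and 7 (mask 0xa5) stay put.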
4081 
4082  SDLoc DL(Root);
4083 
4084  SDValue TImm = CurDAG->getTargetConstant(Imm, DL, MVT::i8);
4085 
4086  MVT NVT = Root->getSimpleValueType(0);
4087 
4088  MachineSDNode *MNode;
4089  if (FoldedLoad) {
4090  SDVTList VTs = CurDAG->getVTList(NVT, MVT::Other);
4091 
4092  unsigned Opc;
4093  if (C.getOpcode() == X86ISD::VBROADCAST_LOAD) {
4094  auto *MemIntr = cast<MemIntrinsicSDNode>(C);
4095  unsigned EltSize = MemIntr->getMemoryVT().getSizeInBits();
4096  assert((EltSize == 32 || EltSize == 64) && "Unexpected broadcast size!");
4097 
4098  bool UseD = EltSize == 32;
4099  if (NVT.is128BitVector())
4100  Opc = UseD ? X86::VPTERNLOGDZ128rmbi : X86::VPTERNLOGQZ128rmbi;
4101  else if (NVT.is256BitVector())
4102  Opc = UseD ? X86::VPTERNLOGDZ256rmbi : X86::VPTERNLOGQZ256rmbi;
4103  else if (NVT.is512BitVector())
4104  Opc = UseD ? X86::VPTERNLOGDZrmbi : X86::VPTERNLOGQZrmbi;
4105  else
4106  llvm_unreachable("Unexpected vector size!");
4107  } else {
4108  bool UseD = NVT.getVectorElementType() == MVT::i32;
4109  if (NVT.is128BitVector())
4110  Opc = UseD ? X86::VPTERNLOGDZ128rmi : X86::VPTERNLOGQZ128rmi;
4111  else if (NVT.is256BitVector())
4112  Opc = UseD ? X86::VPTERNLOGDZ256rmi : X86::VPTERNLOGQZ256rmi;
4113  else if (NVT.is512BitVector())
4114  Opc = UseD ? X86::VPTERNLOGDZrmi : X86::VPTERNLOGQZrmi;
4115  else
4116  llvm_unreachable("Unexpected vector size!");
4117  }
4118 
4119  SDValue Ops[] = {A, B, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, TImm, C.getOperand(0)};
4120  MNode = CurDAG->getMachineNode(Opc, DL, VTs, Ops);
4121 
4122  // Update the chain.
4123  ReplaceUses(C.getValue(1), SDValue(MNode, 1));
4124  // Record the mem-refs
4125  CurDAG->setNodeMemRefs(MNode, {cast<MemSDNode>(C)->getMemOperand()});
4126  } else {
4127  bool UseD = NVT.getVectorElementType() == MVT::i32;
4128  unsigned Opc;
4129  if (NVT.is128BitVector())
4130  Opc = UseD ? X86::VPTERNLOGDZ128rri : X86::VPTERNLOGQZ128rri;
4131  else if (NVT.is256BitVector())
4132  Opc = UseD ? X86::VPTERNLOGDZ256rri : X86::VPTERNLOGQZ256rri;
4133  else if (NVT.is512BitVector())
4134  Opc = UseD ? X86::VPTERNLOGDZrri : X86::VPTERNLOGQZrri;
4135  else
4136  llvm_unreachable("Unexpected vector size!");
4137 
4138  MNode = CurDAG->getMachineNode(Opc, DL, NVT, {A, B, C, TImm});
4139  }
4140 
4141  ReplaceUses(SDValue(Root, 0), SDValue(MNode, 0));
4142  CurDAG->RemoveDeadNode(Root);
4143  return true;
4144 }
4145 
4146 // Try to match two logic ops to a VPTERNLOG.
4147 // FIXME: Handle inverted inputs?
4148 // FIXME: Handle more complex patterns that use an operand more than once?
4149 bool X86DAGToDAGISel::tryVPTERNLOG(SDNode *N) {
4150  MVT NVT = N->getSimpleValueType(0);
4151 
4152  // Make sure we support VPTERNLOG.
4153  if (!NVT.isVector() || !Subtarget->hasAVX512() ||
4154  NVT.getVectorElementType() == MVT::i1)
4155  return false;
4156 
4157  // We need VLX for 128/256-bit.
4158  if (!(Subtarget->hasVLX() || NVT.is512BitVector()))
4159  return false;
4160 
4161  SDValue N0 = N->getOperand(0);
4162  SDValue N1 = N->getOperand(1);
4163 
4164  auto getFoldableLogicOp = [](SDValue Op) {
4165  // Peek through single use bitcast.
4166  if (Op.getOpcode() == ISD::BITCAST && Op.hasOneUse())
4167  Op = Op.getOperand(0);
4168 
4169  if (!Op.hasOneUse())
4170  return SDValue();
4171 
4172  unsigned Opc = Op.getOpcode();
4173  if (Opc == ISD::AND || Opc == ISD::OR || Opc == ISD::XOR ||
4174  Opc == X86ISD::ANDNP)
4175  return Op;
4176 
4177  return SDValue();
4178  };
4179 
4180  SDValue A, FoldableOp;
4181  if ((FoldableOp = getFoldableLogicOp(N1))) {
4182  A = N0;
4183  } else if ((FoldableOp = getFoldableLogicOp(N0))) {
4184  A = N1;
4185  } else
4186  return false;
4187 
4188  SDValue B = FoldableOp.getOperand(0);
4189  SDValue C = FoldableOp.getOperand(1);
4190 
4191  // We can build the appropriate control immediate by performing the logic
4192  // operation we're matching using these constants for A, B, and C.
4193  const uint8_t TernlogMagicA = 0xf0;
4194  const uint8_t TernlogMagicB = 0xcc;
4195  const uint8_t TernlogMagicC = 0xaa;
4196 
4197  uint8_t Imm;
4198  switch (FoldableOp.getOpcode()) {
4199  default: llvm_unreachable("Unexpected opcode!");
4200  case ISD::AND: Imm = TernlogMagicB & TernlogMagicC; break;
4201  case ISD::OR: Imm = TernlogMagicB | TernlogMagicC; break;
4202  case ISD::XOR: Imm = TernlogMagicB ^ TernlogMagicC; break;
4203  case X86ISD::ANDNP: Imm = ~(TernlogMagicB) & TernlogMagicC; break;
4204  }
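 // Worked example: if FoldableOp is (and B, C), the switch above yields
 // Imm = 0xcc & 0xaa == 0x88; if the root is an OR, the switch below folds
 // in A as 0x88 | 0xf0 == 0xf8, the truth table of A | (B & C).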
4205 
4206  switch (N->getOpcode()) {
4207  default: llvm_unreachable("Unexpected opcode!");
4208  case X86ISD::ANDNP:
4209  if (A == N0)
4210  Imm &= ~TernlogMagicA;
4211  else
4212  Imm = ~(Imm) & TernlogMagicA;
4213  break;
4214  case ISD::AND: Imm &= TernlogMagicA; break;
4215  case ISD::OR: Imm |= TernlogMagicA; break;
4216  case ISD::XOR: Imm ^= TernlogMagicA; break;
4217  }
4218 
4219  return matchVPTERNLOG(N, N, FoldableOp.getNode(), A, B, C, Imm);
4220 }
4221 
4222 /// If the high bits of an 'and' operand are known zero, try setting the
4223 /// high bits of an 'and' constant operand to produce a smaller encoding by
4224 /// creating a small, sign-extended negative immediate rather than a large
4225 /// positive one. This reverses a transform in SimplifyDemandedBits that
4226 /// shrinks mask constants by clearing bits. There is also a possibility that
4227 /// the 'and' mask can be made -1, so the 'and' itself is unnecessary. In that
4228 /// case, just replace the 'and'. Return 'true' if the node is replaced.
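/// For example, if the upper 24 bits of a 32-bit operand are known zero, the
/// mask 0xFE (254, which does not fit in a signed 8-bit immediate) can become
/// 0xFFFFFFFE == -2, which does.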
4229 bool X86DAGToDAGISel::shrinkAndImmediate(SDNode *And) {
4230  // i8 is unshrinkable, i16 should be promoted to i32, and vector ops don't
4231  // have immediate operands.
4232  MVT VT = And->getSimpleValueType(0);
4233  if (VT != MVT::i32 && VT != MVT::i64)
4234  return false;
4235 
4236  auto *And1C = dyn_cast<ConstantSDNode>(And->getOperand(1));
4237  if (!And1C)
4238  return false;
4239 
4240  // Bail out if the mask constant is already negative. It can't shrink any more.
4241  // If the upper 32 bits of a 64 bit mask are all zeros, we have special isel
4242  // patterns to use a 32-bit and instead of a 64-bit and by relying on the
4243  // implicit zeroing of 32 bit ops. So we should check if the lower 32 bits
4244  // are negative too.
4245  APInt MaskVal = And1C->getAPIntValue();
4246  unsigned MaskLZ = MaskVal.countLeadingZeros();
4247  if (!MaskLZ || (VT == MVT::i64 && MaskLZ == 32))
4248  return false;
4249 
4250  // Don't extend into the upper 32 bits of a 64 bit mask.
4251  if (VT == MVT::i64 && MaskLZ >= 32) {
4252  MaskLZ -= 32;
4253  MaskVal = MaskVal.trunc(32);
4254  }
4255 
4256  SDValue And0 = And->getOperand(0);
4257  APInt HighZeros = APInt::getHighBitsSet(MaskVal.getBitWidth(), MaskLZ);
4258  APInt NegMaskVal = MaskVal | HighZeros;
4259 
4260  // If a negative constant would not allow a smaller encoding, there's no need
4261  // to continue. Only change the constant when we know it's a win.
4262  unsigned MinWidth = NegMaskVal.getMinSignedBits();
4263  if (MinWidth > 32 || (MinWidth > 8 && MaskVal.getMinSignedBits() <= 32))
4264  return false;
4265 
4266  // Extend masks if we truncated above.
4267  if (VT == MVT::i64 && MaskVal.getBitWidth() < 64) {
4268  NegMaskVal = NegMaskVal.zext(64);
4269  HighZeros = HighZeros.zext(64);
4270  }
4271 
4272  // The variable operand must be all zeros in the top bits to allow using the
4273  // new, negative constant as the mask.
4274  if (!CurDAG->MaskedValueIsZero(And0, HighZeros))
4275  return false;
4276 
4277  // Check if the mask is -1. In that case, this is an unnecessary instruction
4278  // that escaped earlier analysis.
4279  if (NegMaskVal.isAllOnesValue()) {
4280  ReplaceNode(And, And0.getNode());
4281  return true;
4282  }
4283 
4284  // A negative mask allows a smaller encoding. Create a new 'and' node.
4285  SDValue NewMask = CurDAG->getConstant(NegMaskVal, SDLoc(And), VT);
4286  insertDAGNode(*CurDAG, SDValue(And, 0), NewMask);
4287  SDValue NewAnd = CurDAG->getNode(ISD::AND, SDLoc(And), VT, And0, NewMask);
4288  ReplaceNode(And, NewAnd.getNode());
4289  SelectCode(NewAnd.getNode());
4290  return true;
4291 }
4292 
4293 static unsigned getVPTESTMOpc(MVT TestVT, bool IsTestN, bool FoldedLoad,
4294  bool FoldedBCast, bool Masked) {
4295 #define VPTESTM_CASE(VT, SUFFIX) \
4296 case MVT::VT: \
4297  if (Masked) \
4298  return IsTestN ? X86::VPTESTNM##SUFFIX##k: X86::VPTESTM##SUFFIX##k; \
4299  return IsTestN ? X86::VPTESTNM##SUFFIX : X86::VPTESTM##SUFFIX;
4300 
4301 
4302 #define VPTESTM_BROADCAST_CASES(SUFFIX) \
4303 default: llvm_unreachable("Unexpected VT!"); \
4304 VPTESTM_CASE(v4i32, DZ128##SUFFIX) \
4305 VPTESTM_CASE(v2i64, QZ128##SUFFIX) \
4306 VPTESTM_CASE(v8i32, DZ256##SUFFIX) \
4307 VPTESTM_CASE(v4i64, QZ256##SUFFIX) \
4308 VPTESTM_CASE(v16i32, DZ##SUFFIX) \
4309 VPTESTM_CASE(v8i64, QZ##SUFFIX)
4310 
4311 #define VPTESTM_FULL_CASES(SUFFIX) \
4312 VPTESTM_BROADCAST_CASES(SUFFIX) \
4313 VPTESTM_CASE(v16i8, BZ128##SUFFIX) \
4314 VPTESTM_CASE(v8i16, WZ128##SUFFIX) \
4315 VPTESTM_CASE(v32i8, BZ256##SUFFIX) \
4316 VPTESTM_CASE(v16i16, WZ256##SUFFIX) \
4317 VPTESTM_CASE(v64i8, BZ##SUFFIX) \
4318 VPTESTM_CASE(v32i16, WZ##SUFFIX)
4319 
4320  if (FoldedBCast) {
4321  switch (TestVT.SimpleTy) {
4322  VPTESTM_BROADCAST_CASES(rmb)
4323  }
4324  }
4325 
4326  if (FoldedLoad) {
4327  switch (TestVT.SimpleTy) {
4328  VPTESTM_FULL_CASES(rm)
4329  }
4330  }
4331 
4332  switch (TestVT.SimpleTy) {
4333  VPTESTM_FULL_CASES(rr)
4334  }
4335 
4336 #undef VPTESTM_FULL_CASES
4337 #undef VPTESTM_BROADCAST_CASES
4338 #undef VPTESTM_CASE
4339 }
4340 
4341 // Try to create a VPTESTM instruction. If InMask is not null, it will be used
4342 // to form a masked operation.
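// E.g. a vXi1 (setne (and X, Y), 0) becomes a single VPTESTM that sets each
// mask bit when the corresponding element of (X & Y) is nonzero; SETEQ maps
// to the negated VPTESTNM form instead.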
4343 bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc,
4344  SDValue InMask) {
4345  assert(Subtarget->hasAVX512() && "Expected AVX512!");
4347  "Unexpected VT!");
4348 
4349  // Look for equal and not equal compares.
4350  ISD::CondCode CC = cast<CondCodeSDNode>(Setcc.getOperand(2))->get();
4351  if (CC != ISD::SETEQ && CC != ISD::SETNE)
4352  return false;
4353 
4354  SDValue SetccOp0 = Setcc.getOperand(0);
4355  SDValue SetccOp1 = Setcc.getOperand(1);
4356 
4357  // Canonicalize the all zero vector to the RHS.
4358  if (ISD::isBuildVectorAllZeros(SetccOp0.getNode()))
4359  std::swap(SetccOp0, SetccOp1);
4360 
4361  // See if we're comparing against zero.
4362  if (!ISD::isBuildVectorAllZeros(SetccOp1.getNode()))
4363  return false;
4364 
4365  SDValue N0 = SetccOp0;
4366 
4367  MVT CmpVT = N0.getSimpleValueType();
4368  MVT CmpSVT = CmpVT.getVectorElementType();
4369 
4370  // Start with both operands the same. We'll try to refine this.
4371  SDValue Src0 = N0;
4372  SDValue Src1 = N0;
4373 
4374  {
4375  // Look through single use bitcasts.
4376  SDValue N0Temp = N0;
4377  if (N0Temp.getOpcode() == ISD::BITCAST && N0Temp.hasOneUse())
4378  N0Temp = N0.getOperand(0);
4379 
4380  // Look for single use AND.
4381  if (N0Temp.getOpcode() == ISD::AND && N0Temp.hasOneUse()) {
4382  Src0 = N0Temp.getOperand(0);
4383  Src1 = N0Temp.getOperand(1);
4384  }
4385  }
4386 
4387  // Without VLX we need to widen the operation.
4388  bool Widen = !Subtarget->hasVLX() && !CmpVT.is512BitVector();
4389 
4390  auto tryFoldLoadOrBCast = [&](SDNode *Root, SDNode *P, SDValue &L,
4391  SDValue &Base, SDValue &Scale, SDValue &Index,
4392  SDValue &Disp, SDValue &Segment) {
4393  // If we need to widen, we can't fold the load.
4394  if (!Widen)
4395  if (tryFoldLoad(Root, P, L, Base, Scale, Index, Disp, Segment))
4396  return true;
4397 
4398  // If we didn't fold a load, try to match broadcast. No widening limitation
4399  // for this. But only 32 and 64 bit types are supported.
4400  if (CmpSVT != MVT::i32 && CmpSVT != MVT::i64)
4401  return false;
4402 
4403  // Look through single use bitcasts.
4404  if (L.getOpcode() == ISD::BITCAST && L.hasOneUse()) {
4405  P = L.getNode();
4406  L = L.getOperand(0);
4407  }
4408 
4409  if (L.getOpcode() != X86ISD::VBROADCAST_LOAD)
4410  return false;
4411 
4412  auto *MemIntr = cast<MemIntrinsicSDNode>(L);
4413  if (MemIntr->getMemoryVT().getSizeInBits() != CmpSVT.getSizeInBits())
4414  return false;
4415 
4416  return tryFoldBroadcast(Root, P, L, Base, Scale, Index, Disp, Segment);
4417  };
4418 
4419  // We can only fold loads if the sources are unique.
4420  bool CanFoldLoads = Src0 != Src1;
4421 
4422  bool FoldedLoad = false;
4423  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4424  if (CanFoldLoads) {
4425  FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src1, Tmp0, Tmp1, Tmp2,
4426  Tmp3, Tmp4);
4427  if (!FoldedLoad) {
4428  // And is commutative.
4429  FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src0, Tmp0, Tmp1,
4430  Tmp2, Tmp3, Tmp4);
4431  if (FoldedLoad)
4432  std::swap(Src0, Src1);
4433  }
4434  }
4435 
4436  bool FoldedBCast = FoldedLoad && Src1.getOpcode() == X86ISD::VBROADCAST_LOAD;
4437 
4438  bool IsMasked = InMask.getNode() != nullptr;
4439 
4440  SDLoc dl(Root);
4441 
4442  MVT ResVT = Setcc.getSimpleValueType();
4443  MVT MaskVT = ResVT;
4444  if (Widen) {
4445  // Widen the inputs using insert_subreg or copy_to_regclass.
4446  unsigned Scale = CmpVT.is128BitVector() ? 4 : 2;
4447  unsigned SubReg = CmpVT.is128BitVector() ? X86::sub_xmm : X86::sub_ymm;
4448  unsigned NumElts = CmpVT.getVectorNumElements() * Scale;
4449  CmpVT = MVT::getVectorVT(CmpSVT, NumElts);
4450  MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
4451  SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, dl,
4452  CmpVT), 0);
4453  Src0 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src0);
4454 
4455  if (!FoldedBCast)
4456  Src1 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src1);
4457 
4458  if (IsMasked) {
4459  // Widen the mask.
4460  unsigned RegClass = TLI->getRegClassFor(MaskVT)->getID();
4461  SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4462  InMask = SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4463  dl, MaskVT, InMask, RC), 0);
4464  }
4465  }
4466 
4467  bool IsTestN = CC == ISD::SETEQ;
4468  unsigned Opc = getVPTESTMOpc(CmpVT, IsTestN, FoldedLoad, FoldedBCast,
4469  IsMasked);
4470 
4471  MachineSDNode *CNode;
4472  if (FoldedLoad) {
4473  SDVTList VTs = CurDAG->getVTList(MaskVT, MVT::Other);
4474 
4475  if (IsMasked) {
4476  SDValue Ops[] = { InMask, Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4477  Src1.getOperand(0) };
4478  CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4479  } else {
4480  SDValue Ops[] = { Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4481  Src1.getOperand(0) };
4482  CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4483  }
4484 
4485  // Update the chain.
4486  ReplaceUses(Src1.getValue(1), SDValue(CNode, 1));
4487  // Record the mem-refs
4488  CurDAG->setNodeMemRefs(CNode, {cast<MemSDNode>(Src1)->getMemOperand()});
4489  } else {
4490  if (IsMasked)
4491  CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, InMask, Src0, Src1);
4492  else
4493  CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, Src0, Src1);
4494  }
4495 
4496  // If we widened, we need to shrink the mask VT.
4497  if (Widen) {
4498  unsigned RegClass = TLI->getRegClassFor(ResVT)->getID();
4499  SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4500  CNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4501  dl, ResVT, SDValue(CNode, 0), RC);
4502  }
4503 
4504  ReplaceUses(SDValue(Root, 0), SDValue(CNode, 0));
4505  CurDAG->RemoveDeadNode(Root);
4506  return true;
4507 }
4508 
4509 // Try to match the bitselect pattern (or (and A, B), (andn A, C)). Turn it
4510 // into vpternlog.
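// The 0xCA immediate follows from the magic constants used by matchVPTERNLOG:
// (A & B) | (~A & C) == (0xf0 & 0xcc) | (~0xf0 & 0xaa) == 0xc0 | 0x0a == 0xCA.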
4511 bool X86DAGToDAGISel::tryMatchBitSelect(SDNode *N) {
4512  assert(N->getOpcode() == ISD::OR && "Unexpected opcode!");
4513 
4514  MVT NVT = N->getSimpleValueType(0);
4515 
4516  // Make sure we support VPTERNLOG.
4517  if (!NVT.isVector() || !Subtarget->hasAVX512())
4518  return false;
4519 
4520  // We need VLX for 128/256-bit.
4521  if (!(Subtarget->hasVLX() || NVT.is512BitVector()))
4522  return false;
4523 
4524  SDValue N0 = N->getOperand(0);
4525  SDValue N1 = N->getOperand(1);
4526 
4527  // Canonicalize AND to LHS.
4528  if (N1.getOpcode() == ISD::AND)
4529  std::swap(N0, N1);
4530 
4531  if (N0.getOpcode() != ISD::AND ||
4532  N1.getOpcode() != X86ISD::ANDNP ||
4533  !N0.hasOneUse() || !N1.hasOneUse())
4534  return false;
4535 
4536  // ANDN is not commutable, use it to pin down A and C.
4537  SDValue A = N1.getOperand(0);
4538  SDValue C = N1.getOperand(1);
4539 
4540  // AND is commutable, if one operand matches A, the other operand is B.
4541  // Otherwise this isn't a match.
4542  SDValue B;
4543  if (N0.getOperand(0) == A)
4544  B = N0.getOperand(1);
4545  else if (N0.getOperand(1) == A)
4546  B = N0.getOperand(0);
4547  else
4548  return false;
4549 
4550  SDLoc dl(N);
4551  SDValue Imm = CurDAG->getTargetConstant(0xCA, dl, MVT::i8);
4552  SDValue Ternlog = CurDAG->getNode(X86ISD::VPTERNLOG, dl, NVT, A, B, C, Imm);
4553  ReplaceNode(N, Ternlog.getNode());
4554 
4555  return matchVPTERNLOG(Ternlog.getNode(), Ternlog.getNode(), Ternlog.getNode(),
4556  A, B, C, 0xCA);
4557 }
4558 
4559 void X86DAGToDAGISel::Select(SDNode *Node) {
4560  MVT NVT = Node->getSimpleValueType(0);
4561  unsigned Opcode = Node->getOpcode();
4562  SDLoc dl(Node);
4563 
4564  if (Node->isMachineOpcode()) {
4565  LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
4566  Node->setNodeId(-1);
4567  return; // Already selected.
4568  }
4569 
4570  switch (Opcode) {
4571  default: break;
4572  case ISD::INTRINSIC_W_CHAIN: {
4573  unsigned IntNo = Node->getConstantOperandVal(1);
4574  switch (IntNo) {
4575  default: break;
4576  case Intrinsic::x86_encodekey128:
4577  case Intrinsic::x86_encodekey256: {
4578  if (!Subtarget->hasKL())
4579  break;
4580 
4581  unsigned Opcode;
4582  switch (IntNo) {
4583  default: llvm_unreachable("Impossible intrinsic");
4584  case Intrinsic::x86_encodekey128: Opcode = X86::ENCODEKEY128; break;
4585  case Intrinsic::x86_encodekey256: Opcode = X86::ENCODEKEY256; break;
4586  }
4587 
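// Editorial note: ENCODEKEY128/ENCODEKEY256 take the key implicitly in XMM0
// (plus XMM1 for the 256-bit variant), so the operands are copied into those
// physical registers and threaded through the chain/glue built here.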
4588  SDValue Chain = Node->getOperand(0);
4589  Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM0, Node->getOperand(3),
4590  SDValue());
4591  if (Opcode == X86::ENCODEKEY256)
4592  Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM1, Node->getOperand(4),
4593  Chain.getValue(1));
4594 
4595  MachineSDNode *Res = CurDAG->getMachineNode(
4596  Opcode, dl, Node->getVTList(),
4597  {Node->getOperand(2), Chain, Chain.getValue(1)});
4598  ReplaceNode(Node, Res);
4599  return;
4600  }
4601  case Intrinsic::x86_tileloadd64_internal: {
4602  if (!Subtarget->hasAMXTILE())
4603  break;
4604  unsigned Opc = X86::PTILELOADDV;
4605  // _tile_loadd_internal(row, col, buf, STRIDE)
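// Editorial note: the (buf, STRIDE) pair is encoded as a full x86 memory
// operand: buf as the base, STRIDE as the index with scale 1, zero
// displacement, and no segment, matching the operand list built below.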
4606  SDValue Base = Node->getOperand(4);
4607  SDValue Scale = getI8Imm(1, dl);
4608  SDValue Index = Node->getOperand(5);
4609  SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
4610  SDValue Segment = CurDAG->getRegister(0, MVT::i16);
4611  SDValue Chain = Node->getOperand(0);
4612  MachineSDNode *CNode;
4613  SDValue Ops[] = {Node->getOperand(2),
4614  Node->getOperand(3),
4615  Base,
4616  Scale,
4617  Index,
4618  Disp,
4619  Segment,
4620  Chain};
4621  CNode = CurDAG->getMachineNode(Opc, dl, {MVT::x86amx, MVT::Other}, Ops);
4622  ReplaceNode(Node, CNode);
4623  return;
4624  }
4625  }
4626  break;
4627  }
4628  case ISD::INTRINSIC_VOID: {
4629  unsigned IntNo = Node->getConstantOperandVal(1);
4630  switch (IntNo) {
4631  default: break;
4632  case Intrinsic::x86_sse3_monitor:
4633  case Intrinsic::x86_monitorx:
4634  case Intrinsic::x86_clzero: {
4635  bool Use64BitPtr = Node->getOperand(2).getValueType() == MVT::i64;
4636 
4637  unsigned Opc = 0;
4638  switch (IntNo) {
4639  default: llvm_unreachable("Unexpected intrinsic!");
4640  case Intrinsic::x86_sse3_monitor:
4641  if (!Subtarget->hasSSE3())
4642  break;
4643  Opc = Use64BitPtr ? X86::MONITOR64rrr : X86::MONITOR32rrr;
4644  break;
4645  case Intrinsic::x86_monitorx:
4646  if (!Subtarget->hasMWAITX())
4647  break;
4648  Opc = Use64BitPtr ? X86::MONITORX64rrr : X86::MONITORX32rrr;
4649  break;
4650  case Intrinsic::x86_clzero:
4651  if (!Subtarget->hasCLZERO())
4652  break;
4653  Opc = Use64BitPtr ? X86::CLZERO64r : X86::CLZERO32r;
4654  break;
4655  }
4656 
4657  if (Opc) {
4658  unsigned PtrReg = Use64BitPtr ? X86::RAX : X86::EAX;
4659  SDValue Chain = CurDAG->getCopyToReg(Node->getOperand(0), dl, PtrReg,
4660  Node->getOperand(2), SDValue());
4661  SDValue InFlag = Chain.getValue(1);
4662 
4663  if (IntNo == Intrinsic::x86_sse3_monitor ||
4664  IntNo == Intrinsic::x86_monitorx) {
4665  // Copy the other two operands to ECX and EDX.
4666  Chain = CurDAG->getCopyToReg(Chain, dl, X86::ECX, Node->getOperand(3),
4667  InFlag);
4668  InFlag = Chain.getValue(1);
4669  Chain = CurDAG->getCopyToReg(Chain, dl, X86::EDX, Node->getOperand(4),
4670  InFlag);
4671  InFlag = Chain.getValue(1);
4672  }
4673 
4674  MachineSDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
4675  { Chain, InFlag});
4676  ReplaceNode(Node, CNode);
4677  return;
4678  }
4679 
4680  break;
4681  }
4682  case Intrinsic::x86_tilestored64_internal: {
4683  unsigned Opc = X86::PTILESTOREDV;
4684  // _tile_stored_internal(row, col, buf, STRIDE, c)
4685  SDValue Base = Node->getOperand(4);
4686  SDValue Scale = getI8Imm(1, dl);
4687  SDValue Index = Node->getOperand(5);
4688  SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
4689  SDValue Segment = CurDAG->getRegister(0, MVT::i16);
4690  SDValue Chain = Node->getOperand(0);
4691  MachineSDNode *CNode;
4692  SDValue Ops[] = {Node->getOperand(2),
4693  Node->getOperand(3),
4694  Base,
4695  Scale,
4696  Index,
4697  Disp,
4698  Segment,
4699  Node->getOperand(6),
4700  Chain};
4701  CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4702  ReplaceNode(Node, CNode);
4703  return;
4704  }
4705  case Intrinsic::x86_tileloadd64:
4706  case Intrinsic::x86_tileloaddt164:
4707  case Intrinsic::x86_tilestored64: {
4708  if (!Subtarget->hasAMXTILE())
4709  break;
4710  unsigned Opc;
4711  switch (IntNo) {
4712  default: llvm_unreachable("Unexpected intrinsic!");
4713  case Intrinsic::x86_tileloadd64: Opc = X86::PTILELOADD; break;
4714  case Intrinsic::x86_tileloaddt164: Opc = X86::PTILELOADDT1; break;
4715  case Intrinsic::x86_tilestored64: Opc = X86::PTILESTORED; break;
4716  }
4717  // FIXME: Match displacement and scale.
4718  unsigned TIndex = Node->getConstantOperandVal(2);
4719  SDValue TReg = getI8Imm(TIndex, dl);
4720  SDValue Base = Node->getOperand(3);
4721  SDValue Scale = getI8Imm(1, dl);
4722  SDValue Index = Node->getOperand(4);
4723  SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
4724  SDValue Segment = CurDAG->getRegister(0, MVT::i16);
4725  SDValue Chain = Node->getOperand(0);
4726  MachineSDNode *CNode;
4727  if (Opc == X86::PTILESTORED) {
4728  SDValue Ops[] = { Base, Scale, Index, Disp, Segment, TReg, Chain };
4729  CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4730  } else {
4731  SDValue Ops[] = { TReg, Base, Scale, Index, Disp, Segment, Chain };
4732  CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4733  }
4734  ReplaceNode(Node, CNode);
4735  return;
4736  }
4737  }
4738  break;
4739  }
4740  case ISD::BRIND:
4741  case X86ISD::NT_BRIND: {
4742  if (Subtarget->isTargetNaCl())
4743  // NaCl has its own pass where jmp %r32 instructions are converted to
4744  // jmp %r64, so we leave the instruction alone.
4745  break;
4746  if (Subtarget->isTarget64BitILP32()) {
4747  // Converts a 32-bit register to a 64-bit, zero-extended version of
4748  // it. This is needed because x86-64 can do many things, but jmp %r32
4749  // ain't one of them.
4750  SDValue Target = Node->getOperand(1);
4751  assert(Target.getValueType() == MVT::i32 && "Unexpected VT!");
4752  SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, MVT::i64);
4753  SDValue Brind = CurDAG->getNode(Opcode, dl, MVT::Other,
4754  Node->getOperand(0), ZextTarget);
4755  ReplaceNode(Node, Brind.getNode());
4756  SelectCode(ZextTarget.getNode());
4757  SelectCode(Brind.getNode());
4758  return;
4759  }
4760  break;
4761  }
4762  case X86ISD::GlobalBaseReg:
4763  ReplaceNode(Node, getGlobalBaseReg());
4764  return;
4765 
4766  case ISD::BITCAST:
4767  // Just drop all 128/256/512-bit bitcasts.
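// Editorial note: such bitcasts are no-ops at the register level (the value
// stays in the same vector register class), so the operand is forwarded
// directly and no instruction is emitted.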
4768  if (NVT.is512BitVector() || NVT.is256BitVector() || NVT.is128BitVector() ||
4769  NVT == MVT::f128) {
4770  ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
4771  CurDAG->RemoveDeadNode(Node);
4772  return;
4773  }
4774  break;
4775 
4776  case ISD::SRL:
4777  if (matchBitExtract(Node))
4778  return;
4779  LLVM_FALLTHROUGH;
4780  case ISD::SRA:
4781  case ISD::SHL:
4782  if (tryShiftAmountMod(Node))
4783  return;
4784  break;
4785 
4786  case X86ISD::VPTERNLOG: {
4787  uint8_t Imm = cast<ConstantSDNode>(Node->getOperand(3))->getZExtValue();
4788  if (matchVPTERNLOG(Node, Node, Node, Node->getOperand(0),
4789  Node->getOperand(1), Node->getOperand(2), Imm))
4790  return;
4791  break;
4792  }
4793 
4794  case X86ISD::ANDNP:
4795  if (tryVPTERNLOG(Node))
4796  return;
4797  break;
4798 
4799  case ISD::AND:
4800  if (NVT.isVector() && NVT.getVectorElementType() == MVT::i1) {
4801  // Try to form a masked VPTESTM. Operands can be in either order.
4802  SDValue N0 = Node->getOperand(0);
4803  SDValue N1 = Node->getOperand(1);
4804  if (N0.getOpcode() == ISD::SETCC && N0.hasOneUse() &&
4805  tryVPTESTM(Node, N0, N1))
4806  return;
4807  if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse() &&
4808  tryVPTESTM(Node, N1, N0))
4809  return;
4810  }
4811 
4812  if (MachineSDNode *NewNode = matchBEXTRFromAndImm(Node)) {
4813  ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
4814  CurDAG->RemoveDeadNode(Node);
4815  return;
4816  }
4817  if (matchBitExtract(Node))
4818  return;
4819  if (AndImmShrink && shrinkAndImmediate(Node))
4820  return;
4821 
4822  LLVM_FALLTHROUGH;
4823  case ISD::OR:
4824  case ISD::XOR:
4825  if (tryShrinkShlLogicImm(Node))
4826  return;
4827  if (Opcode == ISD::OR && tryMatchBitSelect(Node))
4828  return;
4829  if (tryVPTERNLOG(Node))
4830  return;
4831 
4832  LLVM_FALLTHROUGH;
4833  case ISD::ADD:
4834  case ISD::SUB: {
4835  // Try to avoid folding immediates with multiple uses for optsize.
4836  // This code tries to select to register form directly to avoid going
4837  // through the isel table which might fold the immediate. We can't change
4838  // the add/sub/and/or/xor with immediate patterns in the tablegen files to
4839  // check immediate use count without making the patterns unavailable to the
4840  // fast-isel table.
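// Editorial example: at -Oz, "x + 1234" appearing several times pays for an
// imm32 in every folded instruction; selecting the rr form here lets one
// register materialization of 1234 be shared, which is smaller overall.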
4841  if (!CurDAG->shouldOptForSize())
4842  break;
4843 
4844  // Only handle i8/i16/i32/i64.
4845  if (NVT != MVT::i8 && NVT != MVT::i16 && NVT != MVT::i32 && NVT != MVT::i64)
4846  break;
4847 
4848  SDValue N0 = Node->getOperand(0);
4849  SDValue N1 = Node->getOperand(1);
4850 
4851  ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
4852  if (!Cst)
4853  break;
4854 
4855  int64_t Val = Cst->getSExtValue();
4856 
4857  // Make sure it's an immediate that is considered foldable.
4858  // FIXME: Handle unsigned 32 bit immediates for 64-bit AND.
4859  if (!isInt<8>(Val) && !isInt<32>(Val))
4860  break;
4861 
4862  // If this can match to INC/DEC, let it go; INC/DEC take no immediate byte.
4863  if (Opcode == ISD::ADD && (Val == 1 || Val == -1))
4864  break;
4865 
4866  // Check if we should avoid folding this immediate.
4867  if (!shouldAvoidImmediateInstFormsForSize(N1.getNode()))
4868  break;
4869 
4870  // We should not fold the immediate, so we need the register form instead.
4871  unsigned ROpc, MOpc;
4872  switch (NVT.SimpleTy) {
4873  default: llvm_unreachable("Unexpected VT!");
4874  case MVT::i8:
4875  switch (Opcode) {
4876  default: llvm_unreachable("Unexpected opcode!");
4877  case ISD::ADD: ROpc = X86::ADD8rr; MOpc = X86::ADD8rm; break;
4878  case ISD::SUB: ROpc = X86::SUB8rr; MOpc = X86::SUB8rm; break;
4879  case ISD::AND: ROpc = X86::AND8rr; MOpc = X86::AND8rm; break;
4880  case ISD::OR: ROpc = X86::OR8rr; MOpc = X86::OR8rm; break;
4881  case ISD::XOR: ROpc = X86::XOR8rr; MOpc = X86::XOR8rm; break;
4882  }
4883  break;
4884  case MVT::i16:
4885  switch (Opcode) {
4886  default: llvm_unreachable("Unexpected opcode!");
4887  case ISD::ADD: ROpc = X86::ADD16rr; MOpc = X86::ADD16rm; break;
4888  case ISD::SUB: ROpc = X86::SUB16rr; MOpc = X86::SUB16rm; break;
4889  case ISD::AND: ROpc = X86::AND16rr; MOpc = X86::AND16rm; break;
4890  case ISD::OR: ROpc = X86::OR16rr; MOpc = X86::OR16rm; break;
4891  case ISD::XOR: ROpc = X86::XOR16rr; MOpc = X86::XOR16rm; break;
4892  }
4893  break;
4894  case MVT::i32:
4895  switch (Opcode) {
4896  default: llvm_unreachable("Unexpected opcode!");
4897  case ISD::ADD: ROpc = X86::ADD32rr; MOpc = X86::ADD32rm; break;
4898  case ISD::SUB: ROpc = X86::SUB32rr; MOpc = X86::SUB32rm; break;
4899  case ISD::AND: ROpc = X86::AND32rr; MOpc = X86::AND32rm; break;
4900  case ISD::OR: ROpc = X86::OR32rr; MOpc = X86::OR32rm; break;
4901  case ISD::XOR: ROpc = X86::XOR32rr; MOpc = X86::XOR32rm; break;
4902  }
4903  break;
4904  case MVT::i64:
4905  switch (Opcode) {
4906  default: llvm_unreachable("Unexpected opcode!");
4907  case ISD::ADD: ROpc = X86::ADD64rr; MOpc = X86::ADD64rm; break;
4908  case ISD::SUB: ROpc = X86::SUB64rr; MOpc = X86::SUB64rm; break;
4909  case ISD::AND: ROpc = X86::AND64rr; MOpc = X86::AND64rm; break;
4910  case ISD::OR: ROpc = X86::OR64rr; MOpc = X86::OR64rm; break;
4911  case ISD::XOR: ROpc = X86::XOR64rr; MOpc = X86::XOR64rm; break;
4912  }
4913  break;
4914  }
4915 
4916  // OK, this is an AND/OR/XOR/ADD/SUB with a constant.
4917 
4918  // If this is not a subtract, we can still try to fold a load.
4919  if (Opcode != ISD::SUB) {
4920  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4921  if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4922  SDValue Ops[] = { N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
4923  SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
4924  MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4925  // Update the chain.
4926  ReplaceUses(N0.getValue(1), SDValue(CNode, 2));
4927  // Record the mem-refs
4928  CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N0)->getMemOperand()});
4929  ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
4930  CurDAG->RemoveDeadNode(Node);
4931  return;
4932  }
4933  }
4934 
4935  CurDAG->SelectNodeTo(Node, ROpc, NVT, MVT::i32, N0, N1);
4936  return;
4937  }
4938 
4939  case X86ISD::SMUL:
4940  // i16/i32/i64 are handled with isel patterns.
4941  if (NVT != MVT::i8)
4942  break;
4943  LLVM_FALLTHROUGH;
4944  case X86ISD::UMUL: {
4945  SDValue N0 = Node->getOperand(0);
4946  SDValue N1 = Node->getOperand(1);
4947 
4948  unsigned LoReg, ROpc, MOpc;
4949  switch (NVT.SimpleTy) {
4950  default: llvm_unreachable("Unsupported VT!");
4951  case MVT::i8:
4952  LoReg = X86::AL;
4953  ROpc = Opcode == X86ISD::SMUL ? X86::IMUL8r : X86::MUL8r;
4954  MOpc = Opcode == X86ISD::SMUL ? X86::IMUL8m : X86::MUL8m;
4955  break;
4956  case MVT::i16:
4957  LoReg = X86::AX;
4958  ROpc = X86::MUL16r;
4959  MOpc = X86::MUL16m;
4960  break;
4961  case MVT::i32:
4962  LoReg = X86::EAX;
4963  ROpc = X86::MUL32r;
4964  MOpc = X86::MUL32m;
4965  break;
4966  case MVT::i64:
4967  LoReg = X86::RAX;
4968  ROpc = X86::MUL64r;
4969  MOpc = X86::MUL64m;
4970  break;
4971  }
4972 
4973  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4974  bool FoldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4975  // Multiply is commutative.
4976  if (!FoldedLoad) {
4977  FoldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4978  if (FoldedLoad)
4979  std::swap(N0, N1);
4980  }
4981 
4982  SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
4983  N0, SDValue()).getValue(1);
4984 
4985  MachineSDNode *CNode;
4986  if (FoldedLoad) {
4987  // i16/i32/i64 use an instruction that produces a low and high result even
4988  // though only the low result is used.
4989  SDVTList VTs;
4990  if (NVT == MVT::i8)
4991  VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
4992  else
4993  VTs = CurDAG->getVTList(NVT, NVT, MVT::i32, MVT::Other);
4994 
4995  SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
4996  InFlag };
4997  CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4998 
4999  // Update the chain.
5000  ReplaceUses(N1.getValue(1), SDValue(CNode, NVT == MVT::i8 ? 2 : 3));
5001  // Record the mem-refs
5002  CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
5003  } else {
5004  // i16/i32/i64 use an instruction that produces a low and high result even
5005  // though only the low result is used.
5006  SDVTList VTs;
5007  if (NVT == MVT::i8)
5008  VTs = CurDAG->getVTList(NVT, MVT::i32);
5009  else
5010  VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
5011 
5012  CNode = CurDAG->getMachineNode(ROpc, dl, VTs, {N1, InFlag});
5013  }
5014 
5015  ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
5016  ReplaceUses(SDValue(Node, 1), SDValue(CNode, NVT == MVT::i8 ? 1 : 2));
5017  CurDAG->RemoveDeadNode(Node);
5018  return;
5019  }
5020 
5021  case ISD::SMUL_LOHI:
5022  case ISD::UMUL_LOHI: {
5023  SDValue N0 = Node->getOperand(0);
5024  SDValue N1 = Node->getOperand(1);
5025 
5026  unsigned Opc, MOpc;
5027  unsigned LoReg, HiReg;
5028  bool IsSigned = Opcode == ISD::SMUL_LOHI;
5029  bool UseMULX = !IsSigned && Subtarget->hasBMI2();
5030  bool UseMULXHi = UseMULX && SDValue(Node, 0).use_empty();
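// Editorial note: MULX (BMI2) takes one factor implicitly in EDX/RDX, writes
// the product halves to explicit destinations, and leaves EFLAGS untouched.
// The MULX*Hrr/Hrm forms are used when the low half has no uses, so no
// register need be allocated for it.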
5031  switch (NVT.SimpleTy) {
5032  default: llvm_unreachable("Unsupported VT!");
5033  case MVT::i32:
5034  Opc = UseMULXHi ? X86::MULX32Hrr :
5035  UseMULX ? X86::MULX32rr :
5036  IsSigned ? X86::IMUL32r : X86::MUL32r;
5037  MOpc = UseMULXHi ? X86::MULX32Hrm :
5038  UseMULX ? X86::MULX32rm :
5039  IsSigned ? X86::IMUL32m : X86::MUL32m;
5040  LoReg = UseMULX ? X86::EDX : X86::EAX;
5041  HiReg = X86::EDX;
5042  break;
5043  case MVT::i64:
5044  Opc = UseMULXHi ? X86::MULX64Hrr :
5045  UseMULX ? X86::MULX64rr :
5046  IsSigned ? X86::IMUL64r : X86::MUL64r;
5047  MOpc = UseMULXHi ? X86::MULX64Hrm :
5048  UseMULX ? X86::MULX64rm :
5049  IsSigned ? X86::IMUL64m : X86::MUL64m;
5050  LoReg = UseMULX ? X86::RDX : X86::RAX;
5051  HiReg = X86::RDX;
5052  break;
5053  }
5054 
5055  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
5056  bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
5057  // Multiply is commutative.
5058  if (!foldedLoad) {
5059  foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
5060  if (foldedLoad)
5061  std::swap(N0, N1);
5062  }
5063 
5064  SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
5065  N0, SDValue()).getValue(1);
5066  SDValue ResHi, ResLo;
5067  if (foldedLoad) {
5068  SDValue Chain;
5069  MachineSDNode *CNode = nullptr;
5070  SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
5071  InFlag };
5072  if (UseMULXHi) {
5073  SDVTList VTs = CurDAG->getVTList(NVT, MVT::Other);
5074  CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
5075  ResHi = SDValue(CNode, 0);
5076  Chain = SDValue(CNode, 1);
5077  } else if (UseMULX) {
5078  SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other);
5079  CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
5080  ResHi = SDValue(CNode, 0);
5081  ResLo = SDValue(CNode, 1);
5082  Chain = SDValue(CNode, 2);
5083  } else {
5084  SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
5085  CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
5086  Chain = SDValue(CNode, 0);
5087  InFlag = SDValue(CNode, 1);
5088  }
5089 
5090  // Update the chain.
5091  ReplaceUses(N1.getValue(1), Chain);
5092  // Record the mem-refs
5093  CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
5094  } else {
5095  SDValue Ops[] = { N1, InFlag };
5096  if (UseMULXHi) {
5097  SDVTList VTs = CurDAG->getVTList(NVT);
5098  SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
5099  ResHi = SDValue(CNode, 0);
5100  } else if (UseMULX) {
5101  SDVTList VTs = CurDAG->getVTList(NVT, NVT);
5102  SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
5103  ResHi = SDValue(CNode, 0);
5104  ResLo = SDValue(CNode, 1);
5105  } else {
5106  SDVTList VTs = CurDAG->getVTList(MVT::Glue);
5107  SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
5108  InFlag = SDValue(CNode, 0);
5109  }
5110  }
5111 
5112  // Copy the low half of the result, if it is needed.
5113  if (!SDValue(Node, 0).use_empty()) {
5114  if (!ResLo) {
5115  assert(LoReg && "Register for low half is not defined!");
5116  ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg,
5117  NVT, InFlag);
5118  InFlag = ResLo.getValue(2);
5119  }
5120  ReplaceUses(SDValue(Node, 0), ResLo);
5121  LLVM_DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG);
5122  dbgs() << '\n');
5123  }
5124  // Copy the high half of the result, if it is needed.
5125  if (!SDValue(Node, 1).use_empty()) {
5126  if (!ResHi) {
5127  assert(HiReg && "Register for high half is not defined!");
5128  ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg,
5129  NVT, InFlag);
5130  InFlag = ResHi.getValue(2);
5131  }
5132  ReplaceUses(SDValue(Node, 1), ResHi);
5133  LLVM_DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG);
5134  dbgs() << '\n');
5135  }
5136 
5137  CurDAG->RemoveDeadNode(Node);
5138  return;
5139  }
5140 
5141  case ISD::SDIVREM:
5142  case ISD::UDIVREM: {
5143  SDValue N0 = Node->getOperand(0);
5144  SDValue N1 = Node->getOperand(1);
5145 
5146  unsigned ROpc, MOpc;
5147  bool isSigned = Opcode == ISD::SDIVREM;
5148  if (!isSigned) {
5149  switch (NVT.SimpleTy) {
5150  default: llvm_unreachable("Unsupported VT!");
5151  case MVT::i8: ROpc = X86::DIV8r; MOpc = X86::DIV8m; break;
5152  case MVT::i16: ROpc = X86::DIV16r; MOpc = X86::DIV16m; break;
5153  case MVT::i32: ROpc = X86::DIV32r; MOpc = X86::DIV32m; break;
5154  case MVT::i64: ROpc = X86::DIV64r; MOpc = X86::DIV64m; break;
5155  }
5156  } else {
5157  switch (NVT.SimpleTy) {
5158  default: llvm_unreachable("Unsupported VT!");
5159  case MVT::i8: ROpc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
5160  case MVT::i16: ROpc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
5161  case MVT::i32: ROpc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
5162  case MVT::i64: ROpc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
5163  }
5164  }
5165 
5166  unsigned LoReg, HiReg, ClrReg;
5167  unsigned SExtOpcode;
5168  switch (NVT.SimpleTy) {
5169  default: llvm_unreachable("Unsupported VT!");
5170  case MVT::i8:
5171  LoReg = X86::AL; ClrReg = HiReg = X86::AH;
5172  SExtOpcode = 0; // Not used.
5173  break;
5174  case MVT::i16:
5175  LoReg = X86::AX; HiReg = X86::DX;
5176  ClrReg = X86::DX;
5177  SExtOpcode = X86::CWD;
5178  break;
5179  case MVT::i32:
5180  LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
5181  SExtOpcode = X86::CDQ;
5182  break;
5183  case MVT::i64:
5184  LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
5185  SExtOpcode = X86::CQO;
5186  break;
5187  }
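// Editorial note: x86 division takes the dividend implicitly in HiReg:LoReg
// (AX as AH:AL for i8, EDX:EAX for i32, ...), leaving the quotient in LoReg
// and the remainder in HiReg. SExtOpcode (CWD/CDQ/CQO) sign-extends LoReg
// into HiReg for the signed case; ClrReg is zeroed for unsigned division.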
5188 
5189  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
5190  bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
5191  bool signBitIsZero = CurDAG->SignBitIsZero(N0);
5192 
5193  SDValue InFlag;
5194  if (NVT == MVT::i8) {
5195  // Special case for div8: the dividend is taken from AX, so load it with a
5196  // zero- or sign-extending 16-bit move that sets up the upper 8 bits (AH).
5197  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain;
5198  MachineSDNode *Move;
5199  if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
5200  SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
5201  unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rm8
5202  : X86::MOVZX16rm8;
5203  Move = CurDAG->getMachineNode(Opc, dl, MVT::i16, MVT::Other, Ops);
5204  Chain = SDValue(Move, 1);
5205  ReplaceUses(N0.getValue(1), Chain);
5206  // Record the mem-refs
5207  CurDAG->setNodeMemRefs(Move, {cast<LoadSDNode>(N0)->getMemOperand()});
5208  } else {
5209  unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rr8