1 //=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements the WebAssemblyTargetLowering class.
11 ///
12 //===----------------------------------------------------------------------===//
13 
17 #include "WebAssemblySubtarget.h"
19 #include "llvm/CodeGen/Analysis.h"
27 #include "llvm/IR/DiagnosticInfo.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/Intrinsics.h"
31 #include "llvm/Support/Debug.h"
35 using namespace llvm;
36 
37 #define DEBUG_TYPE "wasm-lower"
38 
39 WebAssemblyTargetLowering::WebAssemblyTargetLowering(
40     const TargetMachine &TM, const WebAssemblySubtarget &STI)
41  : TargetLowering(TM), Subtarget(&STI) {
42  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
43 
44  // Booleans always contain 0 or 1.
45  setBooleanContents(ZeroOrOneBooleanContent);
46  // Except in SIMD vectors
47  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
48  // We don't know the microarchitecture here, so just reduce register pressure.
49  setSchedulingPreference(Sched::RegPressure);
50  // Tell ISel that we have a stack pointer.
51  setStackPointerRegisterToSaveRestore(
52      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
53  // Set up the register classes.
54  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
55  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
56  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
57  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
58  if (Subtarget->hasSIMD128()) {
59  addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
60  addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
61  addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
62  addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
63  }
64  if (Subtarget->hasUnimplementedSIMD128()) {
65  addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
66  addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
67  }
68  // Compute derived properties from the register classes.
69  computeRegisterProperties(Subtarget->getRegisterInfo());
70 
76 
77  // Take the default expansion for va_arg, va_copy, and va_end. There is no
78  // default action for va_start, so we handle it with custom lowering.
83 
84  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
85  // Don't expand the floating-point types to constant pools.
87  // Expand floating-point comparisons.
88  for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
91  // Expand floating-point library function operators.
92  for (auto Op :
95  // Note supported floating-point library function operators that otherwise
96  // default to expand.
97  for (auto Op :
100  // Support minimum and maximum, which otherwise default to expand.
103  // WebAssembly currently has no builtin f16 support.
108  }
109 
110  // Expand unavailable integer operations.
111  for (auto Op :
115  for (auto T : {MVT::i32, MVT::i64})
117  if (Subtarget->hasSIMD128())
118  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
120  if (Subtarget->hasUnimplementedSIMD128())
122  }
123 
124  // SIMD-specific configuration
125  if (Subtarget->hasSIMD128()) {
126  // Support saturating add for i8x16 and i16x8
127  for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
128  for (auto T : {MVT::v16i8, MVT::v8i16})
130 
131  // Custom lower BUILD_VECTORs to minimize number of replace_lanes
132  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
134  if (Subtarget->hasUnimplementedSIMD128())
135  for (auto T : {MVT::v2i64, MVT::v2f64})
137 
138  // We have custom shuffle lowering to expose the shuffle mask
139  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
141  if (Subtarget->hasUnimplementedSIMD128())
142  for (auto T: {MVT::v2i64, MVT::v2f64})
144 
145  // Custom lowering since wasm shifts must have a scalar shift amount
146  for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL}) {
147  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
149  if (Subtarget->hasUnimplementedSIMD128())
151  }
152 
153  // Custom lower lane accesses to expand out variable indices
155  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
157  if (Subtarget->hasUnimplementedSIMD128())
158  for (auto T : {MVT::v2i64, MVT::v2f64})
160  }
161 
162  // There is no i64x2.mul instruction
164 
165  // There are no vector select instructions
166  for (auto Op : {ISD::VSELECT, ISD::SELECT_CC, ISD::SELECT}) {
167  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
169  if (Subtarget->hasUnimplementedSIMD128())
170  for (auto T : {MVT::v2i64, MVT::v2f64})
172  }
173 
174  // Expand integer operations supported for scalars but not SIMD
177  for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
179  if (Subtarget->hasUnimplementedSIMD128())
181  }
182 
183  // Expand float operations supported for scalars but not SIMD
188  if (Subtarget->hasUnimplementedSIMD128())
190  }
191 
192  // Expand additional SIMD ops that V8 hasn't implemented yet
193  if (!Subtarget->hasUnimplementedSIMD128()) {
196  }
197  }
198 
199  // As a special case, these operators use the type to mean the type to
200  // sign-extend from.
202  if (!Subtarget->hasSignExt()) {
203  // Sign extends are legal only when extending a vector extract
204  auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
205  for (auto T : {MVT::i8, MVT::i16, MVT::i32})
207  }
208  for (auto T : MVT::integer_vector_valuetypes())
210 
211  // Dynamic stack allocation: use the default expansion.
215 
218 
219  // Expand these forms; we pattern-match the forms that we can handle in isel.
220  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
221  for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
223 
224  // We have custom switch handling.
226 
227  // WebAssembly doesn't have:
228  // - Floating-point extending loads.
229  // - Floating-point truncating stores.
230  // - i1 extending loads.
231  // - extending/truncating SIMD loads/stores
234  for (auto T : MVT::integer_valuetypes())
237  if (Subtarget->hasSIMD128()) {
239  MVT::v2f64}) {
240  for (auto MemT : MVT::vector_valuetypes()) {
241  if (MVT(T) != MemT) {
242  setTruncStoreAction(T, MemT, Expand);
244  setLoadExtAction(Ext, T, MemT, Expand);
245  }
246  }
247  }
248  }
249 
250  // Don't do anything clever with build_pairs
252 
253  // Trap lowers to wasm unreachable
255 
256  // Exception handling intrinsics
259 
261 
262  if (Subtarget->hasBulkMemory()) {
263  // Use memory.copy and friends over multiple loads and stores
264  MaxStoresPerMemcpy = 1;
268  MaxStoresPerMemset = 1;
270  }
271 
272  // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
273  // consistent with the f64 and f128 names.
274  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
275  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
276 
277  // Define the emscripten name for return address helper.
278  // TODO: when implementing other WASM backends, make this generic or only do
279  // this on emscripten depending on what they end up doing.
280  setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");
281 
282  // Always convert switches to br_tables unless there is only one case, which
283  // is equivalent to a simple branch. This reduces code size for wasm, and we
284  // defer possible jump table optimizations to the VM.
286 }
287 
288 TargetLowering::AtomicExpansionKind
289 WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
290  // We have wasm instructions for these
291  switch (AI->getOperation()) {
292  case AtomicRMWInst::Add:
293  case AtomicRMWInst::Sub:
294  case AtomicRMWInst::And:
295  case AtomicRMWInst::Or:
296  case AtomicRMWInst::Xor:
297  case AtomicRMWInst::Xchg:
298    return AtomicExpansionKind::None;
299  default:
300  break;
301  }
302  return AtomicExpansionKind::CmpXChg;
303 }
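// Illustrative example (an editor's sketch, not part of the original source):
// `atomicrmw add i32* %p, i32 1 seq_cst` maps onto the wasm
// i32.atomic.rmw.add instruction and is left alone here, while an operation
// outside the list above, such as `atomicrmw nand`, takes the default path
// and is rewritten by the AtomicExpand pass into a compare-exchange loop
// before instruction selection.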
304 
305 FastISel *WebAssemblyTargetLowering::createFastISel(
306  FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
307  return WebAssembly::createFastISel(FuncInfo, LibInfo);
308 }
309 
310 MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
311  EVT VT) const {
312  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
313  if (BitWidth > 1 && BitWidth < 8)
314  BitWidth = 8;
315 
316  if (BitWidth > 64) {
317  // The shift will be lowered to a libcall, and compiler-rt libcalls expect
318  // the count to be an i32.
319  BitWidth = 32;
320  assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
321  "32-bit shift counts ought to be enough for anyone");
322  }
323 
324  MVT Result = MVT::getIntegerVT(BitWidth);
325  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
326         "Unable to represent scalar shift amount type");
327  return Result;
328 }
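// Worked examples (illustrative, not from the original source): i32 and i64
// shifts keep i32 and i64 shift amounts; an i8 shift uses an i8 amount
// (NextPowerOf2(7) == 8); an i128 shift is lowered to a compiler-rt libcall,
// so its shift amount is forced down to i32.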
329 
330 // Lower an fp-to-int conversion operator from the LLVM opcode, which has an
331 // undefined result on invalid/overflow, to the WebAssembly opcode, which
332 // traps on invalid/overflow.
333 static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
334                                        MachineBasicBlock *BB,
335  const TargetInstrInfo &TII,
336  bool IsUnsigned, bool Int64,
337  bool Float64, unsigned LoweredOpcode) {
338  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
339 
340  unsigned OutReg = MI.getOperand(0).getReg();
341  unsigned InReg = MI.getOperand(1).getReg();
342 
343  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
344  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
345  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
346  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
347  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
348  unsigned Eqz = WebAssembly::EQZ_I32;
349  unsigned And = WebAssembly::AND_I32;
350  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
351  int64_t Substitute = IsUnsigned ? 0 : Limit;
352  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
353  auto &Context = BB->getParent()->getFunction().getContext();
354  Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
355 
356  const BasicBlock *LLVMBB = BB->getBasicBlock();
357  MachineFunction *F = BB->getParent();
358  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
359  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
360  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
361 
362  MachineFunction::iterator It = ++BB->getIterator();
363  F->insert(It, FalseMBB);
364  F->insert(It, TrueMBB);
365  F->insert(It, DoneMBB);
366 
367  // Transfer the remainder of BB and its successor edges to DoneMBB.
368  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
369  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
370 
371  BB->addSuccessor(TrueMBB);
372  BB->addSuccessor(FalseMBB);
373  TrueMBB->addSuccessor(DoneMBB);
374  FalseMBB->addSuccessor(DoneMBB);
375 
376  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
377  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
378  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
379  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
380  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
381  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
382  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
383 
384  MI.eraseFromParent();
385  // For signed numbers, we can do a single comparison to determine whether
386  // fabs(x) is within range.
387  if (IsUnsigned) {
388  Tmp0 = InReg;
389  } else {
390  BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
391  }
392  BuildMI(BB, DL, TII.get(FConst), Tmp1)
393  .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
394  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
395 
396  // For unsigned numbers, we have to do a separate comparison with zero.
397  if (IsUnsigned) {
398  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
399  unsigned SecondCmpReg =
400  MRI.createVirtualRegister(&WebAssembly::I32RegClass);
401  unsigned AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
402  BuildMI(BB, DL, TII.get(FConst), Tmp1)
403  .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
404  BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
405  BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
406  CmpReg = AndReg;
407  }
408 
409  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
410 
411  // Create the CFG diamond to select between doing the conversion or using
412  // the substitute value.
413  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
414  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
415  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
416  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
417  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
418  .addReg(FalseReg)
419  .addMBB(FalseMBB)
420  .addReg(TrueReg)
421  .addMBB(TrueMBB);
422 
423  return DoneMBB;
424 }
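// Rough shape of the code emitted above for an unsigned f32 -> i32 conversion
// (an illustrative sketch in pseudo machine IR, not from the original source;
// the register names are made up):
//   BB:       %cmp = LT_F32 %in, 4294967296.0
//             %ge  = GE_F32 %in, 0.0
//             %ok  = AND_I32 %cmp, %ge
//             %eqz = EQZ_I32 %ok
//             BR_IF TrueMBB, %eqz
//   FalseMBB: %f = I32_TRUNC_U_F32 %in    ; in range: do the real conversion
//             BR DoneMBB
//   TrueMBB:  %t = CONST_I32 0            ; NaN or out of range: substitute
//   DoneMBB:  %out = PHI [%f, FalseMBB], [%t, TrueMBB]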
425 
426 MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
427  MachineInstr &MI, MachineBasicBlock *BB) const {
428  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
429  DebugLoc DL = MI.getDebugLoc();
430 
431  switch (MI.getOpcode()) {
432  default:
433  llvm_unreachable("Unexpected instr type to insert");
434  case WebAssembly::FP_TO_SINT_I32_F32:
435  return LowerFPToInt(MI, DL, BB, TII, false, false, false,
436  WebAssembly::I32_TRUNC_S_F32);
437  case WebAssembly::FP_TO_UINT_I32_F32:
438  return LowerFPToInt(MI, DL, BB, TII, true, false, false,
439  WebAssembly::I32_TRUNC_U_F32);
440  case WebAssembly::FP_TO_SINT_I64_F32:
441  return LowerFPToInt(MI, DL, BB, TII, false, true, false,
442  WebAssembly::I64_TRUNC_S_F32);
443  case WebAssembly::FP_TO_UINT_I64_F32:
444  return LowerFPToInt(MI, DL, BB, TII, true, true, false,
445  WebAssembly::I64_TRUNC_U_F32);
446  case WebAssembly::FP_TO_SINT_I32_F64:
447  return LowerFPToInt(MI, DL, BB, TII, false, false, true,
448  WebAssembly::I32_TRUNC_S_F64);
449  case WebAssembly::FP_TO_UINT_I32_F64:
450  return LowerFPToInt(MI, DL, BB, TII, true, false, true,
451  WebAssembly::I32_TRUNC_U_F64);
452  case WebAssembly::FP_TO_SINT_I64_F64:
453  return LowerFPToInt(MI, DL, BB, TII, false, true, true,
454  WebAssembly::I64_TRUNC_S_F64);
455  case WebAssembly::FP_TO_UINT_I64_F64:
456  return LowerFPToInt(MI, DL, BB, TII, true, true, true,
457  WebAssembly::I64_TRUNC_U_F64);
458  llvm_unreachable("Unexpected instruction to emit with custom inserter");
459  }
460 }
461 
462 const char *
463 WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
464  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
465  case WebAssemblyISD::FIRST_NUMBER:
466    break;
467 #define HANDLE_NODETYPE(NODE) \
468  case WebAssemblyISD::NODE: \
469  return "WebAssemblyISD::" #NODE;
470 #include "WebAssemblyISD.def"
471 #undef HANDLE_NODETYPE
472  }
473  return nullptr;
474 }
475 
476 std::pair<unsigned, const TargetRegisterClass *>
477 WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
478  const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
479  // First, see if this is a constraint that directly corresponds to a
480  // WebAssembly register class.
481  if (Constraint.size() == 1) {
482  switch (Constraint[0]) {
483  case 'r':
484  assert(VT != MVT::iPTR && "Pointer MVT not expected here");
485  if (Subtarget->hasSIMD128() && VT.isVector()) {
486  if (VT.getSizeInBits() == 128)
487  return std::make_pair(0U, &WebAssembly::V128RegClass);
488  }
489  if (VT.isInteger() && !VT.isVector()) {
490  if (VT.getSizeInBits() <= 32)
491  return std::make_pair(0U, &WebAssembly::I32RegClass);
492  if (VT.getSizeInBits() <= 64)
493  return std::make_pair(0U, &WebAssembly::I64RegClass);
494  }
495  break;
496  default:
497  break;
498  }
499  }
500 
501  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
502 }
503 
504 bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
505  // Assume ctz is a relatively cheap operation.
506  return true;
507 }
508 
509 bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
510  // Assume clz is a relatively cheap operation.
511  return true;
512 }
513 
514 bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
515  const AddrMode &AM,
516  Type *Ty, unsigned AS,
517  Instruction *I) const {
518  // WebAssembly offsets are added as unsigned without wrapping. The
519  // isLegalAddressingMode gives us no way to determine if wrapping could be
520  // happening, so we approximate this by accepting only non-negative offsets.
521  if (AM.BaseOffs < 0)
522  return false;
523 
524  // WebAssembly has no scale register operands.
525  if (AM.Scale != 0)
526  return false;
527 
528  // Everything else is legal.
529  return true;
530 }
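// For example (illustrative): a load from (%base + 16) can fold the 16 into
// the load's unsigned offset immediate, while (%base - 8) and
// (%base + 4 * %index) are rejected here and kept as explicit address
// arithmetic.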
531 
532 bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
533  EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/,
534  MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
535  // WebAssembly supports unaligned accesses, though it should be declared
536  // with the p2align attribute on loads and stores which do so, and there
537  // may be a performance impact. We tell LLVM they're "fast" because
538  // for the kinds of things that LLVM uses this for (merging adjacent stores
539  // of constants, etc.), WebAssembly implementations will either want the
540  // unaligned access or they'll split anyway.
541  if (Fast)
542  *Fast = true;
543  return true;
544 }
545 
546 bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
547  AttributeList Attr) const {
548  // The current thinking is that wasm engines will perform this optimization,
549  // so we can save on code size.
550  return true;
551 }
552 
553 EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
554  LLVMContext &C,
555  EVT VT) const {
556  if (VT.isVector())
557    return VT.changeVectorElementTypeToInteger();
558 
559  return TargetLowering::getSetCCResultType(DL, C, VT);
560 }
561 
562 bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
563  const CallInst &I,
564  MachineFunction &MF,
565  unsigned Intrinsic) const {
566  switch (Intrinsic) {
567  case Intrinsic::wasm_atomic_notify:
568    Info.opc = ISD::INTRINSIC_W_CHAIN;
569    Info.memVT = MVT::i32;
570  Info.ptrVal = I.getArgOperand(0);
571  Info.offset = 0;
572  Info.align = 4;
573  // atomic.notify instruction does not really load the memory specified with
574  // this argument, but MachineMemOperand should either be load or store, so
575  // we set this to a load.
576  // FIXME Volatile isn't really correct, but currently all LLVM atomic
577  // instructions are treated as volatiles in the backend, so we should be
578  // consistent. The same applies for wasm_atomic_wait intrinsics too.
579    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
580    return true;
581  case Intrinsic::wasm_atomic_wait_i32:
582    Info.opc = ISD::INTRINSIC_W_CHAIN;
583    Info.memVT = MVT::i32;
584  Info.ptrVal = I.getArgOperand(0);
585  Info.offset = 0;
586  Info.align = 4;
587    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
588    return true;
589  case Intrinsic::wasm_atomic_wait_i64:
590    Info.opc = ISD::INTRINSIC_W_CHAIN;
591    Info.memVT = MVT::i64;
592  Info.ptrVal = I.getArgOperand(0);
593  Info.offset = 0;
594  Info.align = 8;
595    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
596    return true;
597  default:
598  return false;
599  }
600 }
601 
602 //===----------------------------------------------------------------------===//
603 // WebAssembly Lowering private implementation.
604 //===----------------------------------------------------------------------===//
605 
606 //===----------------------------------------------------------------------===//
607 // Lowering Code
608 //===----------------------------------------------------------------------===//
609 
610 static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
611  MachineFunction &MF = DAG.getMachineFunction();
612  DAG.getContext()->diagnose(
613      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
614 }
615 
616 // Test whether the given calling convention is supported.
617 static bool callingConvSupported(CallingConv::ID CallConv) {
618  // We currently support the language-independent target-independent
619  // conventions. We don't yet have a way to annotate calls with properties like
620  // "cold", and we don't have any call-clobbered registers, so these are mostly
621  // all handled the same.
622  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
623  CallConv == CallingConv::Cold ||
624  CallConv == CallingConv::PreserveMost ||
625  CallConv == CallingConv::PreserveAll ||
626  CallConv == CallingConv::CXX_FAST_TLS;
627 }
628 
629 SDValue
630 WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
631  SmallVectorImpl<SDValue> &InVals) const {
632  SelectionDAG &DAG = CLI.DAG;
633  SDLoc DL = CLI.DL;
634  SDValue Chain = CLI.Chain;
635  SDValue Callee = CLI.Callee;
636  MachineFunction &MF = DAG.getMachineFunction();
637  auto Layout = MF.getDataLayout();
638 
639  CallingConv::ID CallConv = CLI.CallConv;
640  if (!callingConvSupported(CallConv))
641  fail(DL, DAG,
642  "WebAssembly doesn't support language-specific or target-specific "
643  "calling conventions yet");
644  if (CLI.IsPatchPoint)
645  fail(DL, DAG, "WebAssembly doesn't support patch point yet");
646 
647  // Fail if tail calls are required but not enabled
648  if (!Subtarget->hasTailCall()) {
649  if ((CallConv == CallingConv::Fast && CLI.IsTailCall &&
650         MF.getTarget().Options.GuaranteedTailCallOpt) ||
651        (CLI.CS && CLI.CS.isMustTailCall()))
652  fail(DL, DAG, "WebAssembly 'tail-call' feature not enabled");
653  CLI.IsTailCall = false;
654  }
655 
656  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
657  if (Ins.size() > 1)
658  fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet");
659 
660  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
661  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
662  unsigned NumFixedArgs = 0;
663  for (unsigned I = 0; I < Outs.size(); ++I) {
664  const ISD::OutputArg &Out = Outs[I];
665  SDValue &OutVal = OutVals[I];
666  if (Out.Flags.isNest())
667  fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
668  if (Out.Flags.isInAlloca())
669  fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
670  if (Out.Flags.isInConsecutiveRegs())
671  fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
672  if (Out.Flags.isInConsecutiveRegsLast())
673  fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
674  if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
675  auto &MFI = MF.getFrameInfo();
676  int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
677  Out.Flags.getByValAlign(),
678  /*isSS=*/false);
679  SDValue SizeNode =
680  DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
681  SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
682  Chain = DAG.getMemcpy(
683  Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getByValAlign(),
684  /*isVolatile*/ false, /*AlwaysInline=*/false,
685  /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
686  OutVal = FINode;
687  }
688  // Count the number of fixed args *after* legalization.
689  NumFixedArgs += Out.IsFixed;
690  }
691 
692  bool IsVarArg = CLI.IsVarArg;
693  auto PtrVT = getPointerTy(Layout);
694 
695  // Analyze operands of the call, assigning locations to each operand.
696  SmallVector<CCValAssign, 16> ArgLocs;
697  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
698 
699  if (IsVarArg) {
700  // Outgoing non-fixed arguments are placed in a buffer. First
701  // compute their offsets and the total amount of buffer space needed.
702  for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
703  const ISD::OutputArg &Out = Outs[I];
704  SDValue &Arg = OutVals[I];
705  EVT VT = Arg.getValueType();
706  assert(VT != MVT::iPTR && "Legalized args should be concrete");
707  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
708  unsigned Align = std::max(Out.Flags.getOrigAlign(),
709  Layout.getABITypeAlignment(Ty));
710  unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
711  Align);
712  CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
713  Offset, VT.getSimpleVT(),
714                                        CCValAssign::Full));
715    }
716  }
717 
718  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
719 
720  SDValue FINode;
721  if (IsVarArg && NumBytes) {
722  // For non-fixed arguments, next emit stores to store the argument values
723  // to the stack buffer at the offsets computed above.
724  int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
725  Layout.getStackAlignment(),
726  /*isSS=*/false);
727  unsigned ValNo = 0;
728    SmallVector<SDValue, 8> Chains;
729    for (SDValue Arg :
730  make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
731  assert(ArgLocs[ValNo].getValNo() == ValNo &&
732  "ArgLocs should remain in order and only hold varargs args");
733  unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
734  FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
735  SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
736  DAG.getConstant(Offset, DL, PtrVT));
737  Chains.push_back(
738  DAG.getStore(Chain, DL, Arg, Add,
739  MachinePointerInfo::getFixedStack(MF, FI, Offset), 0));
740  }
741  if (!Chains.empty())
742  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
743  } else if (IsVarArg) {
744  FINode = DAG.getIntPtrConstant(0, DL);
745  }
746 
747  if (Callee->getOpcode() == ISD::GlobalAddress) {
748  // If the callee is a GlobalAddress node (quite common, every direct call
749  // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
750    // doesn't add MO_GOT, which is not needed for direct calls.
751  GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
752  Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
753                                        getPointerTy(DAG.getDataLayout()),
754                                        GA->getOffset());
755  Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
756  getPointerTy(DAG.getDataLayout()), Callee);
757  }
758 
759  // Compute the operands for the CALLn node.
760  SmallVector<SDValue, 16> Ops;
761  Ops.push_back(Chain);
762  Ops.push_back(Callee);
763 
764  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
765  // isn't reliable.
766  Ops.append(OutVals.begin(),
767  IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
768  // Add a pointer to the vararg buffer.
769  if (IsVarArg)
770  Ops.push_back(FINode);
771 
772  SmallVector<EVT, 8> InTys;
773  for (const auto &In : Ins) {
774  assert(!In.Flags.isByVal() && "byval is not valid for return values");
775  assert(!In.Flags.isNest() && "nest is not valid for return values");
776  if (In.Flags.isInAlloca())
777  fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
778  if (In.Flags.isInConsecutiveRegs())
779  fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
780  if (In.Flags.isInConsecutiveRegsLast())
781  fail(DL, DAG,
782  "WebAssembly hasn't implemented cons regs last return values");
783  // Ignore In.getOrigAlign() because all our arguments are passed in
784  // registers.
785  InTys.push_back(In.VT);
786  }
787 
788  if (CLI.IsTailCall) {
789  // ret_calls do not return values to the current frame
790  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
791  return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
792  }
793 
794  InTys.push_back(MVT::Other);
795  SDVTList InTyList = DAG.getVTList(InTys);
796  SDValue Res =
797  DAG.getNode(Ins.empty() ? WebAssemblyISD::CALL0 : WebAssemblyISD::CALL1,
798  DL, InTyList, Ops);
799  if (Ins.empty()) {
800  Chain = Res;
801  } else {
802  InVals.push_back(Res);
803  Chain = Res.getValue(1);
804  }
805 
806  return Chain;
807 }
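// Illustrative varargs call (a hedged sketch, not from the original source):
// for a wasm32 call like printf(fmt, 1, 2.0), the fixed argument fmt is
// passed directly as a call operand, while 1 and 2.0 are stored into a
// caller-side stack object at the offsets chosen by CCInfo above (0 for the
// i32 and 8 for the 8-byte-aligned double), and a single pointer to that
// buffer is appended as the last operand of the CALL node.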
808 
809 bool WebAssemblyTargetLowering::CanLowerReturn(
810  CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
811    const SmallVectorImpl<ISD::OutputArg> &Outs,
812    LLVMContext & /*Context*/) const {
813  // WebAssembly can't currently handle returning tuples.
814  return Outs.size() <= 1;
815 }
816 
817 SDValue WebAssemblyTargetLowering::LowerReturn(
818  SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
819    const SmallVectorImpl<ISD::OutputArg> &Outs,
820    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
821  SelectionDAG &DAG) const {
822  assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
823  if (!callingConvSupported(CallConv))
824  fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
825 
826  SmallVector<SDValue, 4> RetOps(1, Chain);
827  RetOps.append(OutVals.begin(), OutVals.end());
828  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
829 
830  // Record the number and types of the return values.
831  for (const ISD::OutputArg &Out : Outs) {
832  assert(!Out.Flags.isByVal() && "byval is not valid for return values");
833  assert(!Out.Flags.isNest() && "nest is not valid for return values");
834  assert(Out.IsFixed && "non-fixed return value is not valid");
835  if (Out.Flags.isInAlloca())
836  fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
837  if (Out.Flags.isInConsecutiveRegs())
838  fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
839  if (Out.Flags.isInConsecutiveRegsLast())
840  fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
841  }
842 
843  return Chain;
844 }
845 
846 SDValue WebAssemblyTargetLowering::LowerFormalArguments(
847  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
848  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
849  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
850  if (!callingConvSupported(CallConv))
851  fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
852 
853  MachineFunction &MF = DAG.getMachineFunction();
854  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
855 
856  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
857  // of the incoming values before they're represented by virtual registers.
858  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
859 
860  for (const ISD::InputArg &In : Ins) {
861  if (In.Flags.isInAlloca())
862  fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
863  if (In.Flags.isNest())
864  fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
865  if (In.Flags.isInConsecutiveRegs())
866  fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
867  if (In.Flags.isInConsecutiveRegsLast())
868  fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
869  // Ignore In.getOrigAlign() because all our arguments are passed in
870  // registers.
871  InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
872  DAG.getTargetConstant(InVals.size(),
873  DL, MVT::i32))
874  : DAG.getUNDEF(In.VT));
875 
876  // Record the number and types of arguments.
877  MFI->addParam(In.VT);
878  }
879 
880  // Varargs are copied into a buffer allocated by the caller, and a pointer to
881  // the buffer is passed as an argument.
882  if (IsVarArg) {
883  MVT PtrVT = getPointerTy(MF.getDataLayout());
884  unsigned VarargVreg =
885        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
886    MFI->setVarargBufferVreg(VarargVreg);
887  Chain = DAG.getCopyToReg(
888  Chain, DL, VarargVreg,
889  DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
890  DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
891  MFI->addParam(PtrVT);
892  }
893 
894  // Record the number and types of arguments and results.
895  SmallVector<MVT, 4> Params;
896  SmallVector<MVT, 4> Results;
897  computeSignatureVTs(MF.getFunction().getFunctionType(), MF.getFunction(),
898                      DAG.getTarget(), Params, Results);
899  for (MVT VT : Results)
900  MFI->addResult(VT);
901  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
902  // the param logic here with ComputeSignatureVTs
903  assert(MFI->getParams().size() == Params.size() &&
904  std::equal(MFI->getParams().begin(), MFI->getParams().end(),
905  Params.begin()));
906 
907  return Chain;
908 }
909 
910 void WebAssemblyTargetLowering::ReplaceNodeResults(
911    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
912  switch (N->getOpcode()) {
913  case ISD::SIGN_EXTEND_INREG:
914  // Do not add any results, signifying that N should not be custom lowered
915  // after all. This happens because simd128 turns on custom lowering for
916  // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
917  // illegal type.
918  break;
919  default:
920    llvm_unreachable(
921        "ReplaceNodeResults not implemented for this op for WebAssembly!");
922  }
923 }
924 
925 //===----------------------------------------------------------------------===//
926 // Custom lowering hooks.
927 //===----------------------------------------------------------------------===//
928 
929 SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
930  SelectionDAG &DAG) const {
931  SDLoc DL(Op);
932  switch (Op.getOpcode()) {
933  default:
934  llvm_unreachable("unimplemented operation lowering");
935  return SDValue();
936  case ISD::FrameIndex:
937  return LowerFrameIndex(Op, DAG);
938  case ISD::GlobalAddress:
939  return LowerGlobalAddress(Op, DAG);
940  case ISD::ExternalSymbol:
941  return LowerExternalSymbol(Op, DAG);
942  case ISD::JumpTable:
943  return LowerJumpTable(Op, DAG);
944  case ISD::BR_JT:
945  return LowerBR_JT(Op, DAG);
946  case ISD::VASTART:
947  return LowerVASTART(Op, DAG);
948  case ISD::BlockAddress:
949  case ISD::BRIND:
950  fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
951  return SDValue();
952  case ISD::RETURNADDR:
953  return LowerRETURNADDR(Op, DAG);
954  case ISD::FRAMEADDR:
955  return LowerFRAMEADDR(Op, DAG);
956  case ISD::CopyToReg:
957  return LowerCopyToReg(Op, DAG);
958  case ISD::EXTRACT_VECTOR_ELT:
959  case ISD::INSERT_VECTOR_ELT:
960    return LowerAccessVectorElement(Op, DAG);
961  case ISD::INTRINSIC_VOID:
962  case ISD::INTRINSIC_WO_CHAIN:
963  case ISD::INTRINSIC_W_CHAIN:
964    return LowerIntrinsic(Op, DAG);
965  case ISD::SIGN_EXTEND_INREG:
966    return LowerSIGN_EXTEND_INREG(Op, DAG);
967  case ISD::BUILD_VECTOR:
968  return LowerBUILD_VECTOR(Op, DAG);
969  case ISD::VECTOR_SHUFFLE:
970  return LowerVECTOR_SHUFFLE(Op, DAG);
971  case ISD::SHL:
972  case ISD::SRA:
973  case ISD::SRL:
974  return LowerShift(Op, DAG);
975  }
976 }
977 
978 SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
979  SelectionDAG &DAG) const {
980  SDValue Src = Op.getOperand(2);
981  if (isa<FrameIndexSDNode>(Src.getNode())) {
982  // CopyToReg nodes don't support FrameIndex operands. Other targets select
983  // the FI to some LEA-like instruction, but since we don't have that, we
984  // need to insert some kind of instruction that can take an FI operand and
985  // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
986  // local.copy between Op and its FI operand.
987  SDValue Chain = Op.getOperand(0);
988  SDLoc DL(Op);
989  unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
990  EVT VT = Src.getValueType();
991  SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
992  : WebAssembly::COPY_I64,
993  DL, VT, Src),
994  0);
995  return Op.getNode()->getNumValues() == 1
996  ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
997  : DAG.getCopyToReg(Chain, DL, Reg, Copy,
998  Op.getNumOperands() == 4 ? Op.getOperand(3)
999  : SDValue());
1000  }
1001  return SDValue();
1002 }
1003 
1004 SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1005  SelectionDAG &DAG) const {
1006  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
1007  return DAG.getTargetFrameIndex(FI, Op.getValueType());
1008 }
1009 
1010 SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1011  SelectionDAG &DAG) const {
1012  SDLoc DL(Op);
1013 
1014  if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1015  fail(DL, DAG,
1016  "Non-Emscripten WebAssembly hasn't implemented "
1017  "__builtin_return_address");
1018  return SDValue();
1019  }
1020 
1021  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1022    return SDValue();
1023 
1024  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1025  return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1026  {DAG.getConstant(Depth, DL, MVT::i32)}, false, DL)
1027  .first;
1028 }
1029 
1030 SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1031  SelectionDAG &DAG) const {
1032  // Non-zero depths are not supported by WebAssembly currently. Use the
1033  // legalizer's default expansion, which is to return 0 (what this function is
1034  // documented to do).
1035  if (Op.getConstantOperandVal(0) > 0)
1036  return SDValue();
1037 
1039  EVT VT = Op.getValueType();
1040  unsigned FP =
1041      Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
1042  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1043 }
1044 
1045 SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1046  SelectionDAG &DAG) const {
1047  SDLoc DL(Op);
1048  const auto *GA = cast<GlobalAddressSDNode>(Op);
1049  EVT VT = Op.getValueType();
1050  assert(GA->getTargetFlags() == 0 &&
1051  "Unexpected target flags on generic GlobalAddressSDNode");
1052  if (GA->getAddressSpace() != 0)
1053  fail(DL, DAG, "WebAssembly only expects the 0 address space");
1054 
1055  unsigned OperandFlags = 0;
1056  if (isPositionIndependent()) {
1057  const GlobalValue *GV = GA->getGlobal();
1058  if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
1059  MachineFunction &MF = DAG.getMachineFunction();
1060  MVT PtrVT = getPointerTy(MF.getDataLayout());
1061  const char *BaseName;
1062  if (GV->getValueType()->isFunctionTy()) {
1063  BaseName = MF.createExternalSymbolName("__table_base");
1064  OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1065  }
1066  else {
1067  BaseName = MF.createExternalSymbolName("__memory_base");
1068  OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1069  }
1070  SDValue BaseAddr =
1071  DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1072  DAG.getTargetExternalSymbol(BaseName, PtrVT));
1073 
1074  SDValue SymAddr = DAG.getNode(
1076  DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1077  OperandFlags));
1078 
1079  return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1080  } else {
1081  OperandFlags = WebAssemblyII::MO_GOT;
1082  }
1083  }
1084 
1085  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1086  DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1087  GA->getOffset(), OperandFlags));
1088 }
1089 
1090 SDValue
1091 WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1092  SelectionDAG &DAG) const {
1093  SDLoc DL(Op);
1094  const auto *ES = cast<ExternalSymbolSDNode>(Op);
1095  EVT VT = Op.getValueType();
1096  assert(ES->getTargetFlags() == 0 &&
1097  "Unexpected target flags on generic ExternalSymbolSDNode");
1098  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1099  DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1100 }
1101 
1102 SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1103  SelectionDAG &DAG) const {
1104  // There's no need for a Wrapper node because we always incorporate a jump
1105  // table operand into a BR_TABLE instruction, rather than ever
1106  // materializing it in a register.
1107  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1108  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1109  JT->getTargetFlags());
1110 }
1111 
1112 SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1113  SelectionDAG &DAG) const {
1114  SDLoc DL(Op);
1115  SDValue Chain = Op.getOperand(0);
1116  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1117  SDValue Index = Op.getOperand(2);
1118  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
1119 
1120  SmallVector<SDValue, 8> Ops;
1121  Ops.push_back(Chain);
1122  Ops.push_back(Index);
1123 
1124  MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1125  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1126 
1127  // Add an operand for each case.
1128  for (auto MBB : MBBs)
1129  Ops.push_back(DAG.getBasicBlock(MBB));
1130 
1131  // TODO: For now, we just pick something arbitrary for a default case.
1132  // We really want to sniff out the guard and put in the real default case (and
1133  // delete the guard).
1134  Ops.push_back(DAG.getBasicBlock(MBBs[0]));
1135 
1136  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1137 }
1138 
1139 SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1140  SelectionDAG &DAG) const {
1141  SDLoc DL(Op);
1142  EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1143 
1144  auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1145  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1146 
1147  SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1148  MFI->getVarargBufferVreg(), PtrVT);
1149  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1150  MachinePointerInfo(SV), 0);
1151 }
1152 
1153 SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1154  SelectionDAG &DAG) const {
1155  MachineFunction &MF = DAG.getMachineFunction();
1156  unsigned IntNo;
1157  switch (Op.getOpcode()) {
1158  case ISD::INTRINSIC_VOID:
1159  case ISD::INTRINSIC_W_CHAIN:
1160    IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1161  break;
1162  case ISD::INTRINSIC_WO_CHAIN:
1163    IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1164  break;
1165  default:
1166  llvm_unreachable("Invalid intrinsic");
1167  }
1168  SDLoc DL(Op);
1169 
1170  switch (IntNo) {
1171  default:
1172  return SDValue(); // Don't custom lower most intrinsics.
1173 
1174  case Intrinsic::wasm_lsda: {
1175  EVT VT = Op.getValueType();
1176  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1177  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1178  auto &Context = MF.getMMI().getContext();
1179  MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
1180  Twine(MF.getFunctionNumber()));
1181  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1182  DAG.getMCSymbol(S, PtrVT));
1183  }
1184 
1185  case Intrinsic::wasm_throw: {
1186  // We only support C++ exceptions for now
1187  int Tag = cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1188  if (Tag != CPP_EXCEPTION)
1189  llvm_unreachable("Invalid tag!");
1190  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1191  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1192  const char *SymName = MF.createExternalSymbolName("__cpp_exception");
1193  SDValue SymNode = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1194  DAG.getTargetExternalSymbol(SymName, PtrVT));
1195  return DAG.getNode(WebAssemblyISD::THROW, DL,
1196  MVT::Other, // outchain type
1197  {
1198  Op.getOperand(0), // inchain
1199  SymNode, // exception symbol
1200  Op.getOperand(3) // thrown value
1201  });
1202  }
1203  }
1204 }
1205 
1206 SDValue
1207 WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1208  SelectionDAG &DAG) const {
1209  SDLoc DL(Op);
1210  // If sign extension operations are disabled, allow sext_inreg only if operand
1211  // is a vector extract. SIMD does not depend on sign extension operations, but
1212  // allowing sext_inreg in this context lets us have simple patterns to select
1213  // extract_lane_s instructions. Expanding sext_inreg everywhere would be
1214  // simpler in this file, but would necessitate large and brittle patterns to
1215  // undo the expansion and select extract_lane_s instructions.
1216  assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
1217  if (Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
1218  const SDValue &Extract = Op.getOperand(0);
1219  MVT VecT = Extract.getOperand(0).getSimpleValueType();
1220  MVT ExtractedLaneT = static_cast<VTSDNode *>(Op.getOperand(1).getNode())
1221  ->getVT()
1222  .getSimpleVT();
1223  MVT ExtractedVecT =
1224  MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1225  if (ExtractedVecT == VecT)
1226  return Op;
1227  // Bitcast vector to appropriate type to ensure ISel pattern coverage
1228  const SDValue &Index = Extract.getOperand(1);
1229  unsigned IndexVal =
1230  static_cast<ConstantSDNode *>(Index.getNode())->getZExtValue();
1231  unsigned Scale =
1232  ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1233  assert(Scale > 1);
1234  SDValue NewIndex =
1235  DAG.getConstant(IndexVal * Scale, DL, Index.getValueType());
1236  SDValue NewExtract = DAG.getNode(
1237  ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1238  DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1239  return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(),
1240  NewExtract, Op.getOperand(1));
1241  }
1242  // Otherwise expand
1243  return SDValue();
1244 }
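// Worked example (illustrative, not from the original source): with the
// sign-extension feature disabled,
//   sext_inreg (extract_vector_elt v4i32:%v, 2), i8
// is rewritten above into
//   sext_inreg (extract_vector_elt (bitcast %v to v16i8), 8), i8
// since Scale = 16 / 4 = 4 turns lane 2 into byte lane 8, and the rewritten
// form then matches the i8x16.extract_lane_s pattern during isel.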
1245 
1246 SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1247  SelectionDAG &DAG) const {
1248  SDLoc DL(Op);
1249  const EVT VecT = Op.getValueType();
1250  const EVT LaneT = Op.getOperand(0).getValueType();
1251  const size_t Lanes = Op.getNumOperands();
1252  auto IsConstant = [](const SDValue &V) {
1253  return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
1254  };
1255 
1256  // Find the most common operand, which is approximately the best to splat
1257  using Entry = std::pair<SDValue, size_t>;
1258  SmallVector<Entry, 16> ValueCounts;
1259  size_t NumConst = 0, NumDynamic = 0;
1260  for (const SDValue &Lane : Op->op_values()) {
1261  if (Lane.isUndef()) {
1262  continue;
1263  } else if (IsConstant(Lane)) {
1264  NumConst++;
1265  } else {
1266  NumDynamic++;
1267  }
1268  auto CountIt = std::find_if(ValueCounts.begin(), ValueCounts.end(),
1269  [&Lane](Entry A) { return A.first == Lane; });
1270  if (CountIt == ValueCounts.end()) {
1271  ValueCounts.emplace_back(Lane, 1);
1272  } else {
1273  CountIt->second++;
1274  }
1275  }
1276  auto CommonIt =
1277  std::max_element(ValueCounts.begin(), ValueCounts.end(),
1278  [](Entry A, Entry B) { return A.second < B.second; });
1279  assert(CommonIt != ValueCounts.end() && "Unexpected all-undef build_vector");
1280  SDValue SplatValue = CommonIt->first;
1281  size_t NumCommon = CommonIt->second;
1282 
1283  // If v128.const is available, consider using it instead of a splat
1284  if (Subtarget->hasUnimplementedSIMD128()) {
1285  // {i32,i64,f32,f64}.const opcode, and value
1286  const size_t ConstBytes = 1 + std::max(size_t(4), 16 / Lanes);
1287  // SIMD prefix and opcode
1288  const size_t SplatBytes = 2;
1289  const size_t SplatConstBytes = SplatBytes + ConstBytes;
1290  // SIMD prefix, opcode, and lane index
1291  const size_t ReplaceBytes = 3;
1292  const size_t ReplaceConstBytes = ReplaceBytes + ConstBytes;
1293  // SIMD prefix, v128.const opcode, and 128-bit value
1294  const size_t VecConstBytes = 18;
1295  // Initial v128.const and a replace_lane for each non-const operand
1296  const size_t ConstInitBytes = VecConstBytes + NumDynamic * ReplaceBytes;
1297  // Initial splat and all necessary replace_lanes
1298  const size_t SplatInitBytes =
1299  IsConstant(SplatValue)
1300  // Initial constant splat
1301  ? (SplatConstBytes +
1302  // Constant replace_lanes
1303  (NumConst - NumCommon) * ReplaceConstBytes +
1304  // Dynamic replace_lanes
1305  (NumDynamic * ReplaceBytes))
1306  // Initial dynamic splat
1307  : (SplatBytes +
1308  // Constant replace_lanes
1309  (NumConst * ReplaceConstBytes) +
1310  // Dynamic replace_lanes
1311  (NumDynamic - NumCommon) * ReplaceBytes);
1312  if (ConstInitBytes < SplatInitBytes) {
1313  // Create build_vector that will lower to initial v128.const
1314  SmallVector<SDValue, 16> ConstLanes;
1315  for (const SDValue &Lane : Op->op_values()) {
1316  if (IsConstant(Lane)) {
1317  ConstLanes.push_back(Lane);
1318  } else if (LaneT.isFloatingPoint()) {
1319  ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
1320  } else {
1321  ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
1322  }
1323  }
1324  SDValue Result = DAG.getBuildVector(VecT, DL, ConstLanes);
1325  // Add replace_lane instructions for non-const lanes
1326  for (size_t I = 0; I < Lanes; ++I) {
1327  const SDValue &Lane = Op->getOperand(I);
1328  if (!Lane.isUndef() && !IsConstant(Lane))
1329  Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1330  DAG.getConstant(I, DL, MVT::i32));
1331  }
1332  return Result;
1333  }
1334  }
1335  // Use a splat for the initial vector
1336  SDValue Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
1337  // Add replace_lane instructions for other values
1338  for (size_t I = 0; I < Lanes; ++I) {
1339  const SDValue &Lane = Op->getOperand(I);
1340  if (Lane != SplatValue)
1341  Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1342  DAG.getConstant(I, DL, MVT::i32));
1343  }
1344  return Result;
1345 }
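// The byte-size heuristic above, restated as a standalone sketch so the
// numbers are easy to check by hand (an illustrative example, not part of the
// original source; it assumes the same encoding sizes as the code above).
static bool wouldPreferV128Const(size_t Lanes, size_t NumConst,
                                 size_t NumDynamic, size_t NumCommon,
                                 bool SplatIsConstant) {
  // {i32,i64,f32,f64}.const opcode plus the immediate value.
  const size_t ConstBytes = 1 + std::max(size_t(4), 16 / Lanes);
  const size_t SplatBytes = 2;                           // SIMD prefix, opcode
  const size_t SplatConstBytes = SplatBytes + ConstBytes;
  const size_t ReplaceBytes = 3;              // SIMD prefix, opcode, lane index
  const size_t ReplaceConstBytes = ReplaceBytes + ConstBytes;
  const size_t VecConstBytes = 18;    // SIMD prefix, v128.const, 16-byte value
  const size_t ConstInit = VecConstBytes + NumDynamic * ReplaceBytes;
  const size_t SplatInit =
      SplatIsConstant
          ? SplatConstBytes + (NumConst - NumCommon) * ReplaceConstBytes +
                NumDynamic * ReplaceBytes
          : SplatBytes + NumConst * ReplaceConstBytes +
                (NumDynamic - NumCommon) * ReplaceBytes;
  return ConstInit < SplatInit;
}
// For a v4i32 with four distinct constant lanes (Lanes = 4, NumConst = 4,
// NumDynamic = 0, NumCommon = 1, constant splat): ConstInit = 18 and
// SplatInit = 7 + 3 * 8 = 31, so the v128.const form is chosen.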
1346 
1347 SDValue
1348 WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
1349  SelectionDAG &DAG) const {
1350  SDLoc DL(Op);
1351  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
1352  MVT VecType = Op.getOperand(0).getSimpleValueType();
1353  assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
1354  size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
1355 
1356  // Space for two vector args and sixteen mask indices
1357  SDValue Ops[18];
1358  size_t OpIdx = 0;
1359  Ops[OpIdx++] = Op.getOperand(0);
1360  Ops[OpIdx++] = Op.getOperand(1);
1361 
1362  // Expand mask indices to byte indices and materialize them as operands
1363  for (int M : Mask) {
1364  for (size_t J = 0; J < LaneBytes; ++J) {
1365  // Lower undefs (represented by -1 in mask) to zero
1366  uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
1367  Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
1368  }
1369  }
1370 
1371  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1372 }
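// Worked example of the mask expansion above (an illustrative sketch, not
// part of the original source): the same byte-index computation as a
// standalone helper, assuming a 128-bit vector so NumLanes * LaneBytes == 16.
static void expandShuffleMaskToBytes(const int *Mask, size_t NumLanes,
                                     size_t LaneBytes, uint32_t ByteMask[16]) {
  size_t Idx = 0;
  for (size_t I = 0; I < NumLanes; ++I)
    for (size_t J = 0; J < LaneBytes; ++J)
      // Undef lanes (-1 in the mask) are lowered to byte index 0.
      ByteMask[Idx++] =
          Mask[I] == -1 ? 0 : uint32_t(Mask[I]) * LaneBytes + J;
}
// A v4i32 shuffle with mask <0, 4, 1, -1> (LaneBytes = 4) expands to the byte
// indices {0,1,2,3, 16,17,18,19, 4,5,6,7, 0,0,0,0}.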
1373 
1374 SDValue
1375 WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
1376  SelectionDAG &DAG) const {
1377  // Allow constant lane indices, expand variable lane indices
1378  SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
1379  if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
1380  return Op;
1381  else
1382  // Perform default expansion
1383  return SDValue();
1384 }
1385 
1386 static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
1387  EVT LaneT = Op.getSimpleValueType().getVectorElementType();
1388  // 32-bit and 64-bit unrolled shifts will have proper semantics
1389  if (LaneT.bitsGE(MVT::i32))
1390  return DAG.UnrollVectorOp(Op.getNode());
1391  // Otherwise mask the shift value to get proper semantics from 32-bit shift
1392  SDLoc DL(Op);
1393  SDValue ShiftVal = Op.getOperand(1);
1394  uint64_t MaskVal = LaneT.getSizeInBits() - 1;
1395  SDValue MaskedShiftVal = DAG.getNode(
1396  ISD::AND, // mask opcode
1397  DL, ShiftVal.getValueType(), // masked value type
1398  ShiftVal, // original shift value operand
1399  DAG.getConstant(MaskVal, DL, ShiftVal.getValueType()) // mask operand
1400  );
1401 
1402  return DAG.UnrollVectorOp(
1403  DAG.getNode(Op.getOpcode(), // original shift opcode
1404  DL, Op.getValueType(), // original return type
1405  Op.getOperand(0), // original vector operand,
1406  MaskedShiftVal // new masked shift value operand
1407  )
1408  .getNode());
1409 }
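// Worked example (illustrative): for a v16i8 shift the lanes are unrolled to
// 32-bit scalar shifts, so a shift amount of 10 is first masked with
// LaneT.getSizeInBits() - 1 == 7, giving 10 & 7 == 2. This matches the wasm
// lane-wise shift semantics, where the amount is taken modulo the lane width,
// rather than the different result an unmasked 32-bit shift by 10 of an 8-bit
// value would produce.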
1410 
1411 SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
1412  SelectionDAG &DAG) const {
1413  SDLoc DL(Op);
1414 
1415  // Only manually lower vector shifts
1416  assert(Op.getSimpleValueType().isVector());
1417 
1418  // Expand all vector shifts until V8 fixes its implementation
1419  // TODO: remove this once V8 is fixed
1420  if (!Subtarget->hasUnimplementedSIMD128())
1421  return unrollVectorShift(Op, DAG);
1422 
1423  // Unroll non-splat vector shifts
1424  BuildVectorSDNode *ShiftVec;
1425  SDValue SplatVal;
1426  if (!(ShiftVec = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) ||
1427  !(SplatVal = ShiftVec->getSplatValue()))
1428  return unrollVectorShift(Op, DAG);
1429 
1430  // All splats except i64x2 const splats are handled by patterns
1431  auto *SplatConst = dyn_cast<ConstantSDNode>(SplatVal);
1432  if (!SplatConst || Op.getSimpleValueType() != MVT::v2i64)
1433  return Op;
1434 
1435  // i64x2 const splats are custom lowered to avoid unnecessary wraps
1436  unsigned Opcode;
1437  switch (Op.getOpcode()) {
1438  case ISD::SHL:
1439  Opcode = WebAssemblyISD::VEC_SHL;
1440  break;
1441  case ISD::SRA:
1442  Opcode = WebAssemblyISD::VEC_SHR_S;
1443  break;
1444  case ISD::SRL:
1445  Opcode = WebAssemblyISD::VEC_SHR_U;
1446  break;
1447  default:
1448  llvm_unreachable("unexpected opcode");
1449  }
1450  APInt Shift = SplatConst->getAPIntValue().zextOrTrunc(32);
1451  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0),
1452  DAG.getConstant(Shift, DL, MVT::i32));
1453 }
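// For example (illustrative): `shl <2 x i64> %v, <i64 5, i64 5>` reaches the
// custom path above and becomes VEC_SHL(%v, i32 5); truncating the splat
// constant to i32 here avoids the wrap that generic legalization would
// otherwise insert for the 64-bit shift amount.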
1454 
1455 //===----------------------------------------------------------------------===//
1456 // WebAssembly Optimization Hooks
1457 //===----------------------------------------------------------------------===//
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
void setFrameAddressIsTaken(bool T)
uint64_t CallInst * C
static MVT getIntegerVT(unsigned BitWidth)
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition: MathExtras.h:551
This file defines the interfaces that WebAssembly uses to lower LLVM code into a selection DAG...
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition: Triple.h:606
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:110
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:641
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
static Type * getDoubleTy(LLVMContext &C)
Definition: Type.cpp:164
bool isUndef() const
const std::vector< MachineJumpTableEntry > & getJumpTables() const
const GlobalValue * getGlobal() const
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVMContext & Context
Diagnostic information for unsupported feature in backend.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:678
This class represents lattice values for constants.
Definition: AllocatorList.h:23
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
static MVT getVectorVT(MVT VT, unsigned NumElements)
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0...
Definition: ISDOpcodes.h:633
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition: ISDOpcodes.h:391
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool isVector() const
Return true if this is a vector value type.
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:222
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:385
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
Definition: ISDOpcodes.h:730
This class represents a function call, abstracting a target machine&#39;s calling convention.
unsigned Reg
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:252
SDValue getBasicBlock(MachineBasicBlock *MBB)
unsigned getVectorNumElements() const
Function Alias Analysis Results
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
C - The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:33
F(f)
MachineModuleInfo & getMMI() const
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:878
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:693
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void computeSignatureVTs(const FunctionType *Ty, const Function &F, const TargetMachine &TM, SmallVectorImpl< MVT > &Params, SmallVectorImpl< MVT > &Results)
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
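A hedged sketch of creating and addressing such a slot during lowering (MF, DAG, and PtrVT are assumed to be in scope):
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // 8-byte object, 8-byte aligned, not a register spill slot.
  int FI = MFI.CreateStackObject(/*Size=*/8, /*Alignment=*/8, /*isSpillSlot=*/false);
  SDValue Slot = DAG.getFrameIndex(FI, PtrVT);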
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:459
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
Register getFrameRegister(const MachineFunction &MF) const override
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
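A generic example of the pattern, with types chosen only for illustration:
  // Tell the legalizer that f64->f32 truncating stores must be expanded.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);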
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:158
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:209
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1241
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition: ValueTypes.h:135
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
Definition: ISDOpcodes.h:480
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
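An illustrative call, assuming the constructor of some TargetLowering subclass; the width shown is an example, not a statement about this target:
  // Atomics wider than 64 bits are lowered to __atomic_* library calls.
  setMaxAtomicSizeInBitsSupported(64);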
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:80
static Type * getFloatTy(LLVMContext &C)
Definition: Type.cpp:163
#define INT64_MIN
Definition: DataTypes.h:80
Shift and rotation operations.
Definition: ISDOpcodes.h:434
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:217
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
BinOp getOperation() const
Definition: Instructions.h:752
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:190
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
CopyToReg - This node has three operands: a chain, a register number to set to this value...
Definition: ISDOpcodes.h:169
uint64_t getConstantOperandVal(unsigned i) const
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn&#39;t supported on the target and indicate what to d...
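A hedged example with an arbitrary condition code and type:
  // Expand ordered-not-equal f32 comparisons into supported predicates.
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);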
bool isInConsecutiveRegs() const
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:411
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:467
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
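A generic sketch of the pattern; the opcodes and actions below are illustrative only:
  // Expand FSIN on f64 (the legalizer emits a libcall or node sequence) and
  // expand SELECT_CC on i32 into separate SETCC + SELECT nodes.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);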
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:407
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
This class is used to represent EVTs, which are used to parameterize some operations.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Definition: ISDOpcodes.h:72
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
This represents a list of ValueTypes that has been interned by a SelectionDAG.
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:726
unsigned getSizeInBits() const
This is a fast-path instruction selection class that generates poor code and doesn&#39;t support illegal ...
Definition: FastISel.h:66
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:291
This file declares the WebAssembly-specific subclass of TargetMachine.
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:404
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
SDValue getTargetFrameIndex(int FI, EVT VT)
Definition: SelectionDAG.h:638
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:408
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition: ISDOpcodes.h:453
const MCContext & getContext() const
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:200
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
LLVM_NODISCARD size_t size() const
size - Get the string size.
Definition: StringRef.h:130
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
Definition: SelectionDAG.h:872
static mvt_range integer_vector_valuetypes()
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:586
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements. ...
Definition: SelectionDAG.h:761
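A short hedged example (DAG and DL are assumed to be in scope):
  // v4i32 vector with every lane set to the constant 1.
  SDValue One = DAG.getConstant(1, DL, MVT::i32);
  SDValue Splat = DAG.getSplatBuildVector(MVT::v4i32, DL, One);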
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, bool isSigned, const SDLoc &dl, bool doesNotReturn=false, bool isReturnValueUsed=true, bool isPostTypeLegalization=false) const
Returns a pair of (return value, chain).
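A minimal sketch of issuing a libcall from a lowering hook, following the declaration above; the libcall chosen is only an example:
  SDValue Ops[] = {Op.getOperand(0)};
  std::pair<SDValue, SDValue> Call =
      makeLibCall(DAG, RTLIB::SIN_F32, MVT::f32, Ops, /*isSigned=*/false, DL);
  SDValue Result = Call.first;   // Call.second is the output chain.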
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:150
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
MVT getVectorElementType() const
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
unsigned getByValSize() const
bool IsFixed
IsFixed - Is this a "fixed" value, i.e. not passed through a vararg "...".
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
Definition: ISDOpcodes.h:351
TargetInstrInfo - Interface to description of machine instruction set.
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG)
bool is128BitVector() const
Return true if this is a 128-bit vector type.
The memory access is volatile.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
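A hedged example of the builder in a custom inserter (BB, MI, TII, DestReg, and SrcReg are assumed names):
  // Insert "DestReg = COPY SrcReg" before MI, reusing its debug location.
  BuildMI(*BB, MI, MI.getDebugLoc(), TII.get(TargetOpcode::COPY), DestReg)
      .addReg(SrcReg);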
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
Definition: ValueTypes.h:234
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:165
Control flow instructions. These all have token chains.
Definition: ISDOpcodes.h:657
Machine Value Type.
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
unsigned getOrigAlign() const
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
const Triple & getTargetTriple() const
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:64
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
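An illustrative registration for a hypothetical target (the class name is a placeholder):
  // Make f32 a legal type carried in the target's FPR32 register class.
  addRegisterClass(MVT::f32, &MyTarget::FPR32RegClass);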
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
Definition: ISDOpcodes.h:759
iterator_range< value_op_iterator > op_values() const
const SDValue & getOperand(unsigned Num) const
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
Definition: ISDOpcodes.h:356
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:231
OperandFlags
These are flags set on operands, but should be considered private, all access should go through the M...
Definition: MCInstrDesc.h:40
This file provides WebAssembly-specific target descriptions.
unsigned char getTargetFlags() const
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
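A generic sketch of stating a boolean convention; the particular contents values are illustrative, not necessarily what this target selects:
  setBooleanContents(ZeroOrOneBooleanContent);               // scalar i1 results are 0 or 1
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); // vector lanes are 0 or -1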
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:798
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline...
static mvt_range vector_valuetypes()
self_iterator getIterator()
Definition: ilist_node.h:81
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:750
auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range))
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1220
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Definition: Function.cpp:205
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
static unsigned NumFixedArgs
Extended Value Type.
Definition: ValueTypes.h:33
uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition: MathExtras.h:644
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool isPositionIndependent() const
size_t size() const
Definition: SmallVector.h:52
const TargetMachine & getTargetMachine() const
This class contains a discriminated union of information about pointers in memory operands...
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
SDValue getSplatValue(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted value or a null value if this is not a splat.
unsigned first
WebAssemblyTargetLowering(const TargetMachine &TM, const WebAssemblySubtarget &STI)
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type...
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition: ISDOpcodes.h:264
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:643
TokenFactor - This node takes multiple tokens as input and produces a single token result...
Definition: ISDOpcodes.h:49
static MachineBasicBlock * LowerFPToInt(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool IsUnsigned, bool Int64, bool Float64, unsigned LoweredOpcode)
This file declares the WebAssembly-specific subclass of TargetSubtarget.
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:410
Iterator for intrusive lists based on ilist_node.
CCState - This class holds information needed while lowering arguments and return values...
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition: ISDOpcodes.h:363
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:221
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Definition: SelectionDAG.h:744
Provides information about what library functions are available for the current target.
const DebugLoc & getDebugLoc() const
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
Definition: ValueTypes.h:95
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:437
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
Definition: ISDOpcodes.h:605
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
Definition: Constants.cpp:706
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:689
const Function & getFunction() const
Return the LLVM function that this machine code represents.
static mvt_range integer_valuetypes()
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:163
Class for arbitrary precision integers.
Definition: APInt.h:69
unsigned getByValAlign() const
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:444
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:336
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:387
const WebAssemblyRegisterInfo * getRegisterInfo() const override
Flags
Flags values. These may be or'd together.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:666
Representation of each machine instruction.
Definition: MachineInstr.h:64
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
Definition: ISDOpcodes.h:755
This class is derived from MachineFunctionInfo and contains private WebAssembly-specific information ...
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:150
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition: Type.h:214
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:411
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:715
static bool callingConvSupported(CallingConv::ID CallConv)
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:205
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:510
bool isInConsecutiveRegsLast() const
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:44
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
TargetOptions Options
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
This file declares WebAssembly-specific per-machine-function information.
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.stacksave/llvm.stackrestore should save and restore.
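An illustrative call, with a placeholder physical register name:
  // Let llvm.stacksave/llvm.stackrestore save and restore this register.
  setStackPointerRegisterToSaveRestore(MyTarget::SP);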
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
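A hedged example; the libcall enum value is real, but the symbol name is made up for illustration:
  // Point 128-bit float multiplication at a custom runtime routine.
  setLibcallName(RTLIB::MUL_F128, "__my_mulf128");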
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:332
Type * getValueType() const
Definition: GlobalValue.h:279
static CCValAssign getMem(unsigned ValNo, MVT ValVT, unsigned Offset, MVT LocVT, LocInfo HTP)
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
unsigned getOpcode() const
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:636
SDValue getValue(unsigned R) const
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:575
LLVM Value Representation.
Definition: Value.h:72
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:326
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:42
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
bool isUndef() const
Return true if this node is an UNDEF value.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:65
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:48
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Maximum number of store operations that may be substituted for the call to memset, used for functions with OptSize attribute.
const WebAssemblyInstrInfo * getInstrInfo() const override
unsigned getNumOperands() const
Register getReg() const
getReg - Returns the register number.
const SDValue & getOperand(unsigned i) const
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg)
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:416
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
LLVMContext * getContext() const
Definition: SelectionDAG.h:414
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:632
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:404
BRIND - Indirect branch.
Definition: ISDOpcodes.h:662
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
Definition: ISDOpcodes.h:651