//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));
char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)

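// Diagnose a translation failure: abort compilation when GlobalISel is
// required to succeed, otherwise emit a missed-optimization remark and let
// the SelectionDAG fallback take over.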
static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    assert(CurrInst->getDebugLoc() == MI.getDebugLoc() &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

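// Return the vregs for Val, creating them on first use. A value whose type
// splits into several LLTs (e.g. an aggregate) gets one vreg per leaf type;
// constants are translated eagerly so their definitions land in the entry
// block and dominate every use.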
ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

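// Compute the alignment for a memory operation, falling back to the
// DataLayout's alignment for the accessed type when the instruction does not
// carry an explicit one.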
unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
    ValTy = AI->getCompareOperand()->getType();
  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
    ValTy = AI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}

bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
        .addDef(getOrCreateVReg(U))
        .addUse(getOrCreateVReg(*U.getOperand(1)));
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)));
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
  else {
    MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1},
                          MachineInstr::copyFlagsFromInstruction(*CI));
  }

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<unsigned> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  // The target may change the insertion point, but that is fine: a return is
  // always the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs);
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));
  return true;
}

bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went in to optimize switch
  // lowering in there.

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getMBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    // Insert the comparison blocks one after the other.
    MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // handle default case
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  unsigned Base = getOrCreateVReg(*LI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  unsigned Base = getOrCreateVReg(*SI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  for (unsigned i = 0; i < Vals.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
  ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  const SelectInst &SI = cast<SelectInst>(U);
  uint16_t Flags = 0;
  if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition()))
    Flags = MachineInstr::copyFlagsFromInstruction(*Cmp);

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildInstr(TargetOpcode::G_SELECT, {ResRegs[i]},
                          {Tst, Op0Regs[i], Op1Regs[i]}, Flags);
  }

  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (!Regs.empty())
      MIRBuilder.buildCopy(Regs[0], SrcReg);
    else {
      Regs.push_back(SrcReg);
      VMap.getOffsets(U)->push_back(0);
    }
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode, {Res}, {Op});
  return true;
}

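// GEPs are lowered by walking the indices: constant contributions are folded
// into a running byte offset, while each dynamic index is scaled by the
// element size (G_MUL) and added to the base pointer with G_GEP.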
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
        auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetMIB.getReg(0));

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      unsigned GepOffsetReg;
      if (ElementSize != 1) {
        GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        auto ElementSizeMIB = MIRBuilder.buildConstant(
            getLLTForType(*OffsetIRTy, *DL), ElementSize);
        MIRBuilder.buildMul(GepOffsetReg, ElementSizeMIB.getReg(0), IdxReg);
      } else
        GepOffsetReg = IdxReg;

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    auto OffsetMIB =
        MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0));
  MIB.setMemRefs({MemRef});
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(Op)
      .addDef(ResRegs[0])
      .addDef(ResRegs[1])
      .addUse(getOrCreateVReg(*CI.getOperand(0)))
      .addUse(getOrCreateVReg(*CI.getOperand(1)));

  return true;
}

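// Map intrinsics that translate 1:1 onto a single generic instruction (one
// def plus one use per argument) to their generic opcode; anything else
// yields Intrinsic::not_intrinsic.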
unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  }
  return Intrinsic::not_intrinsic;
}

bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {

  unsigned Op = getSimpleIntrinsicOpcode(ID);

  // Is this a simple intrinsic?
  if (Op == Intrinsic::not_intrinsic)
    return false;

  // Yes. Let's translate it.
  SmallVector<llvm::SrcOp, 4> VRegs;
  for (auto &Arg : CI.arg_operands())
    VRegs.push_back(getOrCreateVReg(*Arg));

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
                        MachineInstr::copyFlagsFromInstruction(CI));
  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {

  // If this is a simple intrinsic (that is, we just need to add a def of
  // a vreg and a use for each arg operand), then translate it.
  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
    return true;

  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
    // No stack colouring in O0, discard region information.
    if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
      return true;

    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;

    // Get the underlying objects for the location passed on the lifetime
    // marker.
    SmallVector<const Value *, 4> Allocas;
    GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);

    // Iterate over each underlying object, creating lifetime markers for each
    // static alloca. Quit if we find a non-static alloca.
    for (const Value *V : Allocas) {
      const AllocaInst *AI = dyn_cast<AllocaInst>(V);
      if (!AI)
        continue;

      if (!AI->isStaticAlloca())
        return true;

      MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
    }
    return true;
  }
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else {
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
                                       DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    // FIXME: Get alignment
    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    unsigned Dst = getOrCreateVReg(CI);
    unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2},
                            MachineInstr::copyFlagsFromInstruction(CI));
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1},
                                        MachineInstr::copyFlagsFromInstruction(CI));
      MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2},
                            MachineInstr::copyFlagsFromInstruction(CI));
    }
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemfunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::is_constant:
    // If this wasn't constant-folded away by now, then it's not a
    // constant.
    MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
    return true;
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  case Intrinsic::stacksave: {
    // Save the stack pointer to the location provided by the intrinsic.
    unsigned Reg = getOrCreateVReg(CI);
    unsigned StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(Reg, StackPtr);
    return true;
  }
  case Intrinsic::stackrestore: {
    // Restore the stack pointer from the location provided by the intrinsic.
    unsigned Reg = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(StackPtr, Reg);
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  }
  return false;
}

bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
      .addExternalSymbol(IA.getAsmString().c_str())
      .addImm(ExtraInfo);

  return true;
}

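// Merge the vregs of a split value back into one wide vreg with a chain of
// G_INSERTs, so call lowering can treat the value as a single register;
// unpackRegs performs the inverse with G_EXTRACTs.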
unsigned IRTranslator::packRegs(const Value &V,
                                MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
  LLT BigTy = getLLTForType(*V.getType(), *DL);

  if (Regs.size() == 1)
    return Regs[0];

  unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
    MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
    Dst = NewDst;
  }
  return Dst;
}

void IRTranslator::unpackRegs(const Value &V, unsigned Src,
                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);

  for (unsigned i = 0; i < Regs.size(); ++i)
    MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
}

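// Calls come in three flavours: inline asm, intrinsics (known generic ones
// first, then target intrinsics), and ordinary calls that go through
// CallLowering.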
bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && F->hasDLLImportStorageClass())
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
    bool IsSplitType = valueIsSplit(CI);
    unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
                                     getLLTForType(*CI.getType(), *DL))
                               : getOrCreateVReg(CI);

    SmallVector<unsigned, 8> Args;
    for (auto &Arg: CI.arg_operands())
      Args.push_back(packRegs(*Arg, MIRBuilder));

    MF->getFrameInfo().setHasCalls(true);
    bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });

    if (IsSplitType)
      unpackRegs(CI, Res, MIRBuilder);
    return Success;
  }

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  ArrayRef<unsigned> ResultRegs;
  if (!CI.getType()->isVoidTy())
    ResultRegs = getOrCreateVRegs(CI);

  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, ResultRegs, !CI.doesNotAccessMemory());
  if (isa<FPMathOperator>(CI))
    MIB->copyIRFlags(CI);

  for (auto &Arg : CI.arg_operands()) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg))
      return false;
    MIB.addUse(packRegs(*Arg, MIRBuilder));
  }

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    unsigned Align = Info.align;
    if (Align == 0)
      Align = DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext()));

    uint64_t Size = Info.memVT.getStoreSize();
    MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
                                               Info.flags, Size, Align));
  }

  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res = 0;
  if (!I.getType()->isVoidTy())
    Res = MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
  SmallVector<unsigned, 8> Args;
  for (auto &Arg: I.arg_operands())
    Args.push_back(packRegs(*Arg, MIRBuilder));

  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
                      [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
    return false;

  unpackRegs(I, Res, MIRBuilder);

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}

bool IRTranslator::translateCallBr(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // FIXME: Implement this.
  return false;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
      .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}

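// Static allocas become frame indices; dynamic allocas are expanded inline
// into stack-pointer arithmetic, with extra realignment when the requested
// alignment exceeds the stack alignment.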
bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isSwiftError())
    return false;

  if (AI.isStaticAlloca()) {
    unsigned Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Round the size of the allocation up to the stack alignment size
    // by adding SA-1 to the size. This doesn't overflow because we're
    // computing an address inside an alloca.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}

bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use va_arg
  // anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addImm(DL->getABITypeAlignment(U.getType()));
  return true;
}

bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }

  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }
  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
  unsigned Idx = 0;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}

bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addUse(getOrCreateVReg(*U.getOperand(1)))
      .addUse(getOrCreateVReg(*U.getOperand(2)));
  return true;
}

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}

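// Strong cmpxchg maps onto G_ATOMIC_CMPXCHG_WITH_SUCCESS, which also defines
// the i1 success flag; the weak form is rejected here and left to the
// fallback path.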
bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  if (I.isWeak())
    return false;

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();
  Type *ValType = ResType->Type::getStructElementType(0);

  auto Res = getOrCreateVRegs(I);
  unsigned OldValRes = Res[0];
  unsigned SuccessRes = Res[1];
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
  unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ValType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getSuccessOrdering(),
                                I.getFailureOrdering()));
  return true;
}

bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();

  unsigned Res = getOrCreateVReg(I);
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    llvm_unreachable("Unknown atomicrmw op");
    return false;
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  }

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ResType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getOrdering()));
  return true;
}

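// Second pass: every basic block has been translated by now, so the operands
// of the G_PHIs recorded earlier can be wired up to their incoming values
// and predecessor MachineBasicBlocks.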
1590 void IRTranslator::finishPendingPhis() {
1591 #ifndef NDEBUG
1592  DILocationVerifier Verifier;
1593  GISelObserverWrapper WrapperObserver(&Verifier);
1594  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
1595 #endif // ifndef NDEBUG
1596  for (auto &Phi : PendingPHIs) {
1597  const PHINode *PI = Phi.first;
1598  ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
1599  EntryBuilder->setDebugLoc(PI->getDebugLoc());
1600 #ifndef NDEBUG
1601  Verifier.setCurrentInst(PI);
1602 #endif // ifndef NDEBUG
1603 
1604  // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
1605  // won't create extra control flow here, otherwise we need to find the
1606  // dominating predecessor here (or perhaps force the weirder IRTranslators
1607  // to provide a simple boundary).
1608  SmallSet<const BasicBlock *, 4> HandledPreds;
1609 
1610  for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
1611  auto IRPred = PI->getIncomingBlock(i);
1612  if (HandledPreds.count(IRPred))
1613  continue;
1614 
1615  HandledPreds.insert(IRPred);
1616  ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
1617  for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
1618  assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
1619  "incorrect CFG at MachineBasicBlock level");
1620  for (unsigned j = 0; j < ValRegs.size(); ++j) {
1621  MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
1622  MIB.addUse(ValRegs[j]);
1623  MIB.addMBB(Pred);
1624  }
1625  }
1626  }
1627  }
1628 }
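// Illustrative sketch (an annotation, not part of the original source): PHIs
// are completed only after every block has been translated, so that all
// incoming vregs and MachineBasicBlocks are guaranteed to exist. For IR like
//
//   %x = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]
//
// the skeleton G_PHI created during block translation is filled in here to
// roughly
//
//   %x:_(s32) = G_PHI %a(s32), %bb.0, %b(s32), %bb.1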
1629 
1630 bool IRTranslator::valueIsSplit(const Value &V,
1631  SmallVectorImpl<uint64_t> *Offsets) {
1632  SmallVector<LLT, 4> SplitTys;
1633  if (Offsets && !Offsets->empty())
1634  Offsets->clear();
1635  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
1636  return SplitTys.size() > 1;
1637 }
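// Illustrative sketch (an annotation, not part of the original source):
// computeValueLLTs flattens aggregates, so for a value of type {i64, i32} it
// would produce SplitTys = [s64, s32] (with one offset per element), making
// valueIsSplit return true, while a plain i32 yields a single s32 and the
// function returns false.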
1638 
1639 bool IRTranslator::translate(const Instruction &Inst) {
1640  CurBuilder->setDebugLoc(Inst.getDebugLoc());
1641  EntryBuilder->setDebugLoc(Inst.getDebugLoc());
1642  switch(Inst.getOpcode()) {
1643 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1644  case Instruction::OPCODE: \
1645  return translate##OPCODE(Inst, *CurBuilder.get());
1646 #include "llvm/IR/Instruction.def"
1647  default:
1648  return false;
1649  }
1650 }
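// Illustrative sketch (an annotation, not part of the original source): the
// HANDLE_INST expansion above generates one case per IR opcode; for example,
// for Add it expands to roughly
//
//   case Instruction::Add:
//     return translateAdd(Inst, *CurBuilder.get());
//
// so supporting a new IR instruction only requires providing the matching
// translate##OPCODE method.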
1651 
1652 bool IRTranslator::translate(const Constant &C, unsigned Reg) {
1653  if (auto CI = dyn_cast<ConstantInt>(&C))
1654  EntryBuilder->buildConstant(Reg, *CI);
1655  else if (auto CF = dyn_cast<ConstantFP>(&C))
1656  EntryBuilder->buildFConstant(Reg, *CF);
1657  else if (isa<UndefValue>(C))
1658  EntryBuilder->buildUndef(Reg);
1659  else if (isa<ConstantPointerNull>(C)) {
1660  // As we are trying to build a constant val of 0 into a pointer,
1661  // insert a cast to make them correct with respect to types.
1662  unsigned NullSize = DL->getTypeSizeInBits(C.getType());
1663  auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
1664  auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
1665  unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
1666  EntryBuilder->buildCast(Reg, ZeroReg);
1667  } else if (auto GV = dyn_cast<GlobalValue>(&C))
1668  EntryBuilder->buildGlobalValue(Reg, GV);
1669  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
1670  if (!CAZ->getType()->isVectorTy())
1671  return false;
1672  // Return the scalar if it is a <1 x Ty> vector.
1673  if (CAZ->getNumElements() == 1)
1674  return translate(*CAZ->getElementValue(0u), Reg);
1675  SmallVector<unsigned, 4> Ops;
1676  for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
1677  Constant &Elt = *CAZ->getElementValue(i);
1678  Ops.push_back(getOrCreateVReg(Elt));
1679  }
1680  EntryBuilder->buildBuildVector(Reg, Ops);
1681  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
1682  // Return the scalar if it is a <1 x Ty> vector.
1683  if (CV->getNumElements() == 1)
1684  return translate(*CV->getElementAsConstant(0), Reg);
1685  SmallVector<unsigned, 4> Ops;
1686  for (unsigned i = 0; i < CV->getNumElements(); ++i) {
1687  Constant &Elt = *CV->getElementAsConstant(i);
1688  Ops.push_back(getOrCreateVReg(Elt));
1689  }
1690  EntryBuilder->buildBuildVector(Reg, Ops);
1691  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
1692  switch(CE->getOpcode()) {
1693 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1694  case Instruction::OPCODE: \
1695  return translate##OPCODE(*CE, *EntryBuilder.get());
1696 #include "llvm/IR/Instruction.def"
1697  default:
1698  return false;
1699  }
1700  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
1701  if (CV->getNumOperands() == 1)
1702  return translate(*CV->getOperand(0), Reg);
1703  SmallVector<unsigned, 4> Ops;
1704  for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
1705  Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
1706  }
1707  EntryBuilder->buildBuildVector(Reg, Ops);
1708  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
1709  EntryBuilder->buildBlockAddress(Reg, BA);
1710  } else
1711  return false;
1712 
1713  return true;
1714 }
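// Illustrative sketch (an annotation, not part of the original source):
// constants are materialized through EntryBuilder, so requesting a vreg for
// `i64 42` emits roughly
//
//   %c:_(s64) = G_CONSTANT i64 42
//
// into the shared arguments/constants entry block, where every later block
// can reuse it.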
1715 
1716 void IRTranslator::finalizeFunction() {
1717  // Release the memory used by the different maps we
1718  // needed during the translation.
1719  PendingPHIs.clear();
1720  VMap.reset();
1721  FrameIndices.clear();
1722  MachinePreds.clear();
1723  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
1724  // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid
1725  // destroying it twice (in ~IRTranslator() and ~LLVMContext())
1726  EntryBuilder.reset();
1727  CurBuilder.reset();
1728 }
1729 
1730  bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
1731  MF = &CurMF;
1732  const Function &F = MF->getFunction();
1733  if (F.empty())
1734  return false;
1735  GISelCSEAnalysisWrapper &Wrapper =
1736  getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
1737  // Set the CSEConfig and run the analysis.
1738  GISelCSEInfo *CSEInfo = nullptr;
1739  TPC = &getAnalysis<TargetPassConfig>();
1740  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
1741  ? EnableCSEInIRTranslator
1742  : TPC->isGISelCSEEnabled();
1743 
1744  if (EnableCSE) {
1745  EntryBuilder = make_unique<CSEMIRBuilder>(CurMF);
1746  CSEInfo = &Wrapper.get(TPC->getCSEConfig());
1747  EntryBuilder->setCSEInfo(CSEInfo);
1748  CurBuilder = make_unique<CSEMIRBuilder>(CurMF);
1749  CurBuilder->setCSEInfo(CSEInfo);
1750  } else {
1751  EntryBuilder = make_unique<MachineIRBuilder>();
1752  CurBuilder = make_unique<MachineIRBuilder>();
1753  }
1754  CLI = MF->getSubtarget().getCallLowering();
1755  CurBuilder->setMF(*MF);
1756  EntryBuilder->setMF(*MF);
1757  MRI = &MF->getRegInfo();
1758  DL = &F.getParent()->getDataLayout();
1759  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);
1760 
1761  assert(PendingPHIs.empty() && "stale PHIs");
1762 
1763  if (!DL->isLittleEndian()) {
1764  // Currently we don't properly handle big endian code.
1765  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1766  F.getSubprogram(), &F.getEntryBlock());
1767  R << "unable to translate in big endian mode";
1768  reportTranslationError(*MF, *TPC, *ORE, R);
1769  }
1770 
1771  // Release the per-function state when we return, whether we succeeded or not.
1772  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
1773 
1774  // Setup a separate basic-block for the arguments and constants
1775  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
1776  MF->push_back(EntryBB);
1777  EntryBuilder->setMBB(*EntryBB);
1778 
1779  // Create all blocks, in IR order, to preserve the layout.
1780  for (const BasicBlock &BB: F) {
1781  auto *&MBB = BBToMBB[&BB];
1782 
1783  MBB = MF->CreateMachineBasicBlock(&BB);
1784  MF->push_back(MBB);
1785 
1786  if (BB.hasAddressTaken())
1787  MBB->setHasAddressTaken();
1788  }
1789 
1790  // Make our arguments/constants entry block fallthrough to the IR entry block.
1791  EntryBB->addSuccessor(&getMBB(F.front()));
1792 
1793  // Lower the actual args into this basic block.
1794  SmallVector<unsigned, 8> VRegArgs;
1795  for (const Argument &Arg: F.args()) {
1796  if (DL->getTypeStoreSize(Arg.getType()) == 0)
1797  continue; // Don't handle zero sized types.
1798  VRegArgs.push_back(
1799  MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
1800  }
1801 
1802  // We don't currently support translating swifterror or swiftself functions.
1803  for (auto &Arg : F.args()) {
1804  if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) {
1805  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1806  F.getSubprogram(), &F.getEntryBlock());
1807  R << "unable to lower arguments due to swifterror/swiftself: "
1808  << ore::NV("Prototype", F.getType());
1809  reportTranslationError(*MF, *TPC, *ORE, R);
1810  return false;
1811  }
1812  }
1813 
1814  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
1815  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1816  F.getSubprogram(), &F.getEntryBlock());
1817  R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
1818  reportTranslationError(*MF, *TPC, *ORE, R);
1819  return false;
1820  }
1821 
1822  auto ArgIt = F.arg_begin();
1823  for (auto &VArg : VRegArgs) {
1824  // If the argument is an unsplit scalar then don't use unpackRegs to avoid
1825  // creating redundant copies.
1826  if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
1827  auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
1828  assert(VRegs.empty() && "VRegs already populated?");
1829  VRegs.push_back(VArg);
1830  } else {
1831  unpackRegs(*ArgIt, VArg, *EntryBuilder.get());
1832  }
1833  ArgIt++;
1834  }
1835 
1836  // Need to visit defs before uses when translating instructions.
1837  GISelObserverWrapper WrapperObserver;
1838  if (EnableCSE && CSEInfo)
1839  WrapperObserver.addObserver(CSEInfo);
1840  {
1841  ReversePostOrderTraversal<const Function *> RPOT(&F);
1842 #ifndef NDEBUG
1843  DILocationVerifier Verifier;
1844  WrapperObserver.addObserver(&Verifier);
1845 #endif // ifndef NDEBUG
1846  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
1847  for (const BasicBlock *BB : RPOT) {
1848  MachineBasicBlock &MBB = getMBB(*BB);
1849  // Set the insertion point of all the following translations to
1850  // the end of this basic block.
1851  CurBuilder->setMBB(MBB);
1852 
1853  for (const Instruction &Inst : *BB) {
1854 #ifndef NDEBUG
1855  Verifier.setCurrentInst(&Inst);
1856 #endif // ifndef NDEBUG
1857  if (translate(Inst))
1858  continue;
1859 
1860  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1861  Inst.getDebugLoc(), BB);
1862  R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
1863 
1864  if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
1865  std::string InstStrStorage;
1866  raw_string_ostream InstStr(InstStrStorage);
1867  InstStr << Inst;
1868 
1869  R << ": '" << InstStr.str() << "'";
1870  }
1871 
1872  reportTranslationError(*MF, *TPC, *ORE, R);
1873  return false;
1874  }
1875  }
1876 #ifndef NDEBUG
1877  WrapperObserver.removeObserver(&Verifier);
1878 #endif
1879  }
1880 
1881  finishPendingPhis();
1882 
1883  // Merge the argument lowering and constants block with its single
1884  // successor, the LLVM-IR entry block. We want the basic block to
1885  // be maximal.
1886  assert(EntryBB->succ_size() == 1 &&
1887  "Custom BB used for lowering should have only one successor");
1888  // Get the successor of the current entry block.
1889  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
1890  assert(NewEntryBB.pred_size() == 1 &&
1891  "LLVM-IR entry block has a predecessor!?");
1892  // Move all the instruction from the current entry block to the
1893  // new entry block.
1894  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
1895  EntryBB->end());
1896 
1897  // Update the live-in information for the new entry block.
1898  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
1899  NewEntryBB.addLiveIn(LiveIn);
1900  NewEntryBB.sortUniqueLiveIns();
1901 
1902  // Get rid of the now empty basic block.
1903  EntryBB->removeSuccessor(&NewEntryBB);
1904  MF->remove(EntryBB);
1905  MF->DeleteMachineBasicBlock(EntryBB);
1906 
1907  assert(&MF->front() == &NewEntryBB &&
1908  "New entry wasn't next in the list of basic block!");
1909 
1910  // Initialize stack protector information.
1911  StackProtector &SP = getAnalysis<StackProtector>();
1912  SP.copyToMachineFrameInfo(MF->getFrameInfo());
1913 
1914  return false;
1915 }
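// Illustrative usage note (an annotation, not part of the original source):
// the generic MIR produced by this pass can be inspected by stopping the
// GlobalISel pipeline right after translation, e.g.
//
//   llc -global-isel -stop-after=irtranslator -o - input.ll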