FastISel.cpp
1 //===- FastISel.cpp - Implementation of the FastISel class ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the implementation of the FastISel class.
10 //
11 // "Fast" instruction selection is designed to emit very poor code quickly.
12 // Also, it is not designed to be able to do much lowering, so most illegal
13 // types (e.g. i64 on 32-bit targets) and operations are not supported. It is
14 // also not intended to be able to do much optimization, except in a few cases
15 // where doing optimizations reduces overall compile time. For example, folding
16 // constants into immediate fields is often done, because it's cheap and it
17 // reduces the number of instructions later phases have to examine.
18 //
19 // "Fast" instruction selection is able to fail gracefully and transfer
20 // control to the SelectionDAG selector for operations that it doesn't
21 // support. In many cases, this allows us to avoid duplicating a lot of
22 // the complicated lowering logic that SelectionDAG currently has.
23 //
24 // The intended use for "fast" instruction selection is "-O0" mode
25 // compilation, where the quality of the generated code is irrelevant when
26 // weighed against the speed at which the code can be generated. Also,
27 // at -O0, the LLVM optimizers are not running, and this makes the
28 // compile time of codegen a much higher portion of the overall compile
29 // time. Despite its limitations, "fast" instruction selection is able to
30 // handle enough code on its own to provide noticeable overall speedups
31 // in -O0 compiles.
32 //
33 // Basic operations are supported in a target-independent way, by reading
34 // the same instruction descriptions that the SelectionDAG selector reads,
35 // and identifying simple arithmetic operations that can be directly selected
36 // from simple operators. More complicated operations currently require
37 // target-specific code.
38 //
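// For example (illustrative): for IR such as "%y = add i64 %x, 42", the
// constant 42 can be folded straight into an immediate operand through the
// fastEmit_ri_() helpers below instead of first being materialized into a
// register; if no immediate form exists, selection falls back to a
// register-register form or, failing that, to SelectionDAG.
//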
39 //===----------------------------------------------------------------------===//
40 
41 #include "llvm/CodeGen/FastISel.h"
42 #include "llvm/ADT/APFloat.h"
43 #include "llvm/ADT/APSInt.h"
44 #include "llvm/ADT/DenseMap.h"
45 #include "llvm/ADT/Optional.h"
46 #include "llvm/ADT/SmallPtrSet.h"
47 #include "llvm/ADT/SmallString.h"
48 #include "llvm/ADT/SmallVector.h"
49 #include "llvm/ADT/Statistic.h"
52 #include "llvm/CodeGen/Analysis.h"
63 #include "llvm/CodeGen/StackMaps.h"
68 #include "llvm/IR/Argument.h"
69 #include "llvm/IR/Attributes.h"
70 #include "llvm/IR/BasicBlock.h"
71 #include "llvm/IR/CallingConv.h"
72 #include "llvm/IR/Constant.h"
73 #include "llvm/IR/Constants.h"
74 #include "llvm/IR/DataLayout.h"
75 #include "llvm/IR/DebugLoc.h"
76 #include "llvm/IR/DerivedTypes.h"
77 #include "llvm/IR/DiagnosticInfo.h"
78 #include "llvm/IR/Function.h"
80 #include "llvm/IR/GlobalValue.h"
81 #include "llvm/IR/InlineAsm.h"
82 #include "llvm/IR/InstrTypes.h"
83 #include "llvm/IR/Instruction.h"
84 #include "llvm/IR/Instructions.h"
85 #include "llvm/IR/IntrinsicInst.h"
86 #include "llvm/IR/LLVMContext.h"
87 #include "llvm/IR/Mangler.h"
88 #include "llvm/IR/Metadata.h"
89 #include "llvm/IR/Operator.h"
90 #include "llvm/IR/PatternMatch.h"
91 #include "llvm/IR/Type.h"
92 #include "llvm/IR/User.h"
93 #include "llvm/IR/Value.h"
94 #include "llvm/MC/MCContext.h"
95 #include "llvm/MC/MCInstrDesc.h"
96 #include "llvm/Support/Casting.h"
97 #include "llvm/Support/Debug.h"
100 #include "llvm/Support/MathExtras.h"
104 #include <algorithm>
105 #include <cassert>
106 #include <cstdint>
107 #include <iterator>
108 #include <utility>
109 
110 using namespace llvm;
111 using namespace PatternMatch;
112 
113 #define DEBUG_TYPE "isel"
114 
115 STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
116  "target-independent selector");
117 STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
118  "target-specific selector");
119 STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
120 
121 /// Set the current block to which generated machine instructions will be
122 /// appended.
123 void FastISel::startNewBlock() {
124  assert(LocalValueMap.empty() &&
125  "local values should be cleared after finishing a BB");
126 
127  // Instructions are appended to FuncInfo.MBB. If the basic block already
128  // contains labels or copies, use the last instruction as the last local
129  // value.
130  EmitStartPt = nullptr;
131  if (!FuncInfo.MBB->empty())
132  EmitStartPt = &FuncInfo.MBB->back();
133  LastLocalValue = EmitStartPt;
134 }
135 
136 void FastISel::finishBasicBlock() { flushLocalValueMap(); }
137 
138 bool FastISel::lowerArguments() {
139  if (!FuncInfo.CanLowerReturn)
140  // Fallback to SDISel argument lowering code to deal with sret pointer
141  // parameter.
142  return false;
143 
144  if (!fastLowerArguments())
145  return false;
146 
147  // Enter arguments into ValueMap for uses in non-entry BBs.
148  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
149  E = FuncInfo.Fn->arg_end();
150  I != E; ++I) {
151  DenseMap<const Value *, Register>::iterator VI = LocalValueMap.find(&*I);
152  assert(VI != LocalValueMap.end() && "Missed an argument?");
153  FuncInfo.ValueMap[&*I] = VI->second;
154  }
155  return true;
156 }
157 
158 /// Return the defined register if this instruction defines exactly one
159 /// virtual register and uses no other virtual registers. Otherwise return 0.
160 static Register findLocalRegDef(MachineInstr &MI) {
161  Register RegDef;
162  for (const MachineOperand &MO : MI.operands()) {
163  if (!MO.isReg())
164  continue;
165  if (MO.isDef()) {
166  if (RegDef)
167  return Register();
168  RegDef = MO.getReg();
169  } else if (MO.getReg().isVirtual()) {
170  // This is another use of a vreg. Don't delete it.
171  return Register();
172  }
173  }
174  return RegDef;
175 }
176 
177 static bool isRegUsedByPhiNodes(Register DefReg,
178  FunctionLoweringInfo &FuncInfo) {
179  for (auto &P : FuncInfo.PHINodesToUpdate)
180  if (P.second == DefReg)
181  return true;
182  return false;
183 }
184 
185 void FastISel::flushLocalValueMap() {
186  // If FastISel bails out, it could leave local value instructions behind
187  // that aren't used for anything. Detect and erase those.
188  if (LastLocalValue != EmitStartPt) {
189  // Save the first instruction after local values, for later.
190  MachineBasicBlock::iterator FirstNonValue(LastLocalValue);
191  ++FirstNonValue;
192 
193  MachineBasicBlock::reverse_iterator RE =
194  EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
195  : FuncInfo.MBB->rend();
196  MachineBasicBlock::reverse_iterator RI(LastLocalValue);
197  for (MachineInstr &LocalMI :
198  llvm::make_early_inc_range(llvm::make_range(RI, RE))) {
199  Register DefReg = findLocalRegDef(LocalMI);
200  if (!DefReg)
201  continue;
202  if (FuncInfo.RegsWithFixups.count(DefReg))
203  continue;
204  bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
205  if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
206  if (EmitStartPt == &LocalMI)
207  EmitStartPt = EmitStartPt->getPrevNode();
208  LLVM_DEBUG(dbgs() << "removing dead local value materialization"
209  << LocalMI);
210  LocalMI.eraseFromParent();
211  }
212  }
213 
214  if (FirstNonValue != FuncInfo.MBB->end()) {
215  // See if there are any local value instructions left. If so, we want to
216  // make sure the first one has a debug location; if it doesn't, use the
217  // first non-value instruction's debug location.
218 
219  // If EmitStartPt is non-null, this block had copies at the top before
220  // FastISel started doing anything; it points to the last one, so the
221  // first local value instruction is the one after EmitStartPt.
222  // If EmitStartPt is null, the first local value instruction is at the
223  // top of the block.
224  MachineBasicBlock::iterator FirstLocalValue =
225  EmitStartPt ? ++MachineBasicBlock::iterator(EmitStartPt)
226  : FuncInfo.MBB->begin();
227  if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
228  FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
229  }
230  }
231 
232  LocalValueMap.clear();
233  LastLocalValue = EmitStartPt;
234  recomputeInsertPt();
235  SavedInsertPt = FuncInfo.InsertPt;
236 }
237 
238 Register FastISel::getRegForValue(const Value *V) {
239  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
240  // Don't handle non-simple values in FastISel.
241  if (!RealVT.isSimple())
242  return Register();
243 
244  // Ignore illegal types. We must do this before looking up the value
245  // in ValueMap because Arguments are given virtual registers regardless
246  // of whether FastISel can handle them.
247  MVT VT = RealVT.getSimpleVT();
248  if (!TLI.isTypeLegal(VT)) {
249  // Handle integer promotions, though, because they're common and easy.
250  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
251  VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
252  else
253  return Register();
254  }
255 
256  // Look up the value to see if we already have a register for it.
257  Register Reg = lookUpRegForValue(V);
258  if (Reg)
259  return Reg;
260 
261  // In bottom-up mode, just create the virtual register which will be used
262  // to hold the value. It will be materialized later.
263  if (isa<Instruction>(V) &&
264  (!isa<AllocaInst>(V) ||
265  !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
266  return FuncInfo.InitializeRegForValue(V);
267 
268  SavePoint SaveInsertPt = enterLocalValueArea();
269 
270  // Materialize the value in a register. Emit any instructions in the
271  // local value area.
272  Reg = materializeRegForValue(V, VT);
273 
274  leaveLocalValueArea(SaveInsertPt);
275 
276  return Reg;
277 }
278 
279 Register FastISel::materializeConstant(const Value *V, MVT VT) {
280  Register Reg;
281  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
282  if (CI->getValue().getActiveBits() <= 64)
283  Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
284  } else if (isa<AllocaInst>(V))
285  Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
286  else if (isa<ConstantPointerNull>(V))
287  // Translate this as an integer zero so that it can be
288  // local-CSE'd with actual integer zeros.
289  Reg =
290  getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getType())));
291  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
292  if (CF->isNullValue())
293  Reg = fastMaterializeFloatZero(CF);
294  else
295  // Try to emit the constant directly.
296  Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
297 
298  if (!Reg) {
299  // Try to emit the constant by using an integer constant with a cast.
300  const APFloat &Flt = CF->getValueAPF();
301  EVT IntVT = TLI.getPointerTy(DL);
302  uint32_t IntBitWidth = IntVT.getSizeInBits();
303  APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
304  bool isExact;
305  (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
306  if (isExact) {
307  Register IntegerReg =
308  getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
309  if (IntegerReg)
310  Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
311  IntegerReg);
312  }
313  }
314  } else if (const auto *Op = dyn_cast<Operator>(V)) {
315  if (!selectOperator(Op, Op->getOpcode()))
316  if (!isa<Instruction>(Op) ||
317  !fastSelectInstruction(cast<Instruction>(Op)))
318  return 0;
319  Reg = lookUpRegForValue(Op);
320  } else if (isa<UndefValue>(V)) {
321  Reg = createResultReg(TLI.getRegClassFor(VT));
322  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
323  TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
324  }
325  return Reg;
326 }
327 
328 /// Helper for getRegForValue. This function is called when the value isn't
329 /// already available in a register and must be materialized with new
330 /// instructions.
331 Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
332  Register Reg;
333  // Give the target-specific code a try first.
334  if (isa<Constant>(V))
335  Reg = fastMaterializeConstant(cast<Constant>(V));
336 
337  // If target-specific code couldn't or didn't want to handle the value, then
338  // give target-independent code a try.
339  if (!Reg)
340  Reg = materializeConstant(V, VT);
341 
342  // Don't cache constant materializations in the general ValueMap.
343  // To do so would require tracking what uses they dominate.
344  if (Reg) {
345  LocalValueMap[V] = Reg;
346  LastLocalValue = MRI.getVRegDef(Reg);
347  }
348  return Reg;
349 }
350 
351 Register FastISel::lookUpRegForValue(const Value *V) {
352  // Look up the value to see if we already have a register for it. We
353  // cache values defined by Instructions across blocks, and other values
354  // only locally. This is because Instructions already have the SSA
355  // def-dominates-use requirement enforced.
356  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(V);
357  if (I != FuncInfo.ValueMap.end())
358  return I->second;
359  return LocalValueMap[V];
360 }
361 
362 void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
363  if (!isa<Instruction>(I)) {
364  LocalValueMap[I] = Reg;
365  return;
366  }
367 
368  Register &AssignedReg = FuncInfo.ValueMap[I];
369  if (!AssignedReg)
370  // Use the new register.
371  AssignedReg = Reg;
372  else if (Reg != AssignedReg) {
373  // Arrange for uses of AssignedReg to be replaced by uses of Reg.
374  for (unsigned i = 0; i < NumRegs; i++) {
375  FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
376  FuncInfo.RegsWithFixups.insert(Reg + i);
377  }
378 
379  AssignedReg = Reg;
380  }
381 }
382 
383 Register FastISel::getRegForGEPIndex(const Value *Idx) {
384  Register IdxN = getRegForValue(Idx);
385  if (!IdxN)
386  // Unhandled operand. Halt "fast" selection and bail.
387  return Register();
388 
389  // If the index is smaller or larger than intptr_t, truncate or extend it.
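  // For example, on a 64-bit target an i32 GEP index is sign-extended to the
  // 64-bit pointer width here before it is scaled and added to the base
  // pointer in selectGetElementPtr().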
390  MVT PtrVT = TLI.getPointerTy(DL);
391  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
392  if (IdxVT.bitsLT(PtrVT)) {
393  IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
394  } else if (IdxVT.bitsGT(PtrVT)) {
395  IdxN =
396  fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
397  }
398  return IdxN;
399 }
400 
401 void FastISel::recomputeInsertPt() {
402  if (getLastLocalValue()) {
403  FuncInfo.InsertPt = getLastLocalValue();
404  FuncInfo.MBB = FuncInfo.InsertPt->getParent();
405  ++FuncInfo.InsertPt;
406  } else
407  FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
408 
409  // Now skip past any EH_LABELs, which must remain at the beginning.
410  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
411  FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
412  ++FuncInfo.InsertPt;
413 }
414 
415 void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
416  MachineBasicBlock::iterator E) {
417  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
418  "Invalid iterator!");
419  while (I != E) {
420  if (SavedInsertPt == I)
421  SavedInsertPt = E;
422  if (EmitStartPt == I)
423  EmitStartPt = E.isValid() ? &*E : nullptr;
424  if (LastLocalValue == I)
425  LastLocalValue = E.isValid() ? &*E : nullptr;
426 
427  MachineInstr *Dead = &*I;
428  ++I;
429  Dead->eraseFromParent();
430  ++NumFastIselDead;
431  }
432  recomputeInsertPt();
433 }
434 
435 FastISel::SavePoint FastISel::enterLocalValueArea() {
436  SavePoint OldInsertPt = FuncInfo.InsertPt;
437  recomputeInsertPt();
438  return OldInsertPt;
439 }
440 
441 void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
442  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
443  LastLocalValue = &*std::prev(FuncInfo.InsertPt);
444 
445  // Restore the previous insert position.
446  FuncInfo.InsertPt = OldInsertPt;
447 }
448 
449 bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
450  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
451  if (VT == MVT::Other || !VT.isSimple())
452  // Unhandled type. Halt "fast" selection and bail.
453  return false;
454 
455  // We only handle legal types. For example, on x86-32 the instruction
456  // selector contains all of the 64-bit instructions from x86-64,
457  // under the assumption that i64 won't be used if the target doesn't
458  // support it.
459  if (!TLI.isTypeLegal(VT)) {
460  // MVT::i1 is special. Allow AND, OR, or XOR because they
461  // don't require additional zeroing, which makes them easy.
462  if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
463  ISDOpcode == ISD::XOR))
464  VT = TLI.getTypeToTransformTo(I->getContext(), VT);
465  else
466  return false;
467  }
468 
469  // Check if the first operand is a constant, and handle it as "ri". At -O0,
470  // we don't have anything that canonicalizes operand order.
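  // For example, "add i32 10, %x" has its constant first; since add is
  // commutative we can still emit an add-immediate by putting %x in a
  // register and folding 10 as the immediate operand.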
471  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
472  if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
473  Register Op1 = getRegForValue(I->getOperand(1));
474  if (!Op1)
475  return false;
476 
477  Register ResultReg =
478  fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
479  VT.getSimpleVT());
480  if (!ResultReg)
481  return false;
482 
483  // We successfully emitted code for the given LLVM Instruction.
484  updateValueMap(I, ResultReg);
485  return true;
486  }
487 
488  Register Op0 = getRegForValue(I->getOperand(0));
489  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
490  return false;
491 
492  // Check if the second operand is a constant and handle it appropriately.
493  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
494  uint64_t Imm = CI->getSExtValue();
495 
496  // Transform "sdiv exact X, 8" -> "sra X, 3".
497  if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
498  cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
499  Imm = Log2_64(Imm);
500  ISDOpcode = ISD::SRA;
501  }
502 
503  // Transform "urem x, pow2" -> "and x, pow2-1".
504  if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
505  isPowerOf2_64(Imm)) {
506  --Imm;
507  ISDOpcode = ISD::AND;
508  }
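    // For example, "sdiv exact i32 %x, 8" is emitted as a right shift by
    // Log2_64(8) == 3, and "urem i32 %x, 16" as an AND with 15.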
509 
510  Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
511  VT.getSimpleVT());
512  if (!ResultReg)
513  return false;
514 
515  // We successfully emitted code for the given LLVM Instruction.
516  updateValueMap(I, ResultReg);
517  return true;
518  }
519 
520  Register Op1 = getRegForValue(I->getOperand(1));
521  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
522  return false;
523 
524  // Now we have both operands in registers. Emit the instruction.
525  Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
526  ISDOpcode, Op0, Op1);
527  if (!ResultReg)
528  // Target-specific code wasn't able to find a machine opcode for
529  // the given ISD opcode and type. Halt "fast" selection and bail.
530  return false;
531 
532  // We successfully emitted code for the given LLVM Instruction.
533  updateValueMap(I, ResultReg);
534  return true;
535 }
536 
537 bool FastISel::selectGetElementPtr(const User *I) {
538  Register N = getRegForValue(I->getOperand(0));
539  if (!N) // Unhandled operand. Halt "fast" selection and bail.
540  return false;
541 
542  // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
543  // and bail.
544  if (isa<VectorType>(I->getType()))
545  return false;
546 
547  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
548  // into a single N = N + TotalOffset.
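  // For example, "getelementptr inbounds {i32, i64}, ptr %p, i64 0, i32 1"
  // only adds the struct field offset to TotalOffs; a single ADD-immediate is
  // emitted at the end (or earlier if TotalOffs ever reaches MaxOffs).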
549  uint64_t TotalOffs = 0;
550  // FIXME: What's a good SWAG number for MaxOffs?
551  uint64_t MaxOffs = 2048;
552  MVT VT = TLI.getPointerTy(DL);
553  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
554  GTI != E; ++GTI) {
555  const Value *Idx = GTI.getOperand();
556  if (StructType *StTy = GTI.getStructTypeOrNull()) {
557  uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
558  if (Field) {
559  // N = N + Offset
560  TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
561  if (TotalOffs >= MaxOffs) {
562  N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
563  if (!N) // Unhandled operand. Halt "fast" selection and bail.
564  return false;
565  TotalOffs = 0;
566  }
567  }
568  } else {
569  Type *Ty = GTI.getIndexedType();
570 
571  // If this is a constant subscript, handle it quickly.
572  if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
573  if (CI->isZero())
574  continue;
575  // N = N + Offset
576  uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
577  TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
578  if (TotalOffs >= MaxOffs) {
579  N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
580  if (!N) // Unhandled operand. Halt "fast" selection and bail.
581  return false;
582  TotalOffs = 0;
583  }
584  continue;
585  }
586  if (TotalOffs) {
587  N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
588  if (!N) // Unhandled operand. Halt "fast" selection and bail.
589  return false;
590  TotalOffs = 0;
591  }
592 
593  // N = N + Idx * ElementSize;
594  uint64_t ElementSize = DL.getTypeAllocSize(Ty);
595  Register IdxN = getRegForGEPIndex(Idx);
596  if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
597  return false;
598 
599  if (ElementSize != 1) {
600  IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
601  if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
602  return false;
603  }
604  N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
605  if (!N) // Unhandled operand. Halt "fast" selection and bail.
606  return false;
607  }
608  }
609  if (TotalOffs) {
610  N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
611  if (!N) // Unhandled operand. Halt "fast" selection and bail.
612  return false;
613  }
614 
615  // We successfully emitted code for the given LLVM Instruction.
616  updateValueMap(I, N);
617  return true;
618 }
619 
620 bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
621  const CallInst *CI, unsigned StartIdx) {
622  for (unsigned i = StartIdx, e = CI->arg_size(); i != e; ++i) {
623  Value *Val = CI->getArgOperand(i);
624  // Check for constants and encode them with a StackMaps::ConstantOp prefix.
625  if (const auto *C = dyn_cast<ConstantInt>(Val)) {
626  Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
627  Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
628  } else if (isa<ConstantPointerNull>(Val)) {
629  Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
630  Ops.push_back(MachineOperand::CreateImm(0));
631  } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
632  // Values coming from a stack location also require a special encoding,
633  // but that is added later on by the target specific frame index
634  // elimination implementation.
635  auto SI = FuncInfo.StaticAllocaMap.find(AI);
636  if (SI != FuncInfo.StaticAllocaMap.end())
637  Ops.push_back(MachineOperand::CreateFI(SI->second));
638  else
639  return false;
640  } else {
641  Register Reg = getRegForValue(Val);
642  if (!Reg)
643  return false;
644  Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
645  }
646  }
647  return true;
648 }
649 
650 bool FastISel::selectStackmap(const CallInst *I) {
651  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
652  // [live variables...])
653  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
654  "Stackmap cannot return a value.");
655 
656  // The stackmap intrinsic only records the live variables (the arguments
657  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
658  // intrinsic, this won't be lowered to a function call. This means we don't
659  // have to worry about calling conventions and target-specific lowering code.
660  // Instead we perform the call lowering right here.
661  //
662  // CALLSEQ_START(0, 0...)
663  // STACKMAP(id, nbytes, ...)
664  // CALLSEQ_END(0, 0)
665  //
666  SmallVector<MachineOperand, 32> Ops;
667 
668  // Add the <id> and <numBytes> constants.
669  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
670  "Expected a constant integer.");
671  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
672  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
673 
674  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
675  "Expected a constant integer.");
676  const auto *NumBytes =
677  cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
678  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
679 
680  // Push live variables for the stack map (skipping the first two arguments
681  // <id> and <numBytes>).
682  if (!addStackMapLiveVars(Ops, I, 2))
683  return false;
684 
685  // We are not adding any register mask info here, because the stackmap doesn't
686  // clobber anything.
687 
688  // Add scratch registers as implicit def and early clobber.
689  CallingConv::ID CC = I->getCallingConv();
690  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
691  for (unsigned i = 0; ScratchRegs[i]; ++i)
692  Ops.push_back(MachineOperand::CreateReg(
693  ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
694  /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));
695 
696  // Issue CALLSEQ_START
697  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
698  auto Builder =
699  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
700  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
701  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
702  Builder.addImm(0);
703 
704  // Issue STACKMAP.
705  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
706  TII.get(TargetOpcode::STACKMAP));
707  for (auto const &MO : Ops)
708  MIB.add(MO);
709 
710  // Issue CALLSEQ_END
711  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
712  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
713  .addImm(0)
714  .addImm(0);
715 
716  // Inform the Frame Information that we have a stackmap in this function.
717  FuncInfo.MF->getFrameInfo().setHasStackMap();
718 
719  return true;
720 }
721 
722 /// Lower an argument list according to the target calling convention.
723 ///
724 /// This is a helper for lowering intrinsics that follow a target calling
725 /// convention or require stack pointer adjustment. Only a subset of the
726 /// intrinsic's operands need to participate in the calling convention.
727 bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
728  unsigned NumArgs, const Value *Callee,
729  bool ForceRetVoidTy, CallLoweringInfo &CLI) {
730  ArgListTy Args;
731  Args.reserve(NumArgs);
732 
733  // Populate the argument list.
734  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
735  Value *V = CI->getOperand(ArgI);
736 
737  assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
738 
739  ArgListEntry Entry;
740  Entry.Val = V;
741  Entry.Ty = V->getType();
742  Entry.setAttributes(CI, ArgI);
743  Args.push_back(Entry);
744  }
745 
746  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
747  : CI->getType();
748  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
749 
750  return lowerCallTo(CLI);
751 }
752 
753 FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
754  const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
755  StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
756  SmallString<32> MangledName;
757  Mangler::getNameWithPrefix(MangledName, Target, DL);
758  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
759  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
760 }
761 
762 bool FastISel::selectPatchpoint(const CallInst *I) {
763  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
764  // i32 <numBytes>,
765  // i8* <target>,
766  // i32 <numArgs>,
767  // [Args...],
768  // [live variables...])
769  CallingConv::ID CC = I->getCallingConv();
770  bool IsAnyRegCC = CC == CallingConv::AnyReg;
771  bool HasDef = !I->getType()->isVoidTy();
772  Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();
773 
774  // Get the real number of arguments participating in the call <numArgs>
775  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
776  "Expected a constant integer.");
777  const auto *NumArgsVal =
778  cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
779  unsigned NumArgs = NumArgsVal->getZExtValue();
780 
781  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
782  // This includes all meta-operands up to but not including CC.
783  unsigned NumMetaOpers = PatchPointOpers::CCPos;
784  assert(I->arg_size() >= NumMetaOpers + NumArgs &&
785  "Not enough arguments provided to the patchpoint intrinsic");
786 
787  // For AnyRegCC the arguments are lowered later on manually.
788  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
789  CallLoweringInfo CLI;
790  CLI.setIsPatchPoint();
791  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
792  return false;
793 
794  assert(CLI.Call && "No call instruction specified.");
795 
796  SmallVector<MachineOperand, 32> Ops;
797 
798  // Add an explicit result reg if we use the anyreg calling convention.
799  if (IsAnyRegCC && HasDef) {
800  assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
801  CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
802  CLI.NumResultRegs = 1;
803  Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
804  }
805 
806  // Add the <id> and <numBytes> constants.
807  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
808  "Expected a constant integer.");
809  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
810  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
811 
812  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
813  "Expected a constant integer.");
814  const auto *NumBytes =
815  cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
816  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
817 
818  // Add the call target.
819  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
820  uint64_t CalleeConstAddr =
821  cast<ConstantInt>(C->getOperand(0))->getZExtValue();
822  Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
823  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
824  if (C->getOpcode() == Instruction::IntToPtr) {
825  uint64_t CalleeConstAddr =
826  cast<ConstantInt>(C->getOperand(0))->getZExtValue();
827  Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
828  } else
829  llvm_unreachable("Unsupported ConstantExpr.");
830  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
831  Ops.push_back(MachineOperand::CreateGA(GV, 0));
832  } else if (isa<ConstantPointerNull>(Callee))
833  Ops.push_back(MachineOperand::CreateImm(0));
834  else
835  llvm_unreachable("Unsupported callee address.");
836 
837  // Adjust <numArgs> to account for any arguments that have been passed on
838  // the stack instead.
839  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
840  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
841 
842  // Add the calling convention
843  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
844 
845  // Add the arguments we omitted previously. The register allocator should
846  // place these in any free register.
847  if (IsAnyRegCC) {
848  for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
849  Register Reg = getRegForValue(I->getArgOperand(i));
850  if (!Reg)
851  return false;
852  Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
853  }
854  }
855 
856  // Push the arguments from the call instruction.
857  for (auto Reg : CLI.OutRegs)
858  Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
859 
860  // Push live variables for the stack map.
861  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
862  return false;
863 
864  // Push the register mask info.
865  Ops.push_back(MachineOperand::CreateRegMask(
866  TRI.getCallPreservedMask(*FuncInfo.MF, CC)));
867 
868  // Add scratch registers as implicit def and early clobber.
869  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
870  for (unsigned i = 0; ScratchRegs[i]; ++i)
871  Ops.push_back(MachineOperand::CreateReg(
872  ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
873  /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));
874 
875  // Add implicit defs (return values).
876  for (auto Reg : CLI.InRegs)
877  Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
878  /*isImp=*/true));
879 
880  // Insert the patchpoint instruction before the call generated by the target.
881  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
882  TII.get(TargetOpcode::PATCHPOINT));
883 
884  for (auto &MO : Ops)
885  MIB.add(MO);
886 
887  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
888 
889  // Delete the original call instruction.
890  CLI.Call->eraseFromParent();
891 
892  // Inform the Frame Information that we have a patchpoint in this function.
893  FuncInfo.MF->getFrameInfo().setHasPatchPoint();
894 
895  if (CLI.NumResultRegs)
896  updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
897  return true;
898 }
899 
900 bool FastISel::selectXRayCustomEvent(const CallInst *I) {
901  const auto &Triple = TM.getTargetTriple();
902  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
903  return true; // don't do anything to this instruction.
904  SmallVector<MachineOperand, 8> Ops;
905  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
906  /*isDef=*/false));
907  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
908  /*isDef=*/false));
909  MachineInstrBuilder MIB =
910  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
911  TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
912  for (auto &MO : Ops)
913  MIB.add(MO);
914 
915  // Insert the Patchable Event Call instruction, which gets lowered properly.
916  return true;
917 }
918 
919 bool FastISel::selectXRayTypedEvent(const CallInst *I) {
920  const auto &Triple = TM.getTargetTriple();
921  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
922  return true; // don't do anything to this instruction.
923  SmallVector<MachineOperand, 8> Ops;
924  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
925  /*isDef=*/false));
926  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
927  /*isDef=*/false));
928  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
929  /*isDef=*/false));
930  MachineInstrBuilder MIB =
931  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
932  TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
933  for (auto &MO : Ops)
934  MIB.add(MO);
935 
936  // Insert the Patchable Typed Event Call instruction, which gets lowered properly.
937  return true;
938 }
939 
940 /// Returns an AttributeList representing the attributes applied to the return
941 /// value of the given call.
942 static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
943  SmallVector<Attribute::AttrKind, 2> Attrs;
944  if (CLI.RetSExt)
945  Attrs.push_back(Attribute::SExt);
946  if (CLI.RetZExt)
947  Attrs.push_back(Attribute::ZExt);
948  if (CLI.IsInReg)
949  Attrs.push_back(Attribute::InReg);
950 
951  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
952  Attrs);
953 }
954 
955 bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
956  unsigned NumArgs) {
957  MCContext &Ctx = MF->getContext();
958  SmallString<32> MangledName;
959  Mangler::getNameWithPrefix(MangledName, SymName, DL);
960  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
961  return lowerCallTo(CI, Sym, NumArgs);
962 }
963 
964 bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
965  unsigned NumArgs) {
966  FunctionType *FTy = CI->getFunctionType();
967  Type *RetTy = CI->getType();
968 
969  ArgListTy Args;
970  Args.reserve(NumArgs);
971 
972  // Populate the argument list.
973  // Attributes for args start at offset 1, after the return attribute.
974  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
975  Value *V = CI->getOperand(ArgI);
976 
977  assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
978 
979  ArgListEntry Entry;
980  Entry.Val = V;
981  Entry.Ty = V->getType();
982  Entry.setAttributes(CI, ArgI);
983  Args.push_back(Entry);
984  }
985  TLI.markLibCallAttributes(MF, CI->getCallingConv(), Args);
986 
987  CallLoweringInfo CLI;
988  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);
989 
990  return lowerCallTo(CLI);
991 }
992 
993 bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
994  // Handle the incoming return values from the call.
995  CLI.clearIns();
996  SmallVector<EVT, 4> RetTys;
997  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
998 
999  SmallVector<ISD::OutputArg, 4> Outs;
1000  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
1001 
1002  bool CanLowerReturn = TLI.CanLowerReturn(
1003  CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
1004 
1005  // FIXME: sret demotion isn't supported yet - bail out.
1006  if (!CanLowerReturn)
1007  return false;
1008 
1009  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
1010  EVT VT = RetTys[I];
1011  MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
1012  unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
1013  for (unsigned i = 0; i != NumRegs; ++i) {
1014  ISD::InputArg MyFlags;
1015  MyFlags.VT = RegisterVT;
1016  MyFlags.ArgVT = VT;
1017  MyFlags.Used = CLI.IsReturnValueUsed;
1018  if (CLI.RetSExt)
1019  MyFlags.Flags.setSExt();
1020  if (CLI.RetZExt)
1021  MyFlags.Flags.setZExt();
1022  if (CLI.IsInReg)
1023  MyFlags.Flags.setInReg();
1024  CLI.Ins.push_back(MyFlags);
1025  }
1026  }
1027 
1028  // Handle all of the outgoing arguments.
1029  CLI.clearOuts();
1030  for (auto &Arg : CLI.getArgs()) {
1031  Type *FinalType = Arg.Ty;
1032  if (Arg.IsByVal)
1033  FinalType = Arg.IndirectType;
1034  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
1035  FinalType, CLI.CallConv, CLI.IsVarArg, DL);
1036 
1037  ISD::ArgFlagsTy Flags;
1038  if (Arg.IsZExt)
1039  Flags.setZExt();
1040  if (Arg.IsSExt)
1041  Flags.setSExt();
1042  if (Arg.IsInReg)
1043  Flags.setInReg();
1044  if (Arg.IsSRet)
1045  Flags.setSRet();
1046  if (Arg.IsSwiftSelf)
1047  Flags.setSwiftSelf();
1048  if (Arg.IsSwiftAsync)
1049  Flags.setSwiftAsync();
1050  if (Arg.IsSwiftError)
1051  Flags.setSwiftError();
1052  if (Arg.IsCFGuardTarget)
1053  Flags.setCFGuardTarget();
1054  if (Arg.IsByVal)
1055  Flags.setByVal();
1056  if (Arg.IsInAlloca) {
1057  Flags.setInAlloca();
1058  // Set the byval flag for CCAssignFn callbacks that don't know about
1059  // inalloca. This way we can know how many bytes we should've allocated
1060  // and how many bytes a callee cleanup function will pop. If we port
1061  // inalloca to more targets, we'll have to add custom inalloca handling in
1062  // the various CC lowering callbacks.
1063  Flags.setByVal();
1064  }
1065  if (Arg.IsPreallocated) {
1066  Flags.setPreallocated();
1067  // Set the byval flag for CCAssignFn callbacks that don't know about
1068  // preallocated. This way we can know how many bytes we should've
1069  // allocated and how many bytes a callee cleanup function will pop. If we
1070  // port preallocated to more targets, we'll have to add custom
1071  // preallocated handling in the various CC lowering callbacks.
1072  Flags.setByVal();
1073  }
1074  MaybeAlign MemAlign = Arg.Alignment;
1075  if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
1076  unsigned FrameSize = DL.getTypeAllocSize(Arg.IndirectType);
1077 
1078  // For ByVal, alignment should come from FE. BE will guess if this info
1079  // is not there, but there are cases it cannot get right.
1080  if (!MemAlign)
1081  MemAlign = Align(TLI.getByValTypeAlignment(Arg.IndirectType, DL));
1082  Flags.setByValSize(FrameSize);
1083  } else if (!MemAlign) {
1084  MemAlign = DL.getABITypeAlign(Arg.Ty);
1085  }
1086  Flags.setMemAlign(*MemAlign);
1087  if (Arg.IsNest)
1088  Flags.setNest();
1089  if (NeedsRegBlock)
1090  Flags.setInConsecutiveRegs();
1091  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
1092  CLI.OutVals.push_back(Arg.Val);
1093  CLI.OutFlags.push_back(Flags);
1094  }
1095 
1096  if (!fastLowerCall(CLI))
1097  return false;
1098 
1099  // Set all unused physreg defs as dead.
1100  assert(CLI.Call && "No call instruction specified.");
1101  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);
1102 
1103  if (CLI.NumResultRegs && CLI.CB)
1104  updateValueMap(CLI.CB, CLI.ResultReg, CLI.NumResultRegs);
1105 
1106  // Set labels for heapallocsite call.
1107  if (CLI.CB)
1108  if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
1109  CLI.Call->setHeapAllocMarker(*MF, MD);
1110 
1111  return true;
1112 }
1113 
1114 bool FastISel::lowerCall(const CallInst *CI) {
1115  FunctionType *FuncTy = CI->getFunctionType();
1116  Type *RetTy = CI->getType();
1117 
1118  ArgListTy Args;
1119  ArgListEntry Entry;
1120  Args.reserve(CI->arg_size());
1121 
1122  for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
1123  Value *V = *i;
1124 
1125  // Skip empty types
1126  if (V->getType()->isEmptyTy())
1127  continue;
1128 
1129  Entry.Val = V;
1130  Entry.Ty = V->getType();
1131 
1132  // Skip the first return-type Attribute to get to params.
1133  Entry.setAttributes(CI, i - CI->arg_begin());
1134  Args.push_back(Entry);
1135  }
1136 
1137  // Check if target-independent constraints permit a tail call here.
1138  // Target-dependent constraints are checked within fastLowerCall.
1139  bool IsTailCall = CI->isTailCall();
1140  if (IsTailCall && !isInTailCallPosition(*CI, TM))
1141  IsTailCall = false;
1142  if (IsTailCall && MF->getFunction()
1143  .getFnAttribute("disable-tail-calls")
1144  .getValueAsBool())
1145  IsTailCall = false;
1146 
1147  CallLoweringInfo CLI;
1148  CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
1149  .setTailCall(IsTailCall);
1150 
1151  diagnoseDontCall(*CI);
1152 
1153  return lowerCallTo(CLI);
1154 }
1155 
1156 bool FastISel::selectCall(const User *I) {
1157  const CallInst *Call = cast<CallInst>(I);
1158 
1159  // Handle simple inline asms.
1160  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
1161  // Don't attempt to handle constraints.
1162  if (!IA->getConstraintString().empty())
1163  return false;
1164 
1165  unsigned ExtraInfo = 0;
1166  if (IA->hasSideEffects())
1167  ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1168  if (IA->isAlignStack())
1169  ExtraInfo |= InlineAsm::Extra_IsAlignStack;
1170  if (Call->isConvergent())
1171  ExtraInfo |= InlineAsm::Extra_IsConvergent;
1172  ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
1173 
1174  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1175  TII.get(TargetOpcode::INLINEASM));
1176  MIB.addExternalSymbol(IA->getAsmString().c_str());
1177  MIB.addImm(ExtraInfo);
1178 
1179  const MDNode *SrcLoc = Call->getMetadata("srcloc");
1180  if (SrcLoc)
1181  MIB.addMetadata(SrcLoc);
1182 
1183  return true;
1184  }
1185 
1186  // Handle intrinsic function calls.
1187  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
1188  return selectIntrinsicCall(II);
1189 
1190  return lowerCall(Call);
1191 }
1192 
1193 bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
1194  switch (II->getIntrinsicID()) {
1195  default:
1196  break;
1197  // At -O0 we don't care about the lifetime intrinsics.
1198  case Intrinsic::lifetime_start:
1199  case Intrinsic::lifetime_end:
1200  // The donothing intrinsic does, well, nothing.
1201  case Intrinsic::donothing:
1202  // Neither does the sideeffect intrinsic.
1203  case Intrinsic::sideeffect:
1204  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
1205  case Intrinsic::assume:
1206  // Neither does the llvm.experimental.noalias.scope.decl intrinsic
1207  case Intrinsic::experimental_noalias_scope_decl:
1208  return true;
1209  case Intrinsic::dbg_declare: {
1210  const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
1211  assert(DI->getVariable() && "Missing variable");
1212  if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1213  LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
1214  << " (!hasDebugInfo)\n");
1215  return true;
1216  }
1217 
1218  const Value *Address = DI->getAddress();
1219  if (!Address || isa<UndefValue>(Address)) {
1220  LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
1221  << " (bad/undef address)\n");
1222  return true;
1223  }
1224 
1225  // Byval arguments with frame indices were already handled after argument
1226  // lowering and before isel.
1227  const auto *Arg =
1228  dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
1229  if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
1230  return true;
1231 
1232  Optional<MachineOperand> Op;
1233  if (Register Reg = lookUpRegForValue(Address))
1234  Op = MachineOperand::CreateReg(Reg, false);
1235 
1236  // If we have a VLA that has a "use" in a metadata node that's then used
1237  // here but it has no other uses, then we have a problem. E.g.,
1238  //
1239  // int foo (const int *x) {
1240  // char a[*x];
1241  // return 0;
1242  // }
1243  //
1244  // If we assign 'a' a vreg and fast isel later on has to use the selection
1245  // DAG isel, it will want to copy the value to the vreg. However, there are
1246  // no uses, which goes counter to what selection DAG isel expects.
1247  if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
1248  (!isa<AllocaInst>(Address) ||
1249  !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
1250  Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
1251  false);
1252 
1253  if (Op) {
1254  assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
1255  "Expected inlined-at fields to agree");
1256  // A dbg.declare describes the address of a source variable, so lower it
1257  // into an indirect DBG_VALUE.
1258  auto Builder =
1259  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1260  TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op,
1261  DI->getVariable(), DI->getExpression());
1262 
1263  // If using instruction referencing, mutate this into a DBG_INSTR_REF,
1264  // to be later patched up by finalizeDebugInstrRefs. Tack a deref onto
1265  // the expression, we don't have an "indirect" flag in DBG_INSTR_REF.
1266  if (UseInstrRefDebugInfo && Op->isReg()) {
1267  Builder->setDesc(TII.get(TargetOpcode::DBG_INSTR_REF));
1268  Builder->getOperand(1).ChangeToImmediate(0);
1269  auto *NewExpr =
1270  DIExpression::prepend(DI->getExpression(), DIExpression::DerefBefore);
1271  Builder->getOperand(3).setMetadata(NewExpr);
1272  }
1273  } else {
1274  // We can't yet handle anything else here because it would require
1275  // generating code, thus altering codegen because of debug info.
1276  LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
1277  << " (no materialized reg for address)\n");
1278  }
1279  return true;
1280  }
1281  case Intrinsic::dbg_value: {
1282  // This form of DBG_VALUE is target-independent.
1283  const DbgValueInst *DI = cast<DbgValueInst>(II);
1284  const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
1285  const Value *V = DI->getValue();
1286  assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
1287  "Expected inlined-at fields to agree");
1288  if (!V || isa<UndefValue>(V) || DI->hasArgList()) {
1289  // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
1290  // undef DBG_VALUE to terminate any prior location.
1291  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
1292  DI->getVariable(), DI->getExpression());
1293  } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
1294  // See if there's an expression to constant-fold.
1295  DIExpression *Expr = DI->getExpression();
1296  if (Expr)
1297  std::tie(Expr, CI) = Expr->constantFold(CI);
1298  if (CI->getBitWidth() > 64)
1299  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1300  .addCImm(CI)
1301  .addImm(0U)
1302  .addMetadata(DI->getVariable())
1303  .addMetadata(Expr);
1304  else
1305  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1306  .addImm(CI->getZExtValue())
1307  .addImm(0U)
1308  .addMetadata(DI->getVariable())
1309  .addMetadata(Expr);
1310  } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
1311  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1312  .addFPImm(CF)
1313  .addImm(0U)
1314  .addMetadata(DI->getVariable())
1315  .addMetadata(DI->getExpression());
1316  } else if (Register Reg = lookUpRegForValue(V)) {
1317  // FIXME: This does not handle register-indirect values at offset 0.
1318  bool IsIndirect = false;
1319  auto Builder =
1320  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
1321  DI->getVariable(), DI->getExpression());
1322 
1323  // If using instruction referencing, mutate this into a DBG_INSTR_REF,
1324  // to be later patched up by finalizeDebugInstrRefs.
1325  if (UseInstrRefDebugInfo) {
1326  Builder->setDesc(TII.get(TargetOpcode::DBG_INSTR_REF));
1327  Builder->getOperand(1).ChangeToImmediate(0);
1328  }
1329  } else {
1330  // We don't know how to handle other cases, so we drop.
1331  LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1332  }
1333  return true;
1334  }
1335  case Intrinsic::dbg_label: {
1336  const DbgLabelInst *DI = cast<DbgLabelInst>(II);
1337  assert(DI->getLabel() && "Missing label");
1338  if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1339  LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1340  return true;
1341  }
1342 
1343  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1344  TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
1345  return true;
1346  }
1347  case Intrinsic::objectsize:
1348  llvm_unreachable("llvm.objectsize.* should have been lowered already");
1349 
1350  case Intrinsic::is_constant:
1351  llvm_unreachable("llvm.is.constant.* should have been lowered already");
1352 
1353  case Intrinsic::launder_invariant_group:
1354  case Intrinsic::strip_invariant_group:
1355  case Intrinsic::expect: {
1356  Register ResultReg = getRegForValue(II->getArgOperand(0));
1357  if (!ResultReg)
1358  return false;
1359  updateValueMap(II, ResultReg);
1360  return true;
1361  }
1362  case Intrinsic::experimental_stackmap:
1363  return selectStackmap(II);
1364  case Intrinsic::experimental_patchpoint_void:
1365  case Intrinsic::experimental_patchpoint_i64:
1366  return selectPatchpoint(II);
1367 
1368  case Intrinsic::xray_customevent:
1369  return selectXRayCustomEvent(II);
1370  case Intrinsic::xray_typedevent:
1371  return selectXRayTypedEvent(II);
1372  }
1373 
1374  return fastLowerIntrinsicCall(II);
1375 }
1376 
1377 bool FastISel::selectCast(const User *I, unsigned Opcode) {
1378  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1379  EVT DstVT = TLI.getValueType(DL, I->getType());
1380 
1381  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1382  !DstVT.isSimple())
1383  // Unhandled type. Halt "fast" selection and bail.
1384  return false;
1385 
1386  // Check if the destination type is legal.
1387  if (!TLI.isTypeLegal(DstVT))
1388  return false;
1389 
1390  // Check if the source operand is legal.
1391  if (!TLI.isTypeLegal(SrcVT))
1392  return false;
1393 
1394  Register InputReg = getRegForValue(I->getOperand(0));
1395  if (!InputReg)
1396  // Unhandled operand. Halt "fast" selection and bail.
1397  return false;
1398 
1399  Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1400  Opcode, InputReg);
1401  if (!ResultReg)
1402  return false;
1403 
1404  updateValueMap(I, ResultReg);
1405  return true;
1406 }
1407 
1408 bool FastISel::selectBitCast(const User *I) {
1409  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1410  EVT DstEVT = TLI.getValueType(DL, I->getType());
1411  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1412  !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1413  // Unhandled type. Halt "fast" selection and bail.
1414  return false;
1415 
1416  MVT SrcVT = SrcEVT.getSimpleVT();
1417  MVT DstVT = DstEVT.getSimpleVT();
1418  Register Op0 = getRegForValue(I->getOperand(0));
1419  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1420  return false;
1421 
1422  // If the bitcast doesn't change the type, just use the operand value.
1423  if (SrcVT == DstVT) {
1424  updateValueMap(I, Op0);
1425  return true;
1426  }
1427 
1428  // Otherwise, select a BITCAST opcode.
1429  Register ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
1430  if (!ResultReg)
1431  return false;
1432 
1433  updateValueMap(I, ResultReg);
1434  return true;
1435 }
1436 
1437 bool FastISel::selectFreeze(const User *I) {
1438  Register Reg = getRegForValue(I->getOperand(0));
1439  if (!Reg)
1440  // Unhandled operand.
1441  return false;
1442 
1443  EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
1444  if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
1445  // Unhandled type, bail out.
1446  return false;
1447 
1448  MVT Ty = ETy.getSimpleVT();
1449  const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
1450  Register ResultReg = createResultReg(TyRegClass);
1451  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1452  TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);
1453 
1454  updateValueMap(I, ResultReg);
1455  return true;
1456 }
1457 
1458 // Remove local value instructions starting from the instruction after
1459 // SavedLastLocalValue to the current function insert point.
1460 void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
1461 {
1462  MachineInstr *CurLastLocalValue = getLastLocalValue();
1463  if (CurLastLocalValue != SavedLastLocalValue) {
1464  // Find the first local value instruction to be deleted.
1465  // This is the instruction after SavedLastLocalValue if it is non-NULL.
1466  // Otherwise it's the first instruction in the block.
1467  MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
1468  if (SavedLastLocalValue)
1469  ++FirstDeadInst;
1470  else
1471  FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1472  setLastLocalValue(SavedLastLocalValue);
1473  removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1474  }
1475 }
1476 
1477 bool FastISel::selectInstruction(const Instruction *I) {
1478  // Flush the local value map before starting each instruction.
1479  // This improves locality and debugging, and can reduce spills.
1480  // Reuse of values across IR instructions is relatively uncommon.
1481  flushLocalValueMap();
1482 
1483  MachineInstr *SavedLastLocalValue = getLastLocalValue();
1484  // Just before the terminator instruction, insert instructions to
1485  // feed PHI nodes in successor blocks.
1486  if (I->isTerminator()) {
1487  if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
1488  // PHI node handling may have generated local value instructions,
1489  // even though it failed to handle all PHI nodes.
1490  // We remove these instructions because SelectionDAGISel will generate
1491  // them again.
1492  removeDeadLocalValueCode(SavedLastLocalValue);
1493  return false;
1494  }
1495  }
1496 
1497  // FastISel does not handle any operand bundles except OB_funclet.
1498  if (auto *Call = dyn_cast<CallBase>(I))
1499  for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
1500  if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
1501  return false;
1502 
1503  DbgLoc = I->getDebugLoc();
1504 
1505  SavedInsertPt = FuncInfo.InsertPt;
1506 
1507  if (const auto *Call = dyn_cast<CallInst>(I)) {
1508  const Function *F = Call->getCalledFunction();
1509  LibFunc Func;
1510 
1511  // As a special case, don't handle calls to builtin library functions that
1512  // may be translated directly to target instructions.
1513  if (F && !F->hasLocalLinkage() && F->hasName() &&
1514  LibInfo->getLibFunc(F->getName(), Func) &&
1515  LibInfo->hasOptimizedCodeGen(Func))
1516  return false;
1517 
1518  // Don't handle Intrinsic::trap if a trap function is specified.
1519  if (F && F->getIntrinsicID() == Intrinsic::trap &&
1520  Call->hasFnAttr("trap-func-name"))
1521  return false;
1522  }
1523 
1524  // First, try doing target-independent selection.
1525  if (!SkipTargetIndependentISel) {
1526  if (selectOperator(I, I->getOpcode())) {
1527  ++NumFastIselSuccessIndependent;
1528  DbgLoc = DebugLoc();
1529  return true;
1530  }
1531  // Remove dead code.
1532  recomputeInsertPt();
1533  if (SavedInsertPt != FuncInfo.InsertPt)
1534  removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1535  SavedInsertPt = FuncInfo.InsertPt;
1536  }
1537  // Next, try calling the target to attempt to handle the instruction.
1538  if (fastSelectInstruction(I)) {
1539  ++NumFastIselSuccessTarget;
1540  DbgLoc = DebugLoc();
1541  return true;
1542  }
1543  // Remove dead code.
1544  recomputeInsertPt();
1545  if (SavedInsertPt != FuncInfo.InsertPt)
1546  removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1547 
1548  DbgLoc = DebugLoc();
1549  // Undo phi node updates, because they will be added again by SelectionDAG.
1550  if (I->isTerminator()) {
1551  // PHI node handling may have generated local value instructions.
1552  // We remove them because SelectionDAGISel will generate them again.
1553  removeDeadLocalValueCode(SavedLastLocalValue);
1554  FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
1555  }
1556  return false;
1557 }
1558 
1559 /// Emit an unconditional branch to the given block, unless it is the immediate
1560 /// (fall-through) successor, and update the CFG.
1561 void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
1562  const DebugLoc &DbgLoc) {
1563  if (FuncInfo.MBB->getBasicBlock()->sizeWithoutDebug() > 1 &&
1564  FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
1565  // For more accurate line information if this is the only non-debug
1566  // instruction in the block then emit it, otherwise we have the
1567  // unconditional fall-through case, which needs no instructions.
1568  } else {
1569  // The unconditional branch case.
1570  TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
1571  SmallVector<MachineOperand, 0>(), DbgLoc);
1572  }
1573  if (FuncInfo.BPI) {
1574  auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
1575  FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
1576  FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
1577  } else
1578  FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
1579 }
1580 
1581 void FastISel::finishCondBranch(const BasicBlock *BranchBB,
1582  MachineBasicBlock *TrueMBB,
1583  MachineBasicBlock *FalseMBB) {
1584  // Add TrueMBB as a successor unless it is equal to FalseMBB: this can
1585  // happen in degenerate IR, and MachineIR forbids having a block appear
1586  // twice in the successor/predecessor lists.
1587  if (TrueMBB != FalseMBB) {
1588  if (FuncInfo.BPI) {
1589  auto BranchProbability =
1590  FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
1591  FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
1592  } else
1593  FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
1594  }
1595 
1596  fastEmitBranch(FalseMBB, DbgLoc);
1597 }
1598 
1599 /// Emit an FNeg operation.
1600 bool FastISel::selectFNeg(const User *I, const Value *In) {
1601  Register OpReg = getRegForValue(In);
1602  if (!OpReg)
1603  return false;
1604 
1605  // If the target has ISD::FNEG, use it.
1606  EVT VT = TLI.getValueType(DL, I->getType());
1607  Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1608  OpReg);
1609  if (ResultReg) {
1610  updateValueMap(I, ResultReg);
1611  return true;
1612  }
1613 
1614  // Bitcast the value to integer, twiddle the sign bit with xor,
1615  // and then bitcast it back to floating-point.
1616  if (VT.getSizeInBits() > 64)
1617  return false;
1618  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1619  if (!TLI.isTypeLegal(IntVT))
1620  return false;
1621 
1622  Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1623  ISD::BITCAST, OpReg);
1624  if (!IntReg)
1625  return false;
1626 
1627  Register IntResultReg = fastEmit_ri_(
1628  IntVT.getSimpleVT(), ISD::XOR, IntReg,
1629  UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1630  if (!IntResultReg)
1631  return false;
1632 
1633  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1634  IntResultReg);
1635  if (!ResultReg)
1636  return false;
1637 
1638  updateValueMap(I, ResultReg);
1639  return true;
1640 }
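// A minimal illustration of the integer fallback above, assuming an f64
// operand X (the names here are illustrative only, not part of the source):
//
//   uint64_t Bits = llvm::bit_cast<uint64_t>(X);   // ISD::BITCAST to i64
//   Bits ^= UINT64_C(1) << 63;                     // ISD::XOR with the sign-bit mask
//   double Neg = llvm::bit_cast<double>(Bits);     // ISD::BITCAST back; Neg == -X
//
// The same pattern applies to any floating-point type of 64 bits or fewer,
// with the mask taken from VT.getSizeInBits() as in the code above.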
1641 
1642 bool FastISel::selectExtractValue(const User *U) {
1643  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1644  if (!EVI)
1645  return false;
1646 
1647  // Make sure we only try to handle extracts with a legal result. But also
1648  // allow i1 because it's easy.
1649  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
1650  if (!RealVT.isSimple())
1651  return false;
1652  MVT VT = RealVT.getSimpleVT();
1653  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1654  return false;
1655 
1656  const Value *Op0 = EVI->getOperand(0);
1657  Type *AggTy = Op0->getType();
1658 
1659  // Get the base result register.
1660  unsigned ResultReg;
1661  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(Op0);
1662  if (I != FuncInfo.ValueMap.end())
1663  ResultReg = I->second;
1664  else if (isa<Instruction>(Op0))
1665  ResultReg = FuncInfo.InitializeRegForValue(Op0);
1666  else
1667  return false; // fast-isel can't handle aggregate constants at the moment
1668 
1669  // Get the actual result register, which is an offset from the base register.
1670  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1671 
1672  SmallVector<EVT, 4> AggValueVTs;
1673  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
1674 
1675  for (unsigned i = 0; i < VTIndex; i++)
1676  ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1677 
1678  updateValueMap(EVI, ResultReg);
1679  return true;
1680 }
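// A worked example of the index computation above, assuming an aggregate of
// type { i32, { i64, float } } on a typical 64-bit target (illustrative only):
// ComputeValueVTs flattens it to [i32, i64, f32], each needing one register,
// so extractvalue %agg, 0 maps to ResultReg + 0, extractvalue %agg, 1, 0 to
// ResultReg + 1, and extractvalue %agg, 1, 1 to ResultReg + 2.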
1681 
1682 bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1683  switch (Opcode) {
1684  case Instruction::Add:
1685  return selectBinaryOp(I, ISD::ADD);
1686  case Instruction::FAdd:
1687  return selectBinaryOp(I, ISD::FADD);
1688  case Instruction::Sub:
1689  return selectBinaryOp(I, ISD::SUB);
1690  case Instruction::FSub:
1691  return selectBinaryOp(I, ISD::FSUB);
1692  case Instruction::Mul:
1693  return selectBinaryOp(I, ISD::MUL);
1694  case Instruction::FMul:
1695  return selectBinaryOp(I, ISD::FMUL);
1696  case Instruction::SDiv:
1697  return selectBinaryOp(I, ISD::SDIV);
1698  case Instruction::UDiv:
1699  return selectBinaryOp(I, ISD::UDIV);
1700  case Instruction::FDiv:
1701  return selectBinaryOp(I, ISD::FDIV);
1702  case Instruction::SRem:
1703  return selectBinaryOp(I, ISD::SREM);
1704  case Instruction::URem:
1705  return selectBinaryOp(I, ISD::UREM);
1706  case Instruction::FRem:
1707  return selectBinaryOp(I, ISD::FREM);
1708  case Instruction::Shl:
1709  return selectBinaryOp(I, ISD::SHL);
1710  case Instruction::LShr:
1711  return selectBinaryOp(I, ISD::SRL);
1712  case Instruction::AShr:
1713  return selectBinaryOp(I, ISD::SRA);
1714  case Instruction::And:
1715  return selectBinaryOp(I, ISD::AND);
1716  case Instruction::Or:
1717  return selectBinaryOp(I, ISD::OR);
1718  case Instruction::Xor:
1719  return selectBinaryOp(I, ISD::XOR);
1720 
1721  case Instruction::FNeg:
1722  return selectFNeg(I, I->getOperand(0));
1723 
1724  case Instruction::GetElementPtr:
1725  return selectGetElementPtr(I);
1726 
1727  case Instruction::Br: {
1728  const BranchInst *BI = cast<BranchInst>(I);
1729 
1730  if (BI->isUnconditional()) {
1731  const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1732  MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
1733  fastEmitBranch(MSucc, BI->getDebugLoc());
1734  return true;
1735  }
1736 
1737  // Conditional branches are not handled yet.
1738  // Halt "fast" selection and bail.
1739  return false;
1740  }
1741 
1742  case Instruction::Unreachable:
1743  if (TM.Options.TrapUnreachable)
1744  return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1745  else
1746  return true;
1747 
1748  case Instruction::Alloca:
1749  // FunctionLowering has the static-sized case covered.
1750  if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1751  return true;
1752 
1753  // Dynamic-sized alloca is not handled yet.
1754  return false;
1755 
1756  case Instruction::Call:
1757  // On AIX, normal call lowering uses the DAG-ISEL path currently so that the
1758  // callee of the direct function call instruction will be mapped to the
1759  // symbol for the function's entry point, which is distinct from the
1760  // function descriptor symbol. The latter is the symbol whose XCOFF symbol
1761  // name is the C-linkage name of the source level function.
1762  // But fast isel still has the ability to do selection for intrinsics.
1763  if (TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(I))
1764  return false;
1765  return selectCall(I);
1766 
1767  case Instruction::BitCast:
1768  return selectBitCast(I);
1769 
1770  case Instruction::FPToSI:
1771  return selectCast(I, ISD::FP_TO_SINT);
1772  case Instruction::ZExt:
1773  return selectCast(I, ISD::ZERO_EXTEND);
1774  case Instruction::SExt:
1775  return selectCast(I, ISD::SIGN_EXTEND);
1776  case Instruction::Trunc:
1777  return selectCast(I, ISD::TRUNCATE);
1778  case Instruction::SIToFP:
1779  return selectCast(I, ISD::SINT_TO_FP);
1780 
1781  case Instruction::IntToPtr: // Deliberate fall-through.
1782  case Instruction::PtrToInt: {
1783  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1784  EVT DstVT = TLI.getValueType(DL, I->getType());
1785  if (DstVT.bitsGT(SrcVT))
1786  return selectCast(I, ISD::ZERO_EXTEND);
1787  if (DstVT.bitsLT(SrcVT))
1788  return selectCast(I, ISD::TRUNCATE);
1789  Register Reg = getRegForValue(I->getOperand(0));
1790  if (!Reg)
1791  return false;
1792  updateValueMap(I, Reg);
1793  return true;
1794  }
1795 
1796  case Instruction::ExtractValue:
1797  return selectExtractValue(I);
1798 
1799  case Instruction::Freeze:
1800  return selectFreeze(I);
1801 
1802  case Instruction::PHI:
1803  llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1804 
1805  default:
1806  // Unhandled instruction. Halt "fast" selection and bail.
1807  return false;
1808  }
1809 }
1810 
1811 FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
1812  const TargetLibraryInfo *LibInfo,
1813  bool SkipTargetIndependentISel)
1814  : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1815  MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1816  TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1817  TII(*MF->getSubtarget().getInstrInfo()),
1818  TLI(*MF->getSubtarget().getTargetLowering()),
1819  TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1820  SkipTargetIndependentISel(SkipTargetIndependentISel) {}
1821 
1822 FastISel::~FastISel() = default;
1823 
1824 bool FastISel::fastLowerArguments() { return false; }
1825 
1826 bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1827 
1828 bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
1829  return false;
1830 }
1831 
1832 unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
1833 
1834 unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/) {
1835  return 0;
1836 }
1837 
1838 unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
1839  unsigned /*Op1*/) {
1840  return 0;
1841 }
1842 
1843 unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1844  return 0;
1845 }
1846 
1847 unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
1848  const ConstantFP * /*FPImm*/) {
1849  return 0;
1850 }
1851 
1852 unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
1853  uint64_t /*Imm*/) {
1854  return 0;
1855 }
1856 
1857 /// This method is a wrapper around fastEmit_ri. It first tries to emit an
1858 /// instruction with an immediate operand using fastEmit_ri.
1859 /// If that fails, it materializes the immediate into a register and tries
1860 /// fastEmit_rr instead.
1861 Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
1862  uint64_t Imm, MVT ImmType) {
1863  // If this is a multiply by a power of two, emit this as a shift left.
1864  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1865  Opcode = ISD::SHL;
1866  Imm = Log2_64(Imm);
1867  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1868  // div x, 8 -> srl x, 3
1869  Opcode = ISD::SRL;
1870  Imm = Log2_64(Imm);
1871  }
1872 
1873  // Horrible hack (to be removed): check to make sure shift amounts are
1874  // in range.
1875  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1876  Imm >= VT.getSizeInBits())
1877  return 0;
1878 
1879  // First check if immediate type is legal. If not, we can't use the ri form.
1880  Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
1881  if (ResultReg)
1882  return ResultReg;
1883  Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1884  if (!MaterialReg) {
1885  // This is a bit ugly/slow, but failing here means falling out of
1886  // fast-isel, which would be very slow.
1887  IntegerType *ITy =
1888  IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
1889  MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1890  if (!MaterialReg)
1891  return 0;
1892  }
1893  return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
1894 }
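// Usage sketch for the wrapper above (illustrative; XReg is a hypothetical
// register operand, not a name from the source):
//
//   // mul x, 8 is strength-reduced to shl x, 3 before any target hook runs:
//   Register R = fastEmit_ri_(MVT::i32, ISD::MUL, XReg, 8, MVT::i32);
//
// If the target's fastEmit_ri cannot encode the immediate, the constant is
// materialized (via fastEmit_i, or getRegForValue as a fallback) and
// fastEmit_rr is tried with the register form instead.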
1895 
1896 Register FastISel::createResultReg(const TargetRegisterClass *RC) {
1897  return MRI.createVirtualRegister(RC);
1898 }
1899 
1900 Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op,
1901  unsigned OpNum) {
1902  if (Op.isVirtual()) {
1903  const TargetRegisterClass *RegClass =
1904  TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
1905  if (!MRI.constrainRegClass(Op, RegClass)) {
1906  // If it's not legal to COPY between the register classes, something
1907  // has gone very wrong before we got here.
1908  Register NewOp = createResultReg(RegClass);
1909  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1910  TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
1911  return NewOp;
1912  }
1913  }
1914  return Op;
1915 }
1916 
1917 Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
1918  const TargetRegisterClass *RC) {
1919  Register ResultReg = createResultReg(RC);
1920  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1921 
1922  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
1923  return ResultReg;
1924 }
1925 
1926 Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
1927  const TargetRegisterClass *RC, unsigned Op0) {
1928  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1929 
1930  Register ResultReg = createResultReg(RC);
1931  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1932 
1933  if (II.getNumDefs() >= 1)
1934  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1935  .addReg(Op0);
1936  else {
1937  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1938  .addReg(Op0);
1939  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1940  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1941  }
1942 
1943  return ResultReg;
1944 }
1945 
1946 Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
1947  const TargetRegisterClass *RC, unsigned Op0,
1948  unsigned Op1) {
1949  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1950 
1951  Register ResultReg = createResultReg(RC);
1952  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1953  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1954 
1955  if (II.getNumDefs() >= 1)
1956  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1957  .addReg(Op0)
1958  .addReg(Op1);
1959  else {
1960  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1961  .addReg(Op0)
1962  .addReg(Op1);
1963  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1964  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1965  }
1966  return ResultReg;
1967 }
1968 
1969 Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
1970  const TargetRegisterClass *RC, unsigned Op0,
1971  unsigned Op1, unsigned Op2) {
1972  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1973 
1974  Register ResultReg = createResultReg(RC);
1975  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1976  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1977  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
1978 
1979  if (II.getNumDefs() >= 1)
1980  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1981  .addReg(Op0)
1982  .addReg(Op1)
1983  .addReg(Op2);
1984  else {
1985  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1986  .addReg(Op0)
1987  .addReg(Op1)
1988  .addReg(Op2);
1989  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1990  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1991  }
1992  return ResultReg;
1993 }
1994 
1995 Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
1996  const TargetRegisterClass *RC, unsigned Op0,
1997  uint64_t Imm) {
1998  const MCInstrDesc &II = TII.get(MachineInstOpcode);
1999 
2000  Register ResultReg = createResultReg(RC);
2001  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2002 
2003  if (II.getNumDefs() >= 1)
2004  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2005  .addReg(Op0)
2006  .addImm(Imm);
2007  else {
2008  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2009  .addReg(Op0)
2010  .addImm(Imm);
2011  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2012  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2013  }
2014  return ResultReg;
2015 }
2016 
2017 Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
2018  const TargetRegisterClass *RC, unsigned Op0,
2019  uint64_t Imm1, uint64_t Imm2) {
2020  const MCInstrDesc &II = TII.get(MachineInstOpcode);
2021 
2022  Register ResultReg = createResultReg(RC);
2023  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2024 
2025  if (II.getNumDefs() >= 1)
2026  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2027  .addReg(Op0)
2028  .addImm(Imm1)
2029  .addImm(Imm2);
2030  else {
2031  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2032  .addReg(Op0)
2033  .addImm(Imm1)
2034  .addImm(Imm2);
2035  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2036  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2037  }
2038  return ResultReg;
2039 }
2040 
2041 Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
2042  const TargetRegisterClass *RC,
2043  const ConstantFP *FPImm) {
2044  const MCInstrDesc &II = TII.get(MachineInstOpcode);
2045 
2046  Register ResultReg = createResultReg(RC);
2047 
2048  if (II.getNumDefs() >= 1)
2049  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2050  .addFPImm(FPImm);
2051  else {
2052  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2053  .addFPImm(FPImm);
2054  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2055  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2056  }
2057  return ResultReg;
2058 }
2059 
2060 Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
2061  const TargetRegisterClass *RC, unsigned Op0,
2062  unsigned Op1, uint64_t Imm) {
2063  const MCInstrDesc &II = TII.get(MachineInstOpcode);
2064 
2065  Register ResultReg = createResultReg(RC);
2066  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2067  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2068 
2069  if (II.getNumDefs() >= 1)
2070  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2071  .addReg(Op0)
2072  .addReg(Op1)
2073  .addImm(Imm);
2074  else {
2075  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2076  .addReg(Op0)
2077  .addReg(Op1)
2078  .addImm(Imm);
2079  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2080  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2081  }
2082  return ResultReg;
2083 }
2084 
2085 Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
2086  const TargetRegisterClass *RC, uint64_t Imm) {
2087  Register ResultReg = createResultReg(RC);
2088  const MCInstrDesc &II = TII.get(MachineInstOpcode);
2089 
2090  if (II.getNumDefs() >= 1)
2091  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2092  .addImm(Imm);
2093  else {
2094  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
2095  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2096  TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2097  }
2098  return ResultReg;
2099 }
2100 
2101 Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
2102  uint32_t Idx) {
2103  Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2104  assert(Register::isVirtualRegister(Op0) &&
2105  "Cannot yet extract from physregs");
2106  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2107  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
2108  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
2109  ResultReg).addReg(Op0, 0, Idx);
2110  return ResultReg;
2111 }
2112 
2113 /// Emit MachineInstrs to compute the value of Op with all but the least
2114 /// significant bit set to zero.
2115 Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0) {
2116  return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
2117 }
2118 
2119 /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
2120 /// Emit code to ensure constants are copied into registers when needed.
2121 /// Remember the virtual registers that need to be added to the Machine PHI
2122 /// nodes as input. We cannot just directly add them, because expansion
2123 /// might result in multiple MBB's for one BB. As such, the start of the
2124 /// BB might correspond to a different MBB than the end.
2125 bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2126  const Instruction *TI = LLVMBB->getTerminator();
2127 
2128  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
2129  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
2130 
2131  // Check successor nodes' PHI nodes that expect a constant to be available
2132  // from this block.
2133  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
2134  const BasicBlock *SuccBB = TI->getSuccessor(succ);
2135  if (!isa<PHINode>(SuccBB->begin()))
2136  continue;
2137  MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
2138 
2139  // If this terminator has multiple identical successors (common for
2140  // switches), only handle each succ once.
2141  if (!SuccsHandled.insert(SuccMBB).second)
2142  continue;
2143 
2144  MachineBasicBlock::iterator MBBI = SuccMBB->begin();
2145 
2146  // At this point we know that there is a 1-1 correspondence between LLVM PHI
2147  // nodes and Machine PHI nodes, but the incoming operands have not been
2148  // emitted yet.
2149  for (const PHINode &PN : SuccBB->phis()) {
2150  // Ignore dead phi's.
2151  if (PN.use_empty())
2152  continue;
2153 
2154  // Only handle legal types. Two interesting things to note here. First,
2155  // by bailing out early, we may leave behind some dead instructions,
2156  // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2157  // own moves. Second, this check is necessary because FastISel doesn't
2158  // use CreateRegs to create registers, so it always creates
2159  // exactly one register for each non-void instruction.
2160  EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
2161  if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2162  // Handle integer promotions, though, because they're common and easy.
2163  if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2164  FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2165  return false;
2166  }
2167  }
2168 
2169  const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
2170 
2171  // Set the DebugLoc for the copy. Use the location of the operand if
2172  // there is one; otherwise use no location and let flushLocalValueMap fix it.
2173  DbgLoc = DebugLoc();
2174  if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2175  DbgLoc = Inst->getDebugLoc();
2176 
2177  Register Reg = getRegForValue(PHIOp);
2178  if (!Reg) {
2179  FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2180  return false;
2181  }
2182  FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
2183  DbgLoc = DebugLoc();
2184  }
2185  }
2186 
2187  return true;
2188 }
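// Illustration of the bookkeeping above (assumed IR; names are illustrative):
// for
//   br i1 %c, label %succ, label %other
// where %succ starts with
//   %p = phi i32 [ 7, %thisblock ], [ %v, %other ]
// the constant 7 is materialized into a virtual register here via
// getRegForValue, and the (MachinePHI, Reg) pair is recorded in
// FuncInfo.PHINodesToUpdate; SelectionDAGISel later adds the incoming operands
// to the machine PHI nodes once all predecessor blocks have been emitted.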
2189 
2190 bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2191  assert(LI->hasOneUse() &&
2192  "tryToFoldLoad expected a LoadInst with a single use");
2193  // We know that the load has a single use, but don't know what it is. If it
2194  // isn't one of the folded instructions, then we can't succeed here. Handle
2195  // this by scanning the single-use users of the load until we get to FoldInst.
2196  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2197 
2198  const Instruction *TheUser = LI->user_back();
2199  while (TheUser != FoldInst && // Scan up until we find FoldInst.
2200  // Stay in the right block.
2201  TheUser->getParent() == FoldInst->getParent() &&
2202  --MaxUsers) { // Don't scan too far.
2203  // If there are multiple or no uses of this instruction, then bail out.
2204  if (!TheUser->hasOneUse())
2205  return false;
2206 
2207  TheUser = TheUser->user_back();
2208  }
2209 
2210  // If we didn't find the fold instruction, then we failed to collapse the
2211  // sequence.
2212  if (TheUser != FoldInst)
2213  return false;
2214 
2215  // Don't try to fold volatile loads. Target has to deal with alignment
2216  // constraints.
2217  if (LI->isVolatile())
2218  return false;
2219 
2220  // Figure out which vreg this is going into. If there is no assigned vreg yet
2221  // then there actually was no reference to it. Perhaps the load is referenced
2222  // by a dead instruction.
2223  Register LoadReg = getRegForValue(LI);
2224  if (!LoadReg)
2225  return false;
2226 
2227  // We can't fold if this vreg has no uses or more than one use. Multiple uses
2228  // may mean that the instruction got lowered to multiple MIs, or the use of
2229  // the loaded value ended up being multiple operands of the result.
2230  if (!MRI.hasOneUse(LoadReg))
2231  return false;
2232 
2233  // If the register has fixups, there may be additional uses through a
2234  // different alias of the register.
2235  if (FuncInfo.RegsWithFixups.contains(LoadReg))
2236  return false;
2237 
2238  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
2239  MachineInstr *User = RI->getParent();
2240 
2241  // Set the insertion point properly. Folding the load can cause generation of
2242  // other random instructions (like sign extends) for addressing modes; make
2243  // sure they get inserted in a logical place before the new instruction.
2244  FuncInfo.InsertPt = User;
2245  FuncInfo.MBB = User->getParent();
2246 
2247  // Ask the target to try folding the load.
2248  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2249 }
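// Illustration (assumed IR and target behaviour, not from the source): given
//   %v = load i32, ptr %p
//   %s = add i32 %a, %v        ; the only use of %v, in the same block
// a target's tryToFoldLoadIntoMI may rewrite the add's machine instruction to
// read the operand from memory directly (e.g. an x86 "addl (%rdi), %eax"),
// so the standalone load never has to be emitted.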
2250 
2251 bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
2252  // Must be an add.
2253  if (!isa<AddOperator>(Add))
2254  return false;
2255  // Type size needs to match.
2256  if (DL.getTypeSizeInBits(GEP->getType()) !=
2257  DL.getTypeSizeInBits(Add->getType()))
2258  return false;
2259  // Must be in the same basic block.
2260  if (isa<Instruction>(Add) &&
2261  FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
2262  return false;
2263  // Must have a constant operand.
2264  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2265 }
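// Example of the pattern this helper accepts (illustrative IR):
//   %sum = add i64 %idx, 16
//   %gep = getelementptr i8, ptr %base, i64 %sum
// On a 64-bit target both types are 64 bits wide, %sum is defined in the same
// block, and the add has a constant operand, so a target's address computation
// can fold the 16 into its addressing-mode displacement instead of emitting a
// separate add.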
2266 
2267 MachineMemOperand *
2268 FastISel::createMachineMemOperandFor(const Instruction *I) const {
2269  const Value *Ptr;
2270  Type *ValTy;
2271  MaybeAlign Alignment;
2272  MachineMemOperand::Flags Flags;
2273  bool IsVolatile;
2274 
2275  if (const auto *LI = dyn_cast<LoadInst>(I)) {
2276  Alignment = LI->getAlign();
2277  IsVolatile = LI->isVolatile();
2278  Flags = MachineMemOperand::MOLoad;
2279  Ptr = LI->getPointerOperand();
2280  ValTy = LI->getType();
2281  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2282  Alignment = SI->getAlign();
2283  IsVolatile = SI->isVolatile();
2284  Flags = MachineMemOperand::MOStore;
2285  Ptr = SI->getPointerOperand();
2286  ValTy = SI->getValueOperand()->getType();
2287  } else
2288  return nullptr;
2289 
2290  bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
2291  bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
2292  bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
2293  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2294 
2295  AAMDNodes AAInfo = I->getAAMetadata();
2296 
2297  if (!Alignment) // Ensure that codegen never sees alignment 0.
2298  Alignment = DL.getABITypeAlign(ValTy);
2299 
2300  unsigned Size = DL.getTypeStoreSize(ValTy);
2301 
2302  if (IsVolatile)
2303  Flags |= MachineMemOperand::MOVolatile;
2304  if (IsNonTemporal)
2305  Flags |= MachineMemOperand::MONonTemporal;
2306  if (IsDereferenceable)
2307  Flags |= MachineMemOperand::MODereferenceable;
2308  if (IsInvariant)
2309  Flags |= MachineMemOperand::MOInvariant;
2310 
2311  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
2312  *Alignment, AAInfo, Ranges);
2313 }
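// Illustration (assumed IR): for
//   %v = load volatile i32, ptr %p, !nontemporal !0
// the code above yields an MMO with flags MOLoad | MOVolatile | MONonTemporal,
// a store size of 4 bytes, the load's alignment (falling back to the i32 ABI
// alignment if none is recorded), and any attached AA and range metadata.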
2314 
2315 CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
2316  // If both operands are the same, then try to optimize or fold the cmp.
2317  CmpInst::Predicate Predicate = CI->getPredicate();
2318  if (CI->getOperand(0) != CI->getOperand(1))
2319  return Predicate;
2320 
2321  switch (Predicate) {
2322  default: llvm_unreachable("Invalid predicate!");
2323  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2324  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
2325  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
2326  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
2327  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
2328  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
2329  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
2330  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
2331  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
2332  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
2333  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
2334  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
2335  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
2336  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
2337  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
2338  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;
2339 
2340  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
2341  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
2342  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
2343  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
2344  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
2345  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
2346  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
2347  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
2348  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
2349  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;
2350  }
2351 
2352  return Predicate;
2353 }
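// For example (illustrative): in "icmp ult i32 %x, %x" both operands are the
// same value, and x < x is never true, so the predicate folds to
// CmpInst::FCMP_FALSE; callers treat FCMP_FALSE / FCMP_TRUE results as
// compares with a known constant outcome.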
llvm::DIExpression::prepend
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
Definition: DebugInfoMetadata.cpp:1451
llvm::FunctionLoweringInfo::Fn
const Function * Fn
Definition: FunctionLoweringInfo.h:54
llvm::ISD::SUB
@ SUB
Definition: ISDOpcodes.h:240
i
i
Definition: README.txt:29
llvm::TargetRegisterInfo::getSubClassWithSubReg
virtual const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const
Returns the largest legal sub-class of RC that supports the sub-register index Idx.
Definition: TargetRegisterInfo.h:624
llvm::MCInstrDesc::getNumDefs
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:245
llvm::CmpInst::FCMP_ULE
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:734
ValueTypes.h
llvm::DbgVariableIntrinsic::getExpression
DIExpression * getExpression() const
Definition: IntrinsicInst.h:258
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
llvm::MachineInstrBuilder::addCImm
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
Definition: MachineInstrBuilder.h:136
Attrs
Function Attrs
Definition: README_ALTIVEC.txt:215
llvm::MachineInstr::setPhysRegsDeadExcept
void setPhysRegsDeadExcept(ArrayRef< Register > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
Definition: MachineInstr.cpp:1995
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:104
MachineInstr.h
MathExtras.h
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::FastISel::CallLoweringInfo::CallConv
CallingConv::ID CallConv
Definition: FastISel.h:84
llvm::DbgDeclareInst::getAddress
Value * getAddress() const
Definition: IntrinsicInst.h:311
llvm::FastISel::lowerCallTo
bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs)
Definition: FastISel.cpp:964
llvm::FastISel::selectPatchpoint
bool selectPatchpoint(const CallInst *I)
Definition: FastISel.cpp:762
llvm::FastISel::CallLoweringInfo::OutFlags
SmallVector< ISD::ArgFlagsTy, 16 > OutFlags
Definition: FastISel.h:94
llvm::MCSymbol
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
llvm::CmpInst::ICMP_EQ
@ ICMP_EQ
equal
Definition: InstrTypes.h:740
llvm::MachineInstrBuilder::addFPImm
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
Definition: MachineInstrBuilder.h:141
llvm::make_range
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Definition: iterator_range.h:53
Optional.h
llvm::InlineAsm::Extra_HasSideEffects
@ Extra_HasSideEffects
Definition: InlineAsm.h:228
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::MachineOperand::CreateReg
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
Definition: MachineOperand.h:800
llvm::ISD::OR
@ OR
Definition: ISDOpcodes.h:667
llvm::FastISel::getRegForValue
Register getRegForValue(const Value *V)
Create a virtual register and arrange for it to be assigned the value for the given LLVM value.
Definition: FastISel.cpp:238
llvm::ISD::ArgFlagsTy::setSwiftSelf
void setSwiftSelf()
Definition: TargetCallingConv.h:98
llvm::FastISel::~FastISel
virtual ~FastISel()
llvm::Value::hasOneUse
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
llvm::MachineBasicBlock::getBasicBlock
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
Definition: MachineBasicBlock.h:205
llvm::generic_gep_type_iterator
Definition: GetElementPtrTypeIterator.h:31
llvm::FastISel::fastEmit_f
virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode, const ConstantFP *FPImm)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1847
llvm::ISD::BITCAST
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:886
Metadata.h
llvm::MachineRegisterInfo::createVirtualRegister
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Definition: MachineRegisterInfo.cpp:156
llvm::ISD::ArgFlagsTy::setNest
void setNest()
Definition: TargetCallingConv.h:119
llvm::CmpInst::Predicate
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:719
IntrinsicInst.h
llvm::FastISel::selectCast
bool selectCast(const User *I, unsigned Opcode)
Definition: FastISel.cpp:1377
MCInstrDesc.h
llvm::MCContext
Context object for machine code objects.
Definition: MCContext.h:74
llvm::MachineInstrBuilder::add
const MachineInstrBuilder & add(const MachineOperand &MO) const
Definition: MachineInstrBuilder.h:224
llvm::Function
Definition: Function.h:60
llvm::ISD::ConstantFP
@ ConstantFP
Definition: ISDOpcodes.h:77
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
llvm::ISD::UDIV
@ UDIV
Definition: ISDOpcodes.h:243
llvm::IntrinsicInst::getIntrinsicID
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:53
llvm::DataLayout::getTypeSizeInBits
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:673
llvm::FastISel::updateValueMap
void updateValueMap(const Value *I, Register Reg, unsigned NumRegs=1)
Update the value map to include the new mapping for this instruction, or insert an extra copy to get ...
Definition: FastISel.cpp:362
llvm::FunctionLoweringInfo::InsertPt
MachineBasicBlock::iterator InsertPt
MBB - The current insert position inside the current block.
Definition: FunctionLoweringInfo.h:156
llvm::ISD::ArgFlagsTy::setMemAlign
void setMemAlign(Align A)
Definition: TargetCallingConv.h:148
GetElementPtrTypeIterator.h
llvm::Target
Target - Wrapper for Target specific information.
Definition: TargetRegistry.h:140
llvm::FastISel::fastEmit_ri_
Register fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, uint64_t Imm, MVT ImmType)
This method is a wrapper of fastEmit_ri.
Definition: FastISel.cpp:1861
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1185
llvm::FastISel::fastEmitInst_f
Register fastEmitInst_f(unsigned MachineInstOpcode, const TargetRegisterClass *RC, const ConstantFP *FPImm)
Emit a MachineInstr with a floating point immediate, and a result register in the given register clas...
Definition: FastISel.cpp:2041
Statistic.h
InlineAsm.h
llvm::FastISel::CallLoweringInfo::setIsPatchPoint
CallLoweringInfo & setIsPatchPoint(bool Value=true)
Definition: FastISel.h:181
llvm::FastISel::FastISel
FastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo, bool SkipTargetIndependentISel=false)
Definition: FastISel.cpp:1811
llvm::FastISel::CallLoweringInfo::ResultReg
Register ResultReg
Definition: FastISel.h:90
llvm::FastISel::CallLoweringInfo::InRegs
SmallVector< Register, 4 > InRegs
Definition: FastISel.h:97
llvm::MachineFunction::getMachineMemOperand
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Definition: MachineFunction.cpp:456
llvm::FastISel::finishBasicBlock
void finishBasicBlock()
Flush the local value map.
Definition: FastISel.cpp:136
llvm::FunctionLoweringInfo::RegsWithFixups
DenseSet< Register > RegsWithFixups
Definition: FunctionLoweringInfo.h:144
llvm::CmpInst::FCMP_ONE
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:727
llvm::DataLayout::getTypeStoreSize
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition: DataLayout.h:474
ErrorHandling.h
llvm::FastISel::lowerArguments
bool lowerArguments()
Do "fast" instruction selection for function arguments and append the machine instructions to the cur...
Definition: FastISel.cpp:138
llvm::FastISel::fastEmitZExtFromI1
Register fastEmitZExtFromI1(MVT VT, unsigned Op0)
Emit MachineInstrs to compute the value of Op with all but the least significant bit set to zero.
Definition: FastISel.cpp:2115
llvm::X86Disassembler::Reg
Reg
All possible values of the reg field in the ModR/M byte.
Definition: X86DisassemblerDecoder.h:462
llvm::ISD::InputArg::Used
bool Used
Definition: TargetCallingConv.h:199
llvm::MachineMemOperand::MOInvariant
@ MOInvariant
The memory access always returns the same value (or traps).
Definition: MachineMemOperand.h:143
llvm::ISD::EH_LABEL
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
Definition: ISDOpcodes.h:1026
llvm::CmpInst::ICMP_NE
@ ICMP_NE
not equal
Definition: InstrTypes.h:741
llvm::FastISel::selectFreeze
bool selectFreeze(const User *I)
Definition: FastISel.cpp:1437
llvm::AttributeList::get
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute >> Attrs)
Create an AttributeList with the specified parameters in it.
Definition: Attributes.cpp:1015
llvm::InlineAsm::Extra_AsmDialect
@ Extra_AsmDialect
Definition: InlineAsm.h:230
llvm::FastISel::enterLocalValueArea
SavePoint enterLocalValueArea()
Prepare InsertPt to begin inserting instructions into the local value area and return the old insert ...
Definition: FastISel.cpp:435
llvm::Triple
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
MachineBasicBlock.h
llvm::ISD::FP_TO_SINT
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:819
NewExpr
Definition: ItaniumDemangle.h:1922
llvm::CmpInst::ICMP_SGT
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:746
llvm::AAMDNodes
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:652
llvm::Function::getContext
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:319
llvm::FastISel::CallLoweringInfo::OutVals
SmallVector< Value *, 16 > OutVals
Definition: FastISel.h:93
llvm::DenseMapIterator
Definition: DenseMap.h:57
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
DenseMap.h
llvm::Triple::x86_64
@ x86_64
Definition: Triple.h:86
llvm::FunctionLoweringInfo::MBB
MachineBasicBlock * MBB
MBB - The current block.
Definition: FunctionLoweringInfo.h:153
llvm::InlineAsm::Extra_IsAlignStack
@ Extra_IsAlignStack
Definition: InlineAsm.h:229
llvm::diagnoseDontCall
void diagnoseDontCall(const CallInst &CI)
Definition: DiagnosticInfo.cpp:410
llvm::AttributeList
Definition: Attributes.h:408
llvm::FastISel::fastEmitInst_rii
Register fastEmitInst_rii(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, uint64_t Imm1, uint64_t Imm2)
Emit a MachineInstr with one register operand and two immediate operands.
Definition: FastISel.cpp:2017
TargetInstrInfo.h
llvm::FastISel::fastEmit_rr
virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, unsigned Op1)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1838
llvm::MachineMemOperand
A description of a memory reference used in the backend.
Definition: MachineMemOperand.h:126
llvm::CallBase::getFunctionType
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1254
llvm::sys::path::begin
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:226
llvm::FastISel::CallLoweringInfo::RetSExt
bool RetSExt
Definition: FastISel.h:71
llvm::MachineMemOperand::MODereferenceable
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
Definition: MachineMemOperand.h:141
llvm::ComputeLinearIndex
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
Definition: Analysis.cpp:33
llvm::DbgVariableIntrinsic::getVariable
DILocalVariable * getVariable() const
Definition: IntrinsicInst.h:254
llvm::Optional
Definition: APInt.h:33
llvm::FastISel::TLI
const TargetLowering & TLI
Definition: FastISel.h:211
llvm::SmallPtrSet
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:450
llvm::Triple::isOSLinux
bool isOSLinux() const
Tests whether the OS is Linux.
Definition: Triple.h:619
Operator.h
llvm::CmpInst::ICMP_SLE
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:749
llvm::CallBase::arg_begin
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1316
llvm::DIExpression
DWARF expression.
Definition: DebugInfoMetadata.h:2548
llvm::gep_type_begin
gep_type_iterator gep_type_begin(const User *GEP)
Definition: GetElementPtrTypeIterator.h:123
llvm::CmpInst::FCMP_OGT
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:723
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1620
llvm::FastISel::getRegForGEPIndex
Register getRegForGEPIndex(const Value *Idx)
This is a wrapper around getRegForValue that also takes care of truncating or sign-extending the give...
Definition: FastISel.cpp:383
llvm::MachineRegisterInfo::defusechain_iterator::getOperandNo
unsigned getOperandNo() const
getOperandNo - Return the operand # of this MachineOperand in its MachineInstr.
Definition: MachineRegisterInfo.h:1081
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
F
#define F(x, y, z)
Definition: MD5.cpp:55
MachineRegisterInfo.h
llvm::FastISel::CallLoweringInfo::RetZExt
bool RetZExt
Definition: FastISel.h:72
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:55
llvm::ComputeValueVTs
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:121
llvm::ISD::INLINEASM
@ INLINEASM
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:1018
llvm::gep_type_end
gep_type_iterator gep_type_end(const User *GEP)
Definition: GetElementPtrTypeIterator.h:130
llvm::EVT::isSimple
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:129
MachineValueType.h
llvm::CmpInst::FCMP_ULT
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:733
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::MachineFrameInfo::setHasStackMap
void setHasStackMap(bool s=true)
Definition: MachineFrameInfo.h:383
Arg
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Definition: AMDGPULibCalls.cpp:186
Instruction.h
llvm::MachineBasicBlock::reverse_iterator
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
Definition: MachineBasicBlock.h:244
llvm::ISD::InputArg::ArgVT
EVT ArgVT
Definition: TargetCallingConv.h:198
TargetLowering.h
llvm::LLVMContext::OB_funclet
@ OB_funclet
Definition: LLVMContext.h:91
llvm::TargetInstrInfo::getRegClass
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
Definition: TargetInstrInfo.cpp:45
llvm::Instruction::getNumSuccessors
unsigned getNumSuccessors() const
Return the number of successors that this instruction has.
Definition: Instruction.cpp:777
llvm::Intrinsic::getType
FunctionType * getType(LLVMContext &Context, ID id, ArrayRef< Type * > Tys=None)
Return the function type for an intrinsic.
Definition: Function.cpp:1366
llvm::MVT::i1
@ i1
Definition: MachineValueType.h:43
llvm::MCContext::getOrCreateSymbol
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:204
llvm::APFloat::convertToInteger
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
Definition: APFloat.h:1104
llvm::Log2_64
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:629
GlobalValue.h
TargetMachine.h
llvm::MachineInstr::setHeapAllocMarker
void setHeapAllocMarker(MachineFunction &MF, MDNode *MD)
Set a marker on instructions that denotes where we should create and emit heap alloc site labels.
Definition: MachineInstr.cpp:478
llvm::FastISel::FuncInfo
FunctionLoweringInfo & FuncInfo
Definition: FastISel.h:202
Constants.h
llvm::ISD::Constant
@ Constant
Definition: ISDOpcodes.h:76
llvm::ISD::ZERO_EXTEND
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:763
llvm::FunctionLoweringInfo::MBBMap
DenseMap< const BasicBlock *, MachineBasicBlock * > MBBMap
MBBMap - A mapping from LLVM basic blocks to their machine code entry.
Definition: FunctionLoweringInfo.h:72
llvm::DbgValueInst
This represents the llvm.dbg.value instruction.
Definition: IntrinsicInst.h:348
llvm::MachineOperand::CreateImm
static MachineOperand CreateImm(int64_t Val)
Definition: MachineOperand.h:782
SmallString.h
FunctionLoweringInfo.h
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::FastISel::createResultReg
Register createResultReg(const TargetRegisterClass *RC)
Definition: FastISel.cpp:1896
llvm::User
Definition: User.h:44
llvm::LibFunc
LibFunc
Definition: TargetLibraryInfo.h:35
llvm::EVT
Extended Value Type.
Definition: ValueTypes.h:34
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::FastISel::CallLoweringInfo::clearIns
void clearIns()
Definition: FastISel.h:194
llvm::CmpInst::ICMP_ULE
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:745
InstrTypes.h
llvm::BasicBlock::begin
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:297
MCContext.h
llvm::PatchPointOpers::TargetPos
@ TargetPos
Definition: StackMaps.h:79
llvm::FastISel::CallLoweringInfo::CB
const CallBase * CB
Definition: FastISel.h:88
llvm::CallingConv::AnyReg
@ AnyReg
Definition: CallingConv.h:62
llvm::FastISel::leaveLocalValueArea
void leaveLocalValueArea(SavePoint Old)
Reset InsertPt to the given old insert position.
Definition: FastISel.cpp:441
llvm::TargetRegisterClass
Definition: TargetRegisterInfo.h:45
llvm::CmpInst::FCMP_UGE
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:732
llvm::FastISel::selectXRayTypedEvent
bool selectXRayTypedEvent(const CallInst *II)
Definition: FastISel.cpp:919
llvm::ISD::TRUNCATE
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:769
llvm::ISD::SRA
@ SRA
Definition: ISDOpcodes.h:692
TargetLibraryInfo.h
addStackMapLiveVars
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
Definition: SelectionDAGBuilder.cpp:9363
TII
const HexagonInstrInfo * TII
Definition: HexagonCopyToCombine.cpp:127
llvm::Mangler::getNameWithPrefix
void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV, bool CannotUsePrivateLabel) const
Print the appropriate prefix and the specified global variable's name.
Definition: Mangler.cpp:119
llvm::MaybeAlign
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:109
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:197
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:48
llvm::FastISel::fastEmit_
virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode)
This method is called by target-independent code to request that an instruction with the given type a...
Definition: FastISel.cpp:1832
llvm::IntegerType
Class to represent integer types.
Definition: DerivedTypes.h:40
llvm::CmpInst::FCMP_UNO
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:729
llvm::Instruction
Definition: Instruction.h:42
llvm::DataLayout::getABITypeAlign
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
Definition: DataLayout.cpp:829
llvm::ISD::SINT_TO_FP
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:773
llvm::APSInt
An arbitrary precision integer that knows its signedness.
Definition: APSInt.h:23
llvm::BasicBlock::phis
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:355
llvm::FastISel::selectStackmap
bool selectStackmap(const CallInst *I)
Definition: FastISel.cpp:650
llvm::STATISTIC
STATISTIC(NumFunctions, "Total number of functions")
llvm::ConstantFP
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:257
isCommutative
static bool isCommutative(Instruction *I)
Definition: SLPVectorizer.cpp:276
llvm::HexagonInstrInfo::insertBranch
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
Insert branch code into the end of the specified MachineBasicBlock.
Definition: HexagonInstrInfo.cpp:625
APFloat.h
llvm::FastISel::fastEmit_ri
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1852
llvm::MachineOperand::CreateFI
static MachineOperand CreateFI(int Idx)
Definition: MachineOperand.h:832
llvm::FastISel::CallLoweringInfo::IsInReg
bool IsInReg
Definition: FastISel.h:74
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:919
llvm::DILocalVariable::isValidLocationForIntrinsic
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
Definition: DebugInfoMetadata.h:3127
llvm::FastISel::CallLoweringInfo::clearOuts
void clearOuts()
Definition: FastISel.h:188
llvm::CmpInst::FCMP_OEQ
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:722
DebugLoc.h
SmallPtrSet.h
llvm::Instruction::getSuccessor
BasicBlock * getSuccessor(unsigned Idx) const
Return the specified successor. This instruction must be a terminator.
Definition: Instruction.cpp:789
llvm::DbgVariableIntrinsic::hasArgList
bool hasArgList() const
Definition: IntrinsicInst.h:228
llvm::FastISel::MRI
MachineRegisterInfo & MRI
Definition: FastISel.h:204
llvm::FastISel::CallLoweringInfo::Call
MachineInstr * Call
Definition: FastISel.h:89
llvm::CmpInst::FCMP_OLT
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:725
llvm::ISD::AND
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:666
llvm::ISD::ArgFlagsTy::setInReg
void setInReg()
Definition: TargetCallingConv.h:80
Align
uint64_t Align
Definition: ELFObjHandler.cpp:81
PatternMatch.h
llvm::CallInst::isTailCall
bool isTailCall() const
Definition: Instructions.h:1668
llvm::MCID::Call
@ Call
Definition: MCInstrDesc.h:155
llvm::Triple::getArch
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:345
llvm::CallBase::getCallingConv
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1455
llvm::MachineRegisterInfo::getVRegDef
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
Definition: MachineRegisterInfo.cpp:396
llvm::MachineOperand::getParent
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Definition: MachineOperand.h:237
llvm::MachineInstrBuilder::addExternalSymbol
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:184
llvm::EVT::bitsLT
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition: ValueTypes.h:272
llvm::FastISel::createMachineMemOperandFor
MachineMemOperand * createMachineMemOperandFor(const Instruction *I) const
Create a machine mem operand from the given instruction.
Definition: FastISel.cpp:2268
llvm::FunctionLoweringInfo::OrigNumPHINodesToUpdate
unsigned OrigNumPHINodesToUpdate
Definition: FunctionLoweringInfo.h:179
llvm::CallingConv::ID
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
Type.h
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:94
llvm::Instruction::getMetadata
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:279
llvm::SmallString< 32 >
llvm::ISD::ArgFlagsTy::setSExt
void setSExt()
Definition: TargetCallingConv.h:77
llvm::CmpInst::FCMP_FALSE
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:721
llvm::MachineRegisterInfo::getRegClass
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
Definition: MachineRegisterInfo.h:642
llvm::RegState::Dead
@ Dead
Unused definition.
Definition: MachineInstrBuilder.h:50
llvm::CmpInst
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:709
llvm::FastISel::fastLowerIntrinsicCall
virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II)
This method is called by target-independent code to do target- specific intrinsic lowering.
Definition: FastISel.cpp:1828
llvm::ISD::FADD
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:390
llvm::InlineAsm
Definition: InlineAsm.h:31
llvm::tgtok::In
@ In
Definition: TGLexer.h:51
BasicBlock.h
llvm::APFloat
Definition: APFloat.h:700
llvm::AttributeList::ReturnIndex
@ ReturnIndex
Definition: Attributes.h:411
llvm::FastISel::CallLoweringInfo::NumResultRegs
unsigned NumResultRegs
Definition: FastISel.h:91
llvm::FastISel::selectInstruction
bool selectInstruction(const Instruction *I)
Do "fast" instruction selection for the given LLVM IR instruction and append the generated machine in...
Definition: FastISel.cpp:1477
llvm::FastISel::DbgLoc
DebugLoc DbgLoc
Definition: FastISel.h:207
llvm::PPC::Predicate
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
Definition: PPCPredicates.h:26
VI
@ VI
Definition: SIInstrInfo.cpp:7842
BranchProbabilityInfo.h
llvm::EVT::getSizeInBits
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:340
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:66
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
uint64_t
llvm::FastISel::tryToFoldLoadIntoMI
virtual bool tryToFoldLoadIntoMI(MachineInstr *, unsigned, const LoadInst *)
The specified machine instr operand is a vreg, and that vreg is being provided by the specified load ...
Definition: FastISel.h:300
llvm::TargetLoweringBase::isTypeLegal
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
Definition: TargetLowering.h:934
llvm::DbgDeclareInst
This represents the llvm.dbg.declare instruction.
Definition: IntrinsicInst.h:309
llvm::DbgLabelInst
This represents the llvm.dbg.label instruction.
Definition: IntrinsicInst.h:371
llvm::Instruction::user_back
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:88
llvm::FastISel::fastLowerCall
virtual bool fastLowerCall(CallLoweringInfo &CLI)
This method is called by target-independent code to do target- specific call lowering.
Definition: FastISel.cpp:1826
llvm::FastISel::fastEmit_i
virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1843
llvm::FastISel::fastEmitBranch
void fastEmitBranch(MachineBasicBlock *MSucc, const DebugLoc &DbgLoc)
Emit an unconditional branch to the given block, unless it is the immediate (fall-through) successor,...
Definition: FastISel.cpp:1561
llvm::DbgLabelInst::getLabel
DILabel * getLabel() const
Definition: IntrinsicInst.h:373
llvm::PatchPointOpers::NBytesPos
@ NBytesPos
Definition: StackMaps.h:79
llvm::ISD::TRAP
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1126
llvm::MachinePointerInfo
This class contains a discriminated union of information about pointers in memory operands,...
Definition: MachineMemOperand.h:38
move
compiles ldr LCPI1_0 ldr ldr mov lsr tst moveq r1 ldr LCPI1_1 and r0 bx lr It would be better to do something like to fold the shift into the conditional move
Definition: README.txt:546
llvm::FastISel::CallLoweringInfo::getArgs
ArgListTy & getArgs()
Definition: FastISel.h:186
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::FastISel::selectIntrinsicCall
bool selectIntrinsicCall(const IntrinsicInst *II)
Definition: FastISel.cpp:1193
findLocalRegDef
static Register findLocalRegDef(MachineInstr &MI)
Return the defined register if this instruction defines exactly one virtual register and uses no othe...
Definition: FastISel.cpp:160
llvm::FastISel::fastEmitInst_rri
Register fastEmitInst_rri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1, uint64_t Imm)
Emit a MachineInstr with two register operands, an immediate, and a result register in the given regi...
Definition: FastISel.cpp:2060
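A hedged sketch of how a target subclass typically calls the fastEmitInst_* helpers (they are protected members, so this lives in derived-class code); MyTargetFastISel and TGT::MADDrri are placeholders, not an in-tree backend or opcode.
  // Two register operands plus one immediate -> the _rri form. The helper
  // constrains the operands, creates the result vreg in RC, and returns it.
  Register MyTargetFastISel::emitMulAdd(const TargetRegisterClass *RC,
                                        Register Mul0, Register Mul1,
                                        uint64_t AddImm) {
    return fastEmitInst_rri(TGT::MADDrri, RC, Mul0, Mul1, AddImm);
  }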
llvm::EVT::getEVT
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:561
I
#define I(x, y, z)
Definition: MD5.cpp:58
Analysis.h
llvm::make_early_inc_range
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:608
llvm::ISD::ArgFlagsTy::setOrigAlign
void setOrigAlign(Align A)
Definition: TargetCallingConv.h:164
llvm::FastISel::CallLoweringInfo::IsReturnValueUsed
bool IsReturnValueUsed
Definition: FastISel.h:76
llvm::FastISel::CallLoweringInfo
Definition: FastISel.h:69
llvm::Register::isVirtualRegister
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:71
llvm::MachineMemOperand::Flags
Flags
Flags values. These may be or'd together.
Definition: MachineMemOperand.h:129
llvm::ISD::InputArg
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
Definition: TargetCallingConv.h:195
llvm::MVT::i8
@ i8
Definition: MachineValueType.h:44
llvm::FastISel::fastEmit_r
virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0)
This method is called by target-independent code to request that an instruction with the given type,...
Definition: FastISel.cpp:1834
llvm::DIExpression::DerefBefore
@ DerefBefore
Definition: DebugInfoMetadata.h:2754
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::PatchPointOpers::NArgPos
@ NArgPos
Definition: StackMaps.h:79
llvm::EVT::getIntegerVT
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:64
llvm::FunctionLoweringInfo
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
Definition: FunctionLoweringInfo.h:52
llvm::MVT::Other
@ Other
Definition: MachineValueType.h:42
llvm::MVT::getSizeInBits
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
Definition: MachineValueType.h:864
llvm::TargetLoweringBase::ArgListEntry
Definition: TargetLowering.h:282
llvm::MachineFunction::getFrameInfo
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Definition: MachineFunction.h:656
llvm::ISD::ArgFlagsTy::setInConsecutiveRegs
void setInConsecutiveRegs(bool Flag=true)
Definition: TargetCallingConv.h:125
SI
StandardInstrumentations SI(Debug, VerifyEach)
llvm::CmpInst::FCMP_OGE
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:724
llvm::ISD::ArgFlagsTy::setSwiftError
void setSwiftError()
Definition: TargetCallingConv.h:104
llvm::FastISel::fastEmitInst_
Register fastEmitInst_(unsigned MachineInstOpcode, const TargetRegisterClass *RC)
Emit a MachineInstr with no operands and a result register in the given register class.
Definition: FastISel.cpp:1917
llvm::CmpInst::ICMP_UGE
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:743
llvm::MachineMemOperand::MONonTemporal
@ MONonTemporal
The memory access is non-temporal.
Definition: MachineMemOperand.h:139
llvm::FastISel::fastLowerArguments
virtual bool fastLowerArguments()
This method is called by target-independent code to do target-specific argument lowering.
Definition: FastISel.cpp:1824
APSInt.h
llvm::FastISel::ArgListTy
TargetLoweringBase::ArgListTy ArgListTy
Definition: FastISel.h:68
MachineModuleInfo.h
llvm::MVT
Machine Value Type.
Definition: MachineValueType.h:31
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:97
llvm::MDNode
Metadata node.
Definition: Metadata.h:937
llvm::ISD::ArgFlagsTy::setByVal
void setByVal()
Definition: TargetCallingConv.h:86
llvm::CallBase::arg_end
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1322
Builder
assume Assume Builder
Definition: AssumeBundleBuilder.cpp:651
llvm::BranchInst::isUnconditional
bool isUnconditional() const
Definition: Instructions.h:3177
llvm::ISD::ArgFlagsTy::setByValSize
void setByValSize(unsigned S)
Definition: TargetCallingConv.h:173
Mangler.h
llvm::CmpInst::ICMP_SLT
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:748
llvm::MachineRegisterInfo::use_nodbg_empty
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
Definition: MachineRegisterInfo.h:574
llvm::FastISel::selectOperator
bool selectOperator(const User *I, unsigned Opcode)
Do "fast" instruction selection for the given LLVM IR operator (Instruction or ConstantExpr),...
Definition: FastISel.cpp:1682
llvm::FastISel::CallLoweringInfo::IsVarArg
bool IsVarArg
Definition: FastISel.h:73
TargetOptions.h
llvm::MachineBasicBlock::iterator
MachineInstrBundleIterator< MachineInstr > iterator
Definition: MachineBasicBlock.h:242
Mul
BinaryOperator * Mul
Definition: X86PartialReduction.cpp:70
llvm::MachineOperand::CreateGA
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
Definition: MachineOperand.h:859
DataLayout.h
llvm::MVT::i64
@ i64
Definition: MachineValueType.h:47
llvm::StructType
Class to represent struct types.
Definition: DerivedTypes.h:213
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:58
llvm::ISD::ArgFlagsTy::setInAlloca
void setInAlloca()
Definition: TargetCallingConv.h:92
MBBI
MachineBasicBlock MachineBasicBlock::iterator MBBI
Definition: AArch64SLSHardening.cpp:75
llvm::CmpInst::ICMP_ULT
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:744
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::FastISel::canFoldAddIntoGEP
bool canFoldAddIntoGEP(const User *GEP, const Value *Add)
Check if Add is an add that can be safely folded into GEP.
Definition: FastISel.cpp:2251
llvm::ISD::SREM
@ SREM
Definition: ISDOpcodes.h:244
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
llvm::InlineAsm::Extra_IsConvergent
@ Extra_IsConvergent
Definition: InlineAsm.h:233
uint32_t
llvm::BranchProbability
Definition: BranchProbability.h:29
llvm::Value::getContext
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:991
llvm::ISD::ArgFlagsTy
Definition: TargetCallingConv.h:27
TargetSubtargetInfo.h
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::AMDGPU::HSAMD::Kernel::Arg::Key::IsVolatile
constexpr char IsVolatile[]
Key for Kernel::Arg::Metadata::mIsVolatile.
Definition: AMDGPUMetadata.h:199
llvm::Type::getContext
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
llvm::LoadInst
An instruction for reading from memory.
Definition: Instructions.h:176
isRegUsedByPhiNodes
static bool isRegUsedByPhiNodes(Register DefReg, FunctionLoweringInfo &FuncInfo)
Definition: FastISel.cpp:177
llvm::ISD::FMUL
@ FMUL
Definition: ISDOpcodes.h:392
llvm::MachineMemOperand::MOVolatile
@ MOVolatile
The memory access is volatile.
Definition: MachineMemOperand.h:137
llvm::ISD::InputArg::Flags
ArgFlagsTy Flags
Definition: TargetCallingConv.h:196
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:133
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::FastISel::fastEmitInst_ri
Register fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, uint64_t Imm)
Emit a MachineInstr with a register operand, an immediate, and a result register in the given registe...
Definition: FastISel.cpp:1995
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::ISD::XOR
@ XOR
Definition: ISDOpcodes.h:668
Argument.h
llvm::ISD::ArgFlagsTy::setCFGuardTarget
void setCFGuardTarget()
Definition: TargetCallingConv.h:107
Callee
amdgpu Simplify well known AMD library false FunctionCallee Callee
Definition: AMDGPULibCalls.cpp:186
CallingConv.h
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
Attributes.h
llvm::CmpInst::FCMP_UGT
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:731
llvm::MachineRegisterInfo::reg_begin
reg_iterator reg_begin(Register RegNo) const
Definition: MachineRegisterInfo.h:289
Constant.h
llvm::FastISel::tryToFoldLoad
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst)
We're checking to see if we can fold LI into FoldInst.
Definition: FastISel.cpp:2190
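A sketch of the gating condition behind this helper (assumption: LI is the LoadInst in question): only a load with a single user in the same basic block is a folding candidate, and the target hook tryToFoldLoadIntoMI() makes the final call.
  // Single use, same block: worth asking the target whether the consuming
  // MachineInstr can absorb the load instead of emitting a separate one.
  if (LI->hasOneUse() && LI->user_back()->getParent() == LI->getParent()) {
    // ... tryToFoldLoad(LI, FoldInst) -> tryToFoldLoadIntoMI(...) ...
  }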
llvm::APFloatBase::rmTowardZero
static constexpr roundingMode rmTowardZero
Definition: APFloat.h:193
llvm::Constant::getNullValue
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:350
llvm::FastISel::lowerCall
bool lowerCall(const CallInst *I)
Definition: FastISel.cpp:1114
llvm::FastISel::selectExtractValue
bool selectExtractValue(const User *U)
Definition: FastISel.cpp:1642
getReturnAttrs
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
Definition: FastISel.cpp:942
llvm::FastISel::CallLoweringInfo::OutRegs
SmallVector< Register, 16 > OutRegs
Definition: FastISel.h:95
uint16_t
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:341
MachineFrameInfo.h
llvm::FastISel::selectFNeg
bool selectFNeg(const User *I, const Value *In)
Emit an FNeg operation.
Definition: FastISel.cpp:1600
llvm::Type::isEmptyTy
bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
Definition: Type.cpp:147
llvm::CallBase::arg_size
unsigned arg_size() const
Definition: InstrTypes.h:1339
llvm::ExtractValueInst
This instruction extracts a struct member or array element value from an aggregate value.
Definition: Instructions.h:2411
ISDOpcodes.h
Casting.h
DiagnosticInfo.h
Function.h
llvm::DIExpression::constantFold
std::pair< DIExpression *, const ConstantInt * > constantFold(const ConstantInt *CI)
Try to shorten an expression with an initial constant operand.
Definition: DebugInfoMetadata.cpp:1646
llvm::TargetLibraryInfo
Provides information about what library functions are available for the current target.
Definition: TargetLibraryInfo.h:222
llvm::ISD::SDIV
@ SDIV
Definition: ISDOpcodes.h:242
llvm::FastISel::CallLoweringInfo::RetTy
Type * RetTy
Definition: FastISel.h:70
StackMaps.h
llvm::FastISel::selectXRayCustomEvent
bool selectXRayCustomEvent(const CallInst *II)
Definition: FastISel.cpp:900
llvm::ISD::InputArg::VT
MVT VT
Definition: TargetCallingConv.h:197
llvm::CmpInst::ICMP_SGE
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:747
llvm::MCID::Add
@ Add
Definition: MCInstrDesc.h:185
llvm::CallBase::getCalledOperand
Value * getCalledOperand() const
Definition: InstrTypes.h:1389
llvm::MachineMemOperand::MOStore
@ MOStore
The memory access writes data.
Definition: MachineMemOperand.h:135
llvm::ISD::ADD
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:46
llvm::ARMBuildAttrs::Symbol
@ Symbol
Definition: ARMBuildAttributes.h:83
llvm::RISCVMatInt::Imm
@ Imm
Definition: RISCVMatInt.h:23
llvm::Type::getVoidTy
static Type * getVoidTy(LLVMContext &C)
Definition: Type.cpp:222
FastISel.h
llvm::FastISel::CallLoweringInfo::Ins
SmallVector< ISD::InputArg, 4 > Ins
Definition: FastISel.h:96
llvm::FastISel::selectGetElementPtr
bool selectGetElementPtr(const User *I)
Definition: FastISel.cpp:537
Instructions.h
llvm::FunctionLoweringInfo::MF
MachineFunction * MF
Definition: FunctionLoweringInfo.h:55
llvm::ISD::FSUB
@ FSUB
Definition: ISDOpcodes.h:391
llvm::GetReturnInfo
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
Definition: TargetLoweringBase.cpp:1614
llvm::ISD::SHL
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:691
llvm::LoadInst::isVolatile
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:213
llvm::MachineRegisterInfo::hasOneUse
bool hasOneUse(Register RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
Definition: MachineRegisterInfo.h:518
SmallVector.h
llvm::ISD::FREM
@ FREM
Definition: ISDOpcodes.h:394
llvm::MachineOperand::CreateRegMask
static MachineOperand CreateRegMask(const uint32_t *Mask)
CreateRegMask - Creates a register mask operand referencing Mask.
Definition: MachineOperand.h:895
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:277
llvm::Instruction::getDebugLoc
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:367
User.h
MachineInstrBuilder.h
llvm::ISD::MUL
@ MUL
Definition: ISDOpcodes.h:241
llvm::ISD::UREM
@ UREM
Definition: ISDOpcodes.h:245
llvm::CmpInst::ICMP_UGT
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:742
llvm::ISD::ArgFlagsTy::setSRet
void setSRet()
Definition: TargetCallingConv.h:83
llvm::DbgValueInst::getValue
Value * getValue(unsigned OpIdx=0) const
Definition: IntrinsicInst.h:352
llvm::CmpInst::FCMP_UNE
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:735
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:328
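A minimal sketch of the BuildMI() builder pattern used throughout FastISel: create an instruction at a given insert point and chain operand adders. Opcode, ResultReg and Op0 are assumed to be valid for the current target; the helper name is illustrative only.
  // Build "ResultReg = Opcode Op0, Imm" at InsertPt.
  static void emitRegImmInst(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator InsertPt,
                             const DebugLoc &DL, const TargetInstrInfo &TII,
                             unsigned Opcode, Register ResultReg, Register Op0,
                             int64_t Imm) {
    BuildMI(MBB, InsertPt, DL, TII.get(Opcode), ResultReg)
        .addReg(Op0)   // source register operand
        .addImm(Imm);  // immediate operand
  }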
llvm::CallBase::getArgOperand
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1341
N
#define N
llvm::FastISel::fastEmitInst_rrr
Register fastEmitInst_rrr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1, unsigned Op2)
Emit a MachineInstr with three register operands and a result register in the given register class.
Definition: FastISel.cpp:1969
llvm::Instruction::getParent
const BasicBlock * getParent() const
Definition: Instruction.h:91
llvm::ISD::SRL
@ SRL
Definition: ISDOpcodes.h:693
llvm::FastISel::removeDeadCode
void removeDeadCode(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E)
Remove all dead instructions between the I and E.
Definition: FastISel.cpp:415
llvm::CmpInst::getPredicate
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:809
llvm::FastISel::CallLoweringInfo::setTailCall
CallLoweringInfo & setTailCall(bool Value=true)
Definition: FastISel.h:176
llvm::TargetLoweringBase::getRegClassFor
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
Definition: TargetLowering.h:891
llvm::isInTailCallPosition
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition: Analysis.cpp:522
llvm::PHINode
Definition: Instructions.h:2664
llvm::BasicBlock::getTerminator
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:119
llvm::MachineRegisterInfo::constrainRegClass
const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
Definition: MachineRegisterInfo.cpp:83
llvm::MachineRegisterInfo::defusechain_iterator
reg_begin/reg_end - Provide iteration support to walk over all definitions and uses of a register wit...
Definition: MachineRegisterInfo.h:274
llvm::MachineInstrBuilder::addMetadata
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
Definition: MachineInstrBuilder.h:236
MachineMemOperand.h
llvm::CmpInst::FCMP_OLE
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:726
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
llvm::sys::path::rend
reverse_iterator rend(StringRef path)
Get reverse end iterator over path.
Definition: Path.cpp:306
MachineOperand.h
DerivedTypes.h
TM
const char LLVMTargetMachineRef TM
Definition: PassBuilderBindings.cpp:47
llvm::EVT::bitsGT
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition: ValueTypes.h:256
llvm::MVT::i16
@ i16
Definition: MachineValueType.h:45
llvm::IntegerType::get
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:311
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition: Instructions.h:1474
llvm::FastISel::TRI
const TargetRegisterInfo & TRI
Definition: FastISel.h:212
llvm::ISD::FNEG
@ FNEG
Perform various unary floating-point operations inspired by libm.
Definition: ISDOpcodes.h:904
llvm::FastISel::selectCall
bool selectCall(const User *I)
Definition: FastISel.cpp:1156
llvm::ISD::ArgFlagsTy::setPreallocated
void setPreallocated()
Definition: TargetCallingConv.h:95
GEP
Hexagon Common GEP
Definition: HexagonCommonGEP.cpp:172
llvm::FunctionLoweringInfo::PHINodesToUpdate
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
Definition: FunctionLoweringInfo.h:178
llvm::MCInstrInfo::get
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
llvm::PatchPointOpers::IDPos
@ IDPos
Definition: StackMaps.h:79
LLVMContext.h
llvm::OptimizedStructLayoutField
A field in a structure.
Definition: OptimizedStructLayout.h:45
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
selectBinaryOp
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID, unsigned OpSize)
Select the AArch64 opcode for the basic binary operation GenericOpc (such as G_OR or G_SDIV),...
Definition: AArch64InstructionSelector.cpp:750
llvm::AMDGPU::HSAMD::Kernel::Key::Args
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Definition: AMDGPUMetadata.h:394
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::FastISel::optimizeCmpPredicate
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const
Definition: FastISel.cpp:2315
llvm::PatchPointOpers::CCPos
@ CCPos
Definition: StackMaps.h:79
llvm::FastISel::CallLoweringInfo::setCallee
CallLoweringInfo & setCallee(Type *ResultTy, FunctionType *FuncTy, const Value *Target, ArgListTy &&ArgsList, const CallBase &Call)
Definition: FastISel.h:103
llvm::TargetLoweringBase::getValueType
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
Definition: TargetLowering.h:1459
llvm::FastISel::selectBitCast
bool selectBitCast(const User *I)
Definition: FastISel.cpp:1408
llvm::BranchInst
Conditional or Unconditional Branch instruction.
Definition: Instructions.h:3099
llvm::ISD::SIGN_EXTEND
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:760
raw_ostream.h
llvm::FastISel::TII
const TargetInstrInfo & TII
Definition: FastISel.h:210
llvm::FastISel::fastEmitInst_extractsubreg
Register fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, uint32_t Idx)
Emit a MachineInstr for an extract_subreg from a specified index of a superregister to a specified ty...
Definition: FastISel.cpp:2101
llvm::FastISel::lookUpRegForValue
Register lookUpRegForValue(const Value *V)
Look up the value to see if its value is already cached in a register.
Definition: FastISel.cpp:351
llvm::FastISel::finishCondBranch
void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB)
Emit an unconditional branch to FalseMBB, obtain the branch weight, and add TrueMBB and FalseMBB to ...
Definition: FastISel.cpp:1581
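A hedged sketch of the usual idiom in a target's branch selection code: emit the target-specific conditional branch to TrueMBB yourself, then let finishCondBranch() emit the fall-through branch to FalseMBB and register both successors with their branch weights. TGT::Bcc and CondCode are placeholders, and BI is the IR BranchInst being selected.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TGT::Bcc))
      .addImm(CondCode)   // placeholder condition-code operand
      .addMBB(TrueMBB);
  finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);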
llvm::CmpInst::FCMP_TRUE
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:736
llvm::MachineInstr::eraseFromParent
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
Definition: MachineInstr.cpp:650
llvm::MachineInstrBundleIterator< MachineInstr >
llvm::isPowerOf2_64
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:496
Value.h
llvm::FastISel::fastEmitInst_rr
Register fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1)
Emit a MachineInstr with two register operands and a result register in the given register class.
Definition: FastISel.cpp:1946
llvm::ISD::ArgFlagsTy::setSwiftAsync
void setSwiftAsync()
Definition: TargetCallingConv.h:101
llvm::MCInstrDesc::getNumOperands
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:230
llvm::ExtractValueInst::getIndices
ArrayRef< unsigned > getIndices() const
Definition: Instructions.h:2476
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::FastISel::DL
const DataLayout & DL
Definition: FastISel.h:209
llvm::FastISel::selectBinaryOp
bool selectBinaryOp(const User *I, unsigned ISDOpcode)
Select and emit code for a binary operator instruction, which has an opcode which directly correspond...
Definition: FastISel.cpp:449
Debug.h
llvm::TargetRegisterInfo::getCallPreservedMask
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function.
Definition: TargetRegisterInfo.h:480
llvm::FastISel::recomputeInsertPt
void recomputeInsertPt()
Reset InsertPt to prepare for inserting instructions into the current block.
Definition: FastISel.cpp:401
llvm::FastISel::startNewBlock
void startNewBlock()
Set the current block to which generated machine instructions will be appended.
Definition: FastISel.cpp:123
llvm::CmpInst::FCMP_ORD
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:728
llvm::BranchInst::getSuccessor
BasicBlock * getSuccessor(unsigned i) const
Definition: Instructions.h:3192
llvm::ISD::ArgFlagsTy::setZExt
void setZExt()
Definition: TargetCallingConv.h:74
llvm::FastISel::constrainOperandRegClass
Register constrainOperandRegClass(const MCInstrDesc &II, Register Op, unsigned OpNum)
Try to constrain Op so that it is usable by argument OpNum of the provided MCInstrDesc.
Definition: FastISel.cpp:1900
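A sketch of how the emit helpers use this (assumption: Opcode, Op0 and ResultReg come from the surrounding selection code): constrain the source vreg to what operand 1 of the instruction expects before building it; operand 0 is the result def, and a COPY into a fresh vreg is inserted if the classes are incompatible.
  const MCInstrDesc &II = TII.get(Opcode);
  Op0 = constrainOperandRegClass(II, Op0, /*OpNum=*/1);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg).addReg(Op0);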
llvm::FastISel::fastEmitInst_r
Register fastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0)
Emit a MachineInstr with one register operand and a result register in the given register class.
Definition: FastISel.cpp:1926
llvm::FastISel::fastEmitInst_i
Register fastEmitInst_i(unsigned MachineInstOpcode, const TargetRegisterClass *RC, uint64_t Imm)
Emit a MachineInstr with a single immediate operand, and a result register in the given register clas...
Definition: FastISel.cpp:2085
llvm::CmpInst::FCMP_UEQ
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:730
llvm::FunctionType
Class to represent function types.
Definition: DerivedTypes.h:103
llvm::MCInstrDesc::ImplicitDefs
const MCPhysReg * ImplicitDefs
Definition: MCInstrDesc.h:207
llvm::EVT::getSimpleVT
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:288
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:365
llvm::ISD::FDIV
@ FDIV
Definition: ISDOpcodes.h:393