InstrEmitter.cpp
1//==--- InstrEmitter.cpp - Emit MachineInstrs for the SelectionDAG class ---==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements the Emit routines for the SelectionDAG class, which creates
10// MachineInstrs based on the decisions of the SelectionDAG instruction
11// selection.
12//
13//===----------------------------------------------------------------------===//
14
15#include "InstrEmitter.h"
16#include "SDNodeDbgValue.h"
27#include "llvm/IR/PseudoProbe.h"
30using namespace llvm;
31
32#define DEBUG_TYPE "instr-emitter"
33
34/// MinRCSize - Smallest register class we allow when constraining virtual
35/// registers. If satisfying all register class constraints would require
36/// using a smaller register class, emit a COPY to a new virtual register
37/// instead.
38const unsigned MinRCSize = 4;
39
40/// CountResults - The results of target nodes have register or immediate
41/// operands first, then an optional chain, and optional glue operands (which do
42/// not go into the resulting MachineInstr).
43unsigned InstrEmitter::CountResults(SDNode *Node) {
44  unsigned N = Node->getNumValues();
45 while (N && Node->getValueType(N - 1) == MVT::Glue)
46 --N;
47 if (N && Node->getValueType(N - 1) == MVT::Other)
48 --N; // Skip over chain result.
49 return N;
50}
51
52/// countOperands - The inputs to target nodes have any actual inputs first,
53/// followed by an optional chain operand, then an optional glue operand.
54/// Compute the number of actual operands that will go into the resulting
55/// MachineInstr.
56///
57/// Also count physreg RegisterSDNode and RegisterMaskSDNode operands preceding
58/// the chain and glue. These operands may be implicit on the machine instr.
59static unsigned countOperands(SDNode *Node, unsigned NumExpUses,
60 unsigned &NumImpUses) {
61 unsigned N = Node->getNumOperands();
62 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
63 --N;
64 if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
65 --N; // Ignore chain if it exists.
66
67 // Count RegisterSDNode and RegisterMaskSDNode operands for NumImpUses.
68 NumImpUses = N - NumExpUses;
69 for (unsigned I = N; I > NumExpUses; --I) {
70 if (isa<RegisterMaskSDNode>(Node->getOperand(I - 1)))
71 continue;
72 if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1)))
73 if (RN->getReg().isPhysical())
74 continue;
75 NumImpUses = N - I;
76 break;
77 }
78
79 return N;
80}
81
82/// EmitCopyFromReg - Generate machine code for a CopyFromReg node or an
83/// implicit physical register output.
84void InstrEmitter::EmitCopyFromReg(SDValue Op, bool IsClone, Register SrcReg,
85 VRBaseMapType &VRBaseMap) {
86 Register VRBase;
87 if (SrcReg.isVirtual()) {
88 // Just use the input register directly!
89 if (IsClone)
90 VRBaseMap.erase(Op);
91 bool isNew = VRBaseMap.insert(std::make_pair(Op, SrcReg)).second;
92 (void)isNew; // Silence compiler warning.
93 assert(isNew && "Node emitted out of order - early");
94 return;
95 }
96
97 // If the node is only used by a CopyToReg and the dest reg is a vreg, use
98 // the CopyToReg'd destination register instead of creating a new vreg.
99 bool MatchReg = true;
100 const TargetRegisterClass *UseRC = nullptr;
101 MVT VT = Op.getSimpleValueType();
102
103 // Stick to the preferred register classes for legal types.
104 if (TLI->isTypeLegal(VT))
105 UseRC = TLI->getRegClassFor(VT, Op->isDivergent());
106
107 for (SDNode *User : Op->users()) {
108 bool Match = true;
109 if (User->getOpcode() == ISD::CopyToReg && User->getOperand(2) == Op) {
110 Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
111 if (DestReg.isVirtual()) {
112 VRBase = DestReg;
113 Match = false;
114 } else if (DestReg != SrcReg)
115 Match = false;
116 } else {
117 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
118 if (User->getOperand(i) != Op)
119 continue;
120 if (VT == MVT::Other || VT == MVT::Glue)
121 continue;
122 Match = false;
123 if (User->isMachineOpcode()) {
124 const MCInstrDesc &II = TII->get(User->getMachineOpcode());
125 const TargetRegisterClass *RC = nullptr;
126 if (i + II.getNumDefs() < II.getNumOperands()) {
127 RC = TRI->getAllocatableClass(
128 TII->getRegClass(II, i + II.getNumDefs(), TRI));
129 }
130 if (!UseRC)
131 UseRC = RC;
132 else if (RC) {
133 const TargetRegisterClass *ComRC =
134 TRI->getCommonSubClass(UseRC, RC);
135 // If multiple uses expect disjoint register classes, we emit
136 // copies in AddRegisterOperand.
137 if (ComRC)
138 UseRC = ComRC;
139 }
140 }
141 }
142 }
143 MatchReg &= Match;
144 if (VRBase)
145 break;
146 }
147
148 const TargetRegisterClass *SrcRC = nullptr, *DstRC = nullptr;
149 SrcRC = TRI->getMinimalPhysRegClass(SrcReg, VT);
150
151 // Figure out the register class to create for the destreg.
152 if (VRBase) {
153 DstRC = MRI->getRegClass(VRBase);
154 } else if (UseRC) {
155 assert(TRI->isTypeLegalForClass(*UseRC, VT) &&
156 "Incompatible phys register def and uses!");
157 DstRC = UseRC;
158 } else
159 DstRC = SrcRC;
160
161 // If all uses are reading from the src physical register and copying the
162 // register is either impossible or very expensive, then don't create a copy.
163 if (MatchReg && SrcRC->expensiveOrImpossibleToCopy()) {
164 VRBase = SrcReg;
165 } else {
166 // Create the reg, emit the copy.
167 VRBase = MRI->createVirtualRegister(DstRC);
168 BuildMI(*MBB, InsertPos, Op.getDebugLoc(), TII->get(TargetOpcode::COPY),
169 VRBase)
170 .addReg(SrcReg);
171 }
172
173 if (IsClone)
174 VRBaseMap.erase(Op);
175 bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
176 (void)isNew; // Silence compiler warning.
177 assert(isNew && "Node emitted out of order - early");
178}
179
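/// Create the result virtual registers for Node and add them to MIB as
/// register definitions, reusing the destination of a CopyToReg user when
/// its register class matches.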
180void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
181                                          MachineInstrBuilder &MIB,
182                                          const MCInstrDesc &II,
183 bool IsClone, bool IsCloned,
184 VRBaseMapType &VRBaseMap) {
185 assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF &&
186 "IMPLICIT_DEF should have been handled as a special case elsewhere!");
187
188 unsigned NumResults = CountResults(Node);
189 bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
190 II.isVariadic() && II.variadicOpsAreDefs();
191 unsigned NumVRegs = HasVRegVariadicDefs ? NumResults : II.getNumDefs();
192 if (Node->getMachineOpcode() == TargetOpcode::STATEPOINT)
193 NumVRegs = NumResults;
194 for (unsigned i = 0; i < NumVRegs; ++i) {
195 // If the specific node value is only used by a CopyToReg and the dest reg
196 // is a vreg in the same register class, use the CopyToReg'd destination
197 // register instead of creating a new vreg.
198 Register VRBase;
199 const TargetRegisterClass *RC =
200 TRI->getAllocatableClass(TII->getRegClass(II, i, TRI));
201 // Always let the value type influence the used register class. The
202 // constraints on the instruction may be too lax to represent the value
203 // type correctly. For example, a 64-bit float (X86::FR64) can't live in
204 // the 32-bit float super-class (X86::FR32).
205 if (i < NumResults && TLI->isTypeLegal(Node->getSimpleValueType(i))) {
206 const TargetRegisterClass *VTRC = TLI->getRegClassFor(
207 Node->getSimpleValueType(i),
208 (Node->isDivergent() || (RC && TRI->isDivergentRegClass(RC))));
209 if (RC)
210 VTRC = TRI->getCommonSubClass(RC, VTRC);
211 if (VTRC)
212 RC = VTRC;
213 }
214
215 if (!II.operands().empty() && II.operands()[i].isOptionalDef()) {
216 // Optional def must be a physical register.
217 VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
218 assert(VRBase.isPhysical());
219 MIB.addReg(VRBase, RegState::Define);
220 }
221
222 if (!VRBase && !IsClone && !IsCloned)
223 for (SDNode *User : Node->users()) {
224 if (User->getOpcode() == ISD::CopyToReg &&
225 User->getOperand(2).getNode() == Node &&
226 User->getOperand(2).getResNo() == i) {
227 Register Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
228 if (Reg.isVirtual()) {
229 const TargetRegisterClass *RegRC = MRI->getRegClass(Reg);
230 if (RegRC == RC) {
231 VRBase = Reg;
232 MIB.addReg(VRBase, RegState::Define);
233 break;
234 }
235 }
236 }
237 }
238
239 // Create the result registers for this node and add the result regs to
240 // the machine instruction.
241 if (!VRBase) {
242 assert(RC && "Isn't a register operand!");
243 VRBase = MRI->createVirtualRegister(RC);
244 MIB.addReg(VRBase, RegState::Define);
245 }
246
247 // If this def corresponds to a result of the SDNode insert the VRBase into
248 // the lookup map.
249 if (i < NumResults) {
250 SDValue Op(Node, i);
251 if (IsClone)
252 VRBaseMap.erase(Op);
253 bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
254 (void)isNew; // Silence compiler warning.
255 assert(isNew && "Node emitted out of order - early");
256 }
257 }
258}
259
260/// getVR - Return the virtual register corresponding to the specified result
261/// of the specified node.
262Register InstrEmitter::getVR(SDValue Op, VRBaseMapType &VRBaseMap) {
263 if (Op.isMachineOpcode() &&
264 Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
265 // Add an IMPLICIT_DEF instruction before every use.
266 // IMPLICIT_DEF can produce any type of result so its MCInstrDesc
267 // does not include operand register class info.
268 const TargetRegisterClass *RC = TLI->getRegClassFor(
269 Op.getSimpleValueType(), Op.getNode()->isDivergent());
270 Register VReg = MRI->createVirtualRegister(RC);
271 BuildMI(*MBB, InsertPos, Op.getDebugLoc(),
272 TII->get(TargetOpcode::IMPLICIT_DEF), VReg);
273 return VReg;
274 }
275
276 VRBaseMapType::iterator I = VRBaseMap.find(Op);
277 assert(I != VRBaseMap.end() && "Node emitted out of order - late");
278 return I->second;
279}
280
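/// Return true if Op is a selected (machine) convergence control token:
/// CONVERGENCECTRL_ANCHOR, _ENTRY, _LOOP, or _GLUE.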
281static bool isConvergenceCtrlMachineOp(SDValue Op) {
282  if (Op->isMachineOpcode()) {
283 switch (Op->getMachineOpcode()) {
284 case TargetOpcode::CONVERGENCECTRL_ANCHOR:
285 case TargetOpcode::CONVERGENCECTRL_ENTRY:
286 case TargetOpcode::CONVERGENCECTRL_LOOP:
287 case TargetOpcode::CONVERGENCECTRL_GLUE:
288 return true;
289 }
290 return false;
291 }
292
293 // We can reach here when CopyFromReg is encountered. But rather than making a
294 // special case for that, we just make sure we don't reach here in some
295 // surprising way.
296 switch (Op->getOpcode()) {
297 case ISD::CONVERGENCECTRL_ANCHOR:
298 case ISD::CONVERGENCECTRL_ENTRY:
299 case ISD::CONVERGENCECTRL_LOOP:
300 case ISD::CONVERGENCECTRL_GLUE:
301 llvm_unreachable("Convergence control should have been selected by now.");
302 }
303 return false;
304}
305
306/// AddRegisterOperand - Add the specified register as an operand to the
307/// specified machine instr. Insert register copies if the register is
308/// not in the required register class.
309void
310InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
311 SDValue Op,
312 unsigned IIOpNum,
313 const MCInstrDesc *II,
314 VRBaseMapType &VRBaseMap,
315 bool IsDebug, bool IsClone, bool IsCloned) {
316 assert(Op.getValueType() != MVT::Other &&
317 Op.getValueType() != MVT::Glue &&
318 "Chain and glue operands should occur at end of operand list!");
319 // Get/emit the operand.
320 Register VReg = getVR(Op, VRBaseMap);
321
322 const MCInstrDesc &MCID = MIB->getDesc();
323 bool isOptDef = IIOpNum < MCID.getNumOperands() &&
324 MCID.operands()[IIOpNum].isOptionalDef();
325
326 // If the instruction requires a register in a different class, create
327 // a new virtual register and copy the value into it, but first attempt to
328 // shrink VReg's register class within reason. For example, if VReg == GR32
329 // and II requires a GR32_NOSP, just constrain VReg to GR32_NOSP.
330 if (II) {
331 const TargetRegisterClass *OpRC = nullptr;
332 if (IIOpNum < II->getNumOperands())
333 OpRC = TII->getRegClass(*II, IIOpNum, TRI);
334
335 if (OpRC) {
336 unsigned MinNumRegs = MinRCSize;
337 // Don't apply any RC size limit for IMPLICIT_DEF. Each use has a unique
338 // virtual register.
339 if (Op.isMachineOpcode() &&
340 Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF)
341 MinNumRegs = 0;
342
343 const TargetRegisterClass *ConstrainedRC
344 = MRI->constrainRegClass(VReg, OpRC, MinNumRegs);
345 if (!ConstrainedRC) {
346 OpRC = TRI->getAllocatableClass(OpRC);
347 assert(OpRC && "Constraints cannot be fulfilled for allocation");
348 Register NewVReg = MRI->createVirtualRegister(OpRC);
349 BuildMI(*MBB, InsertPos, MIB->getDebugLoc(),
350 TII->get(TargetOpcode::COPY), NewVReg)
351 .addReg(VReg);
352 VReg = NewVReg;
353 } else {
354 assert(ConstrainedRC->isAllocatable() &&
355 "Constraining an allocatable VReg produced an unallocatable class?");
356 }
357 }
358 }
359
360 // If this value has only one use, that use is a kill. This is a
361 // conservative approximation. InstrEmitter does trivial coalescing
362 // with CopyFromReg nodes, so don't emit kill flags for them.
363 // Avoid kill flags on Schedule cloned nodes, since there will be
364 // multiple uses.
365 // Tied operands are never killed, so we need to check that. And that
366 // means we need to determine the index of the operand.
367 // Don't kill convergence control tokens. Initially they are only used in glue
368 // nodes, and the InstrEmitter later adds implicit uses on the users of the
369 // glue node. This can sometimes make it seem like there is only one use,
370 // which is the glue node itself.
371 bool isKill = Op.hasOneUse() && !isConvergenceCtrlMachineOp(Op) &&
372 Op.getNode()->getOpcode() != ISD::CopyFromReg && !IsDebug &&
373 !(IsClone || IsCloned);
374 if (isKill) {
375 unsigned Idx = MIB->getNumOperands();
376 while (Idx > 0 &&
377 MIB->getOperand(Idx-1).isReg() &&
378 MIB->getOperand(Idx-1).isImplicit())
379 --Idx;
380 bool isTied = MCID.getOperandConstraint(Idx, MCOI::TIED_TO) != -1;
381 if (isTied)
382 isKill = false;
383 }
384
385 MIB.addReg(VReg, getDefRegState(isOptDef) | getKillRegState(isKill) |
386 getDebugRegState(IsDebug));
387}
388
389/// AddOperand - Add the specified operand to the specified machine instr. II
390/// specifies the instruction information for the node, and IIOpNum is the
391/// operand number (in the II) that we are adding.
392void InstrEmitter::AddOperand(MachineInstrBuilder &MIB, SDValue Op,
393 unsigned IIOpNum, const MCInstrDesc *II,
394 VRBaseMapType &VRBaseMap, bool IsDebug,
395 bool IsClone, bool IsCloned) {
396 if (Op.isMachineOpcode()) {
397 AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
398 IsDebug, IsClone, IsCloned);
399 } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
400 if (C->getAPIntValue().getSignificantBits() <= 64) {
401 MIB.addImm(C->getSExtValue());
402 } else {
403 MIB.addCImm(
404 ConstantInt::get(MF->getFunction().getContext(), C->getAPIntValue()));
405 }
406 } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
407 MIB.addFPImm(F->getConstantFPValue());
408 } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
409 Register VReg = R->getReg();
410 MVT OpVT = Op.getSimpleValueType();
411 const TargetRegisterClass *IIRC =
412 II ? TRI->getAllocatableClass(TII->getRegClass(*II, IIOpNum, TRI))
413 : nullptr;
414 const TargetRegisterClass *OpRC =
415 TLI->isTypeLegal(OpVT)
416 ? TLI->getRegClassFor(OpVT,
417 Op.getNode()->isDivergent() ||
418 (IIRC && TRI->isDivergentRegClass(IIRC)))
419 : nullptr;
420
421 if (OpRC && IIRC && OpRC != IIRC && VReg.isVirtual()) {
422 Register NewVReg = MRI->createVirtualRegister(IIRC);
423 BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
424 TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
425 VReg = NewVReg;
426 }
427 // Turn additional physreg operands into implicit uses on non-variadic
428 // instructions. This is used by call and return instructions passing
429 // arguments in registers.
430 bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic());
431 MIB.addReg(VReg, getImplRegState(Imp));
432 } else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
433 MIB.addRegMask(RM->getRegMask());
434 } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
435 MIB.addGlobalAddress(TGA->getGlobal(), TGA->getOffset(),
436 TGA->getTargetFlags());
437 } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
438 MIB.addMBB(BBNode->getBasicBlock());
439 } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
440 MIB.addFrameIndex(FI->getIndex());
441 } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
442 MIB.addJumpTableIndex(JT->getIndex(), JT->getTargetFlags());
443 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
444 int Offset = CP->getOffset();
445 Align Alignment = CP->getAlign();
446
447 unsigned Idx;
448 MachineConstantPool *MCP = MF->getConstantPool();
449 if (CP->isMachineConstantPoolEntry())
450 Idx = MCP->getConstantPoolIndex(CP->getMachineCPVal(), Alignment);
451 else
452 Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Alignment);
453 MIB.addConstantPoolIndex(Idx, Offset, CP->getTargetFlags());
454 } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
455 MIB.addExternalSymbol(ES->getSymbol(), ES->getTargetFlags());
456 } else if (auto *SymNode = dyn_cast<MCSymbolSDNode>(Op)) {
457 MIB.addSym(SymNode->getMCSymbol());
458 } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
459 MIB.addBlockAddress(BA->getBlockAddress(),
460 BA->getOffset(),
461 BA->getTargetFlags());
462 } else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
463 MIB.addTargetIndex(TI->getIndex(), TI->getOffset(), TI->getTargetFlags());
464 } else {
465 assert(Op.getValueType() != MVT::Other &&
466 Op.getValueType() != MVT::Glue &&
467 "Chain and glue operands should occur at end of operand list!");
468 AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
469 IsDebug, IsClone, IsCloned);
470 }
471}
472
473Register InstrEmitter::ConstrainForSubReg(Register VReg, unsigned SubIdx,
474 MVT VT, bool isDivergent, const DebugLoc &DL) {
475 const TargetRegisterClass *VRC = MRI->getRegClass(VReg);
476 const TargetRegisterClass *RC = TRI->getSubClassWithSubReg(VRC, SubIdx);
477
478 // RC is a sub-class of VRC that supports SubIdx. Try to constrain VReg
479 // within reason.
480 if (RC && RC != VRC)
481 RC = MRI->constrainRegClass(VReg, RC, MinRCSize);
482
483 // VReg has been adjusted. It can be used with SubIdx operands now.
484 if (RC)
485 return VReg;
486
487 // VReg couldn't be reasonably constrained. Emit a COPY to a new virtual
488 // register instead.
489 RC = TRI->getSubClassWithSubReg(TLI->getRegClassFor(VT, isDivergent), SubIdx);
490 assert(RC && "No legal register class for VT supports that SubIdx");
491 Register NewReg = MRI->createVirtualRegister(RC);
492 BuildMI(*MBB, InsertPos, DL, TII->get(TargetOpcode::COPY), NewReg)
493 .addReg(VReg);
494 return NewReg;
495}
496
497/// EmitSubregNode - Generate machine code for subreg nodes.
498///
499void InstrEmitter::EmitSubregNode(SDNode *Node, VRBaseMapType &VRBaseMap,
500 bool IsClone, bool IsCloned) {
501 Register VRBase;
502 unsigned Opc = Node->getMachineOpcode();
503
504 // If the node is only used by a CopyToReg and the dest reg is a vreg, use
505 // the CopyToReg'd destination register instead of creating a new vreg.
506 for (SDNode *User : Node->users()) {
507 if (User->getOpcode() == ISD::CopyToReg &&
508 User->getOperand(2).getNode() == Node) {
509 Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
510 if (DestReg.isVirtual()) {
511 VRBase = DestReg;
512 break;
513 }
514 }
515 }
516
517 if (Opc == TargetOpcode::EXTRACT_SUBREG) {
518 // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub. There are no
519 // constraints on the %dst register, COPY can target all legal register
520 // classes.
521 unsigned SubIdx = Node->getConstantOperandVal(1);
522 const TargetRegisterClass *TRC =
523 TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
524
525    Register Reg;
526    MachineInstr *DefMI;
527 RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(0));
528 if (R && R->getReg().isPhysical()) {
529 Reg = R->getReg();
530 DefMI = nullptr;
531 } else {
532 Reg = R ? R->getReg() : getVR(Node->getOperand(0), VRBaseMap);
533 DefMI = MRI->getVRegDef(Reg);
534 }
535
536 Register SrcReg, DstReg;
537 unsigned DefSubIdx;
538 if (DefMI &&
539 TII->isCoalescableExtInstr(*DefMI, SrcReg, DstReg, DefSubIdx) &&
540 SubIdx == DefSubIdx &&
541 TRC == MRI->getRegClass(SrcReg)) {
542 // Optimize these:
543 // r1025 = s/zext r1024, 4
544 // r1026 = extract_subreg r1025, 4
545 // to a copy
546 // r1026 = copy r1024
547 VRBase = MRI->createVirtualRegister(TRC);
548 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
549 TII->get(TargetOpcode::COPY), VRBase).addReg(SrcReg);
550 MRI->clearKillFlags(SrcReg);
551 } else {
552 // Reg may not support a SubIdx sub-register, and we may need to
553 // constrain its register class or issue a COPY to a compatible register
554 // class.
555 if (Reg.isVirtual())
556 Reg = ConstrainForSubReg(Reg, SubIdx,
557 Node->getOperand(0).getSimpleValueType(),
558 Node->isDivergent(), Node->getDebugLoc());
559 // Create the destreg if it is missing.
560 if (!VRBase)
561 VRBase = MRI->createVirtualRegister(TRC);
562
563 // Create the extract_subreg machine instruction.
564 MachineInstrBuilder CopyMI =
565 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
566 TII->get(TargetOpcode::COPY), VRBase);
567 if (Reg.isVirtual())
568 CopyMI.addReg(Reg, 0, SubIdx);
569 else
570 CopyMI.addReg(TRI->getSubReg(Reg, SubIdx));
571 }
572 } else if (Opc == TargetOpcode::INSERT_SUBREG ||
573 Opc == TargetOpcode::SUBREG_TO_REG) {
574 SDValue N0 = Node->getOperand(0);
575 SDValue N1 = Node->getOperand(1);
576 SDValue N2 = Node->getOperand(2);
577 unsigned SubIdx = N2->getAsZExtVal();
578
579 // Figure out the register class to create for the destreg. It should be
580 // the largest legal register class supporting SubIdx sub-registers.
581 // RegisterCoalescer will constrain it further if it decides to eliminate
582 // the INSERT_SUBREG instruction.
583 //
584 // %dst = INSERT_SUBREG %src, %sub, SubIdx
585 //
586 // is lowered by TwoAddressInstructionPass to:
587 //
588 // %dst = COPY %src
589 // %dst:SubIdx = COPY %sub
590 //
591 // There is no constraint on the %src register class.
592 //
593 const TargetRegisterClass *SRC =
594 TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
595 SRC = TRI->getSubClassWithSubReg(SRC, SubIdx);
596 assert(SRC && "No register class supports VT and SubIdx for INSERT_SUBREG");
597
598 if (VRBase == 0 || !SRC->hasSubClassEq(MRI->getRegClass(VRBase)))
599 VRBase = MRI->createVirtualRegister(SRC);
600
601 // Create the insert_subreg or subreg_to_reg machine instruction.
602 MachineInstrBuilder MIB =
603 BuildMI(*MF, Node->getDebugLoc(), TII->get(Opc), VRBase);
604
605 // If creating a subreg_to_reg, then the first input operand
606 // is an implicit value immediate, otherwise it's a register
607 if (Opc == TargetOpcode::SUBREG_TO_REG) {
608 const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
609 MIB.addImm(SD->getZExtValue());
610 } else
611 AddOperand(MIB, N0, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
612 IsClone, IsCloned);
613 // Add the subregister being inserted
614 AddOperand(MIB, N1, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
615 IsClone, IsCloned);
616 MIB.addImm(SubIdx);
617 MBB->insert(InsertPos, MIB);
618 } else
619 llvm_unreachable("Node is not insert_subreg, extract_subreg, or subreg_to_reg");
620
621 SDValue Op(Node, 0);
622 bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
623 (void)isNew; // Silence compiler warning.
624 assert(isNew && "Node emitted out of order - early");
625}
626
627/// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes.
628/// COPY_TO_REGCLASS is just a normal copy, except that the destination
629/// register is constrained to be in a particular register class.
630///
631void
632InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
633 VRBaseMapType &VRBaseMap) {
634 // Create the new VReg in the destination class and emit a copy.
635 unsigned DstRCIdx = Node->getConstantOperandVal(1);
636 const TargetRegisterClass *DstRC =
637 TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx));
638 Register NewVReg = MRI->createVirtualRegister(DstRC);
639 const MCInstrDesc &II = TII->get(TargetOpcode::COPY);
640 MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
641 AddOperand(MIB, Node->getOperand(0), 1, &II, VRBaseMap, /*IsDebug=*/false,
642 /*IsClone=*/false, /*IsCloned*/ false);
643
644 MBB->insert(InsertPos, MIB);
645 SDValue Op(Node, 0);
646 bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
647 (void)isNew; // Silence compiler warning.
648 assert(isNew && "Node emitted out of order - early");
649}
650
651/// EmitRegSequence - Generate machine code for REG_SEQUENCE nodes.
652///
653void InstrEmitter::EmitRegSequence(SDNode *Node, VRBaseMapType &VRBaseMap,
654 bool IsClone, bool IsCloned) {
655 unsigned DstRCIdx = Node->getConstantOperandVal(0);
656 const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
657 Register NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC));
658 const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
659 MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
660 unsigned NumOps = Node->getNumOperands();
661 // If the input pattern has a chain, then the root of the corresponding
662 // output pattern will get a chain as well. This can happen to be a
663 // REG_SEQUENCE (which is not "guarded" by countOperands/CountResults).
664 if (NumOps && Node->getOperand(NumOps-1).getValueType() == MVT::Other)
665 --NumOps; // Ignore chain if it exists.
666
667 assert((NumOps & 1) == 1 &&
668 "REG_SEQUENCE must have an odd number of operands!");
669 for (unsigned i = 1; i != NumOps; ++i) {
670 SDValue Op = Node->getOperand(i);
671 if ((i & 1) == 0) {
672 RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(i-1));
673 // Skip physical registers as they don't have a vreg to get and we'll
674 // insert copies for them in TwoAddressInstructionPass anyway.
675 if (!R || !R->getReg().isPhysical()) {
676 unsigned SubIdx = Op->getAsZExtVal();
677 Register SubReg = getVR(Node->getOperand(i - 1), VRBaseMap);
678 const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
679 const TargetRegisterClass *SRC =
680 TRI->getMatchingSuperRegClass(RC, TRC, SubIdx);
681 if (SRC && SRC != RC) {
682 MRI->setRegClass(NewVReg, SRC);
683 RC = SRC;
684 }
685 }
686 }
687 AddOperand(MIB, Op, i+1, &II, VRBaseMap, /*IsDebug=*/false,
688 IsClone, IsCloned);
689 }
690
691 MBB->insert(InsertPos, MIB);
692 SDValue Op(Node, 0);
693 bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
694 (void)isNew; // Silence compiler warning.
695 assert(isNew && "Node emitted out of order - early");
696}
697
698/// EmitDbgValue - Generate machine instruction for a dbg_value node.
699///
700MachineInstr *
701InstrEmitter::EmitDbgValue(SDDbgValue *SD,
702                           VRBaseMapType &VRBaseMap) {
703  DebugLoc DL = SD->getDebugLoc();
704  assert(cast<DILocalVariable>(SD->getVariable())
705             ->isValidLocationForIntrinsic(DL) &&
706 "Expected inlined-at fields to agree");
707
708 SD->setIsEmitted();
709
710 assert(!SD->getLocationOps().empty() &&
711 "dbg_value with no location operands?");
712
713 if (SD->isInvalidated())
714 return EmitDbgNoLocation(SD);
715
716 // Attempt to produce a DBG_INSTR_REF if we've been asked to.
717 if (EmitDebugInstrRefs)
718 if (auto *InstrRef = EmitDbgInstrRef(SD, VRBaseMap))
719 return InstrRef;
720
721 // Emit variadic dbg_value nodes as DBG_VALUE_LIST if they have not been
722 // emitted as instruction references.
723 if (SD->isVariadic())
724 return EmitDbgValueList(SD, VRBaseMap);
725
726 // Emit single-location dbg_value nodes as DBG_VALUE if they have not been
727 // emitted as instruction references.
728 return EmitDbgValueFromSingleOp(SD, VRBaseMap);
729}
730
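/// Build a MachineOperand for a constant SDDbgOperand: integers become
/// immediate (or CImm) operands, floats become FPImm, null pointers become
/// zero, and anything else becomes an undef debug register operand.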
731static MachineOperand GetMOForConstDbgOp(const SDDbgOperand &Op) {
732  const Value *V = Op.getConst();
733  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
734    if (CI->getBitWidth() > 64)
735      return MachineOperand::CreateCImm(CI);
736    if (CI->getBitWidth() == 1)
737      return MachineOperand::CreateImm(CI->getZExtValue());
738    return MachineOperand::CreateImm(CI->getSExtValue());
739  }
740  if (const ConstantFP *CF = dyn_cast<ConstantFP>(V))
741    return MachineOperand::CreateFPImm(CF);
742  // Note: This assumes that all nullptr constants are zero-valued.
743  if (isa<ConstantPointerNull>(V))
744    return MachineOperand::CreateImm(0);
745  // Undef or unhandled value type, so return an undef operand.
746  return MachineOperand::CreateReg(
747 /* Reg */ 0U, /* isDef */ false, /* isImp */ false,
748 /* isKill */ false, /* isDead */ false,
749 /* isUndef */ false, /* isEarlyClobber */ false,
750 /* SubReg */ 0, /* isDebug */ true);
751}
752
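/// Append the debug location operands in LocationOps to MIB, emitting a
/// frame index, virtual register, SDNode result, or constant as appropriate.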
753void InstrEmitter::AddDbgValueLocationOps(
754    MachineInstrBuilder &MIB, const MCInstrDesc &DbgValDesc,
755 ArrayRef<SDDbgOperand> LocationOps,
756 VRBaseMapType &VRBaseMap) {
757 for (const SDDbgOperand &Op : LocationOps) {
758 switch (Op.getKind()) {
759    case SDDbgOperand::FRAMEIX:
760      MIB.addFrameIndex(Op.getFrameIx());
761 break;
762    case SDDbgOperand::VREG:
763      MIB.addReg(Op.getVReg());
764 break;
765    case SDDbgOperand::SDNODE: {
766      SDValue V = SDValue(Op.getSDNode(), Op.getResNo());
767 // It's possible we replaced this SDNode with other(s) and therefore
768 // didn't generate code for it. It's better to catch these cases where
769 // they happen and transfer the debug info, but trying to guarantee that
770 // in all cases would be very fragile; this is a safeguard for any
771 // that were missed.
772 if (VRBaseMap.count(V) == 0)
773 MIB.addReg(0U); // undef
774 else
775 AddOperand(MIB, V, (*MIB).getNumOperands(), &DbgValDesc, VRBaseMap,
776 /*IsDebug=*/true, /*IsClone=*/false, /*IsCloned=*/false);
777 } break;
778    case SDDbgOperand::CONST:
779      MIB.add(GetMOForConstDbgOp(Op));
780      break;
781 }
782 }
783}
784
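/// Emit SD as a DBG_INSTR_REF that refers to the instructions defining its
/// location operands, falling back to a DBG_VALUE form when that is not
/// possible.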
785MachineInstr *
786InstrEmitter::EmitDbgInstrRef(SDDbgValue *SD,
787                              VRBaseMapType &VRBaseMap) {
788 MDNode *Var = SD->getVariable();
789 const DIExpression *Expr = SD->getExpression();
790 DebugLoc DL = SD->getDebugLoc();
791 const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_INSTR_REF);
792
793 // Returns true if the given operand is not a legal debug operand for a
794 // DBG_INSTR_REF.
795 auto IsInvalidOp = [](SDDbgOperand DbgOp) {
796 return DbgOp.getKind() == SDDbgOperand::FRAMEIX;
797 };
798 // Returns true if the given operand is not itself an instruction reference
799 // but is a legal debug operand for a DBG_INSTR_REF.
800 auto IsNonInstrRefOp = [](SDDbgOperand DbgOp) {
801 return DbgOp.getKind() == SDDbgOperand::CONST;
802 };
803
804 // If this variable location does not depend on any instructions or contains
805 // any stack locations, produce it as a standard debug value instead.
806 if (any_of(SD->getLocationOps(), IsInvalidOp) ||
807 all_of(SD->getLocationOps(), IsNonInstrRefOp)) {
808 if (SD->isVariadic())
809 return EmitDbgValueList(SD, VRBaseMap);
810 return EmitDbgValueFromSingleOp(SD, VRBaseMap);
811 }
812
813 // Immediately fold any indirectness from the LLVM-IR intrinsic into the
814 // expression:
815 if (SD->isIndirect())
816 Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
817 // If this is not already a variadic expression, it must be modified to become
818 // one.
819  if (!SD->isVariadic())
820    Expr = DIExpression::convertToVariadicExpression(Expr);
821
822  SmallVector<MachineOperand> MOs;
823
824 // It may not be immediately possible to identify the MachineInstr that
825 // defines a VReg, it can depend for example on the order blocks are
826 // emitted in. When this happens, or when further analysis is needed later,
827 // produce an instruction like this:
828 //
829 // DBG_INSTR_REF !123, !456, %0:gr64
830 //
831 // i.e., point the instruction at the vreg, and patch it up later in
832 // MachineFunction::finalizeDebugInstrRefs.
833 auto AddVRegOp = [&](Register VReg) {
834    MOs.push_back(MachineOperand::CreateReg(
835        /* Reg */ VReg, /* isDef */ false, /* isImp */ false,
836 /* isKill */ false, /* isDead */ false,
837 /* isUndef */ false, /* isEarlyClobber */ false,
838 /* SubReg */ 0, /* isDebug */ true));
839 };
840 unsigned OpCount = SD->getLocationOps().size();
841 for (unsigned OpIdx = 0; OpIdx < OpCount; ++OpIdx) {
842 SDDbgOperand DbgOperand = SD->getLocationOps()[OpIdx];
843
844 // Try to find both the defined register and the instruction defining it.
845 MachineInstr *DefMI = nullptr;
846 Register VReg;
847
848 if (DbgOperand.getKind() == SDDbgOperand::VREG) {
849 VReg = DbgOperand.getVReg();
850
851 // No definition means that block hasn't been emitted yet. Leave a vreg
852 // reference to be fixed later.
853 if (!MRI->hasOneDef(VReg)) {
854 AddVRegOp(VReg);
855 continue;
856 }
857
858 DefMI = &*MRI->def_instr_begin(VReg);
859 } else if (DbgOperand.getKind() == SDDbgOperand::SDNODE) {
860 // Look up the corresponding VReg for the given SDNode, if any.
861 SDNode *Node = DbgOperand.getSDNode();
862 SDValue Op = SDValue(Node, DbgOperand.getResNo());
863 VRBaseMapType::iterator I = VRBaseMap.find(Op);
864 // No VReg -> produce a DBG_VALUE $noreg instead.
865 if (I == VRBaseMap.end())
866 break;
867
868 // Try to pick out a defining instruction at this point.
869 VReg = getVR(Op, VRBaseMap);
870
871 // Again, if there's no instruction defining the VReg right now, fix it up
872 // later.
873 if (!MRI->hasOneDef(VReg)) {
874 AddVRegOp(VReg);
875 continue;
876 }
877
878 DefMI = &*MRI->def_instr_begin(VReg);
879 } else {
880 assert(DbgOperand.getKind() == SDDbgOperand::CONST);
881 MOs.push_back(GetMOForConstDbgOp(DbgOperand));
882 continue;
883 }
884
885 // Avoid copy like instructions: they don't define values, only move them.
886 // Leave a virtual-register reference until it can be fixed up later, to
887 // find the underlying value definition.
888 if (DefMI->isCopyLike() || TII->isCopyInstr(*DefMI)) {
889 AddVRegOp(VReg);
890 continue;
891 }
892
893 // Find the operand number which defines the specified VReg.
894 unsigned OperandIdx = 0;
895 for (const auto &MO : DefMI->operands()) {
896 if (MO.isReg() && MO.isDef() && MO.getReg() == VReg)
897 break;
898 ++OperandIdx;
899 }
900 assert(OperandIdx < DefMI->getNumOperands());
901
902 // Make the DBG_INSTR_REF refer to that instruction, and that operand.
903 unsigned InstrNum = DefMI->getDebugInstrNum();
904 MOs.push_back(MachineOperand::CreateDbgInstrRef(InstrNum, OperandIdx));
905 }
906
907 // If we haven't created a valid MachineOperand for every DbgOp, abort and
908 // produce an undef DBG_VALUE.
909 if (MOs.size() != OpCount)
910 return EmitDbgNoLocation(SD);
911
912 return BuildMI(*MF, DL, RefII, false, MOs, Var, Expr);
913}
914
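/// Emit a DBG_VALUE $noreg, indicating the variable has no location.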
915MachineInstr *InstrEmitter::EmitDbgNoLocation(SDDbgValue *SD) {
916  // An invalidated SDNode must generate an undef DBG_VALUE: although the
917 // original value is no longer computed, earlier DBG_VALUEs live ranges
918 // must not leak into later code.
919 DIVariable *Var = SD->getVariable();
920 const DIExpression *Expr =
921      DIExpression::convertToUndefExpression(SD->getExpression());
922  DebugLoc DL = SD->getDebugLoc();
923 const MCInstrDesc &Desc = TII->get(TargetOpcode::DBG_VALUE);
924 return BuildMI(*MF, DL, Desc, false, 0U, Var, Expr);
925}
926
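/// Emit a DBG_VALUE_LIST from the operands of SD.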
927MachineInstr *
928InstrEmitter::EmitDbgValueList(SDDbgValue *SD,
929                               VRBaseMapType &VRBaseMap) {
930 MDNode *Var = SD->getVariable();
931 DIExpression *Expr = SD->getExpression();
932 DebugLoc DL = SD->getDebugLoc();
933 // DBG_VALUE_LIST := "DBG_VALUE_LIST" var, expression, loc (, loc)*
934 const MCInstrDesc &DbgValDesc = TII->get(TargetOpcode::DBG_VALUE_LIST);
935 // Build the DBG_VALUE_LIST instruction base.
936 auto MIB = BuildMI(*MF, DL, DbgValDesc);
937 MIB.addMetadata(Var);
938 MIB.addMetadata(Expr);
939 AddDbgValueLocationOps(MIB, DbgValDesc, SD->getLocationOps(), VRBaseMap);
940 return &*MIB;
941}
942
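/// Emit a single-location DBG_VALUE from the lone operand of SD,
/// constant-folding the expression where possible.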
943MachineInstr *
944InstrEmitter::EmitDbgValueFromSingleOp(SDDbgValue *SD,
945                                       VRBaseMapType &VRBaseMap) {
946 MDNode *Var = SD->getVariable();
947 DIExpression *Expr = SD->getExpression();
948 DebugLoc DL = SD->getDebugLoc();
949 const MCInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
950
951 assert(SD->getLocationOps().size() == 1 &&
952 "Non variadic dbg_value should have only one location op");
953
954 // See about constant-folding the expression.
955 // Copy the location operand in case we replace it.
956 SmallVector<SDDbgOperand, 1> LocationOps(1, SD->getLocationOps()[0]);
957 if (Expr && LocationOps[0].getKind() == SDDbgOperand::CONST) {
958 const Value *V = LocationOps[0].getConst();
959 if (auto *C = dyn_cast<ConstantInt>(V)) {
960 std::tie(Expr, C) = Expr->constantFold(C);
961 LocationOps[0] = SDDbgOperand::fromConst(C);
962 }
963 }
964
965 // Emit non-variadic dbg_value nodes as DBG_VALUE.
966 // DBG_VALUE := "DBG_VALUE" loc, isIndirect, var, expr
967 auto MIB = BuildMI(*MF, DL, II);
968 AddDbgValueLocationOps(MIB, II, LocationOps, VRBaseMap);
969
970 if (SD->isIndirect())
971 MIB.addImm(0U);
972 else
973 MIB.addReg(0U);
974
975 return MIB.addMetadata(Var).addMetadata(Expr);
976}
977
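/// Generate a DBG_LABEL machine instruction for a dbg_label node.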
978MachineInstr *
979InstrEmitter::EmitDbgLabel(SDDbgLabel *SD) {
980  MDNode *Label = SD->getLabel();
981 DebugLoc DL = SD->getDebugLoc();
982 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
983 "Expected inlined-at fields to agree");
984
985 const MCInstrDesc &II = TII->get(TargetOpcode::DBG_LABEL);
986 MachineInstrBuilder MIB = BuildMI(*MF, DL, II);
987 MIB.addMetadata(Label);
988
989 return &*MIB;
990}
991
992/// EmitMachineNode - Generate machine code for a target-specific node and
993/// needed dependencies.
994///
995void InstrEmitter::
996EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
997 VRBaseMapType &VRBaseMap) {
998 unsigned Opc = Node->getMachineOpcode();
999
1000 // Handle subreg insert/extract specially
1001 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
1002 Opc == TargetOpcode::INSERT_SUBREG ||
1003 Opc == TargetOpcode::SUBREG_TO_REG) {
1004 EmitSubregNode(Node, VRBaseMap, IsClone, IsCloned);
1005 return;
1006 }
1007
1008 // Handle COPY_TO_REGCLASS specially.
1009 if (Opc == TargetOpcode::COPY_TO_REGCLASS) {
1010 EmitCopyToRegClassNode(Node, VRBaseMap);
1011 return;
1012 }
1013
1014 // Handle REG_SEQUENCE specially.
1015 if (Opc == TargetOpcode::REG_SEQUENCE) {
1016 EmitRegSequence(Node, VRBaseMap, IsClone, IsCloned);
1017 return;
1018 }
1019
1020 if (Opc == TargetOpcode::IMPLICIT_DEF)
1021 // We want a unique VR for each IMPLICIT_DEF use.
1022 return;
1023
1024 const MCInstrDesc &II = TII->get(Opc);
1025 unsigned NumResults = CountResults(Node);
1026 unsigned NumDefs = II.getNumDefs();
1027 const MCPhysReg *ScratchRegs = nullptr;
1028
1029 // Handle STACKMAP and PATCHPOINT specially and then use the generic code.
1030 if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
1031 // Stackmaps do not have arguments and do not preserve their calling
1032 // convention. However, to simplify runtime support, they clobber the same
1033 // scratch registers as AnyRegCC.
1034 unsigned CC = CallingConv::AnyReg;
1035 if (Opc == TargetOpcode::PATCHPOINT) {
1036 CC = Node->getConstantOperandVal(PatchPointOpers::CCPos);
1037 NumDefs = NumResults;
1038 }
1039 ScratchRegs = TLI->getScratchRegisters((CallingConv::ID) CC);
1040 } else if (Opc == TargetOpcode::STATEPOINT) {
1041 NumDefs = NumResults;
1042 }
1043
1044 unsigned NumImpUses = 0;
1045 unsigned NodeOperands =
1046 countOperands(Node, II.getNumOperands() - NumDefs, NumImpUses);
1047 bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
1048 II.isVariadic() && II.variadicOpsAreDefs();
1049 bool HasPhysRegOuts = NumResults > NumDefs && !II.implicit_defs().empty() &&
1050 !HasVRegVariadicDefs;
1051#ifndef NDEBUG
1052 unsigned NumMIOperands = NodeOperands + NumResults;
1053 if (II.isVariadic())
1054 assert(NumMIOperands >= II.getNumOperands() &&
1055 "Too few operands for a variadic node!");
1056 else
1057 assert(NumMIOperands >= II.getNumOperands() &&
1058 NumMIOperands <=
1059 II.getNumOperands() + II.implicit_defs().size() + NumImpUses &&
1060 "#operands for dag node doesn't match .td file!");
1061#endif
1062
1063 // Create the new machine instruction.
1064 MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II);
1065
1066 // Transfer IR flags from the SDNode to the MachineInstr
1067 MachineInstr *MI = MIB.getInstr();
1068 const SDNodeFlags Flags = Node->getFlags();
1069  if (Flags.hasUnpredictable())
1070    MI->setFlag(MachineInstr::MIFlag::Unpredictable);
1071
1072 // Add result register values for things that are defined by this
1073 // instruction.
1074 if (NumResults) {
1075 CreateVirtualRegisters(Node, MIB, II, IsClone, IsCloned, VRBaseMap);
1076
1077    if (Flags.hasNoSignedZeros())
1078      MI->setFlag(MachineInstr::MIFlag::FmNsz);
1079
1080    if (Flags.hasAllowReciprocal())
1081      MI->setFlag(MachineInstr::MIFlag::FmArcp);
1082
1083    if (Flags.hasNoNaNs())
1084      MI->setFlag(MachineInstr::MIFlag::FmNoNans);
1085
1086    if (Flags.hasNoInfs())
1087      MI->setFlag(MachineInstr::MIFlag::FmNoInfs);
1088
1089    if (Flags.hasAllowContract())
1090      MI->setFlag(MachineInstr::MIFlag::FmContract);
1091
1092    if (Flags.hasApproximateFuncs())
1093      MI->setFlag(MachineInstr::MIFlag::FmAfn);
1094
1095    if (Flags.hasAllowReassociation())
1096      MI->setFlag(MachineInstr::MIFlag::FmReassoc);
1097
1098    if (Flags.hasNoUnsignedWrap())
1099      MI->setFlag(MachineInstr::MIFlag::NoUWrap);
1100
1101    if (Flags.hasNoSignedWrap())
1102      MI->setFlag(MachineInstr::MIFlag::NoSWrap);
1103
1104    if (Flags.hasExact())
1105      MI->setFlag(MachineInstr::MIFlag::IsExact);
1106
1107    if (Flags.hasNoFPExcept())
1108      MI->setFlag(MachineInstr::MIFlag::NoFPExcept);
1109
1110    if (Flags.hasDisjoint())
1111      MI->setFlag(MachineInstr::MIFlag::Disjoint);
1112
1113    if (Flags.hasSameSign())
1114      MI->setFlag(MachineInstr::MIFlag::SameSign);
1115  }
1116
1117 // Emit all of the actual operands of this instruction, adding them to the
1118 // instruction as appropriate.
1119 bool HasOptPRefs = NumDefs > NumResults;
1120 assert((!HasOptPRefs || !HasPhysRegOuts) &&
1121 "Unable to cope with optional defs and phys regs defs!");
1122 unsigned NumSkip = HasOptPRefs ? NumDefs - NumResults : 0;
1123 for (unsigned i = NumSkip; i != NodeOperands; ++i)
1124 AddOperand(MIB, Node->getOperand(i), i-NumSkip+NumDefs, &II,
1125 VRBaseMap, /*IsDebug=*/false, IsClone, IsCloned);
1126
1127 // Add scratch registers as implicit def and early clobber
1128 if (ScratchRegs)
1129 for (unsigned i = 0; ScratchRegs[i]; ++i)
1130        MIB.addReg(ScratchRegs[i], RegState::ImplicitDefine |
1131                                       RegState::EarlyClobber);
1132
1133 // Set the memory reference descriptions of this instruction now that it is
1134 // part of the function.
1135 MIB.setMemRefs(cast<MachineSDNode>(Node)->memoperands());
1136
1137 // Set the CFI type.
1138 MIB->setCFIType(*MF, Node->getCFIType());
1139
1140 // Insert the instruction into position in the block. This needs to
1141 // happen before any custom inserter hook is called so that the
1142 // hook knows where in the block to insert the replacement code.
1143 MBB->insert(InsertPos, MIB);
1144
1145 // The MachineInstr may also define physregs instead of virtregs. These
1146 // physreg values can reach other instructions in different ways:
1147 //
1148 // 1. When there is a use of a Node value beyond the explicitly defined
1149 // virtual registers, we emit a CopyFromReg for one of the implicitly
1150 // defined physregs. This only happens when HasPhysRegOuts is true.
1151 //
1152 // 2. A CopyFromReg reading a physreg may be glued to this instruction.
1153 //
1154 // 3. A glued instruction may implicitly use a physreg.
1155 //
1156 // 4. A glued instruction may use a RegisterSDNode operand.
1157 //
1158 // Collect all the used physreg defs, and make sure that any unused physreg
1159 // defs are marked as dead.
1160 SmallVector<Register, 8> UsedRegs;
1161
1162 // Additional results must be physical register defs.
1163 if (HasPhysRegOuts) {
1164 for (unsigned i = NumDefs; i < NumResults; ++i) {
1165 Register Reg = II.implicit_defs()[i - NumDefs];
1166 if (!Node->hasAnyUseOfValue(i))
1167 continue;
1168 // This implicitly defined physreg has a use.
1169 UsedRegs.push_back(Reg);
1170 EmitCopyFromReg(SDValue(Node, i), IsClone, Reg, VRBaseMap);
1171 }
1172 }
1173
1174 // Scan the glue chain for any used physregs.
1175 if (Node->getValueType(Node->getNumValues()-1) == MVT::Glue) {
1176 for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser()) {
1177 if (F->getOpcode() == ISD::CopyFromReg) {
1178 Register Reg = cast<RegisterSDNode>(F->getOperand(1))->getReg();
1179 if (Reg.isPhysical())
1180 UsedRegs.push_back(Reg);
1181 continue;
1182 } else if (F->getOpcode() == ISD::CopyToReg) {
1183 // Skip CopyToReg nodes that are internal to the glue chain.
1184 continue;
1185 }
1186 // Collect declared implicit uses.
1187 const MCInstrDesc &MCID = TII->get(F->getMachineOpcode());
1188 append_range(UsedRegs, MCID.implicit_uses());
1189 // In addition to declared implicit uses, we must also check for
1190 // direct RegisterSDNode operands.
1191 for (const SDValue &Op : F->op_values())
1192 if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
1193 Register Reg = R->getReg();
1194 if (Reg.isPhysical())
1195 UsedRegs.push_back(Reg);
1196 }
1197 }
1198 }
1199
1200 // Add rounding control registers as implicit def for function call.
1201 if (II.isCall() && MF->getFunction().hasFnAttribute(Attribute::StrictFP)) {
1202 ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters();
1203 llvm::append_range(UsedRegs, RCRegs);
1204 }
1205
1206 // Finally mark unused registers as dead.
1207 if (!UsedRegs.empty() || !II.implicit_defs().empty() || II.hasOptionalDef())
1208 MIB->setPhysRegsDeadExcept(UsedRegs, *TRI);
1209
1210  // STATEPOINT is too 'dynamic' to have a meaningful machine description.
1211 // We have to manually tie operands.
1212 if (Opc == TargetOpcode::STATEPOINT && NumDefs > 0) {
1213 assert(!HasPhysRegOuts && "STATEPOINT mishandled");
1214 MachineInstr *MI = MIB;
1215 unsigned Def = 0;
1216 int First = StatepointOpers(MI).getFirstGCPtrIdx();
1217 assert(First > 0 && "Statepoint has Defs but no GC ptr list");
1218 unsigned Use = (unsigned)First;
1219 while (Def < NumDefs) {
1220 if (MI->getOperand(Use).isReg())
1221        MI->tieOperands(Def++, Use);
1222      Use = StackMaps::getNextMetaArgIdx(MI, Use);
1223    }
1224 }
1225
1226 if (SDNode *GluedNode = Node->getGluedNode()) {
1227 // FIXME: Possibly iterate over multiple glue nodes?
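    // Machine node opcodes are stored bit-complemented in the SDNode opcode
    // field, so the selected CONVERGENCECTRL_GLUE is matched via its
    // complement here.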
1228 if (GluedNode->getOpcode() ==
1229 ~(unsigned)TargetOpcode::CONVERGENCECTRL_GLUE) {
1230 Register VReg = getVR(GluedNode->getOperand(0), VRBaseMap);
1231 MachineOperand MO = MachineOperand::CreateReg(VReg, /*isDef=*/false,
1232 /*isImp=*/true);
1233 MIB->addOperand(MO);
1234 }
1235 }
1236
1237 // Run post-isel target hook to adjust this instruction if needed.
1238 if (II.hasPostISelHook())
1239 TLI->AdjustInstrPostInstrSelection(*MIB, Node);
1240}
1241
1242/// EmitSpecialNode - Generate machine code for a target-independent node and
1243/// needed dependencies.
1244void InstrEmitter::
1245EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
1246 VRBaseMapType &VRBaseMap) {
1247 switch (Node->getOpcode()) {
1248 default:
1249#ifndef NDEBUG
1250 Node->dump();
1251#endif
1252 llvm_unreachable("This target-independent node should have been selected!");
1253 case ISD::EntryToken:
1254 case ISD::MERGE_VALUES:
1255 case ISD::TokenFactor: // fall thru
1256 break;
1257 case ISD::CopyToReg: {
1258 Register DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
1259 SDValue SrcVal = Node->getOperand(2);
1260 if (DestReg.isVirtual() && SrcVal.isMachineOpcode() &&
1261 SrcVal.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
1262      // Instead of building a COPY to that vreg destination, build an
1263      // IMPLICIT_DEF instruction instead.
1264 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
1265 TII->get(TargetOpcode::IMPLICIT_DEF), DestReg);
1266 break;
1267 }
1268 Register SrcReg;
1269 if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
1270 SrcReg = R->getReg();
1271 else
1272 SrcReg = getVR(SrcVal, VRBaseMap);
1273
1274 if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
1275 break;
1276
1277 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
1278 DestReg).addReg(SrcReg);
1279 break;
1280 }
1281 case ISD::CopyFromReg: {
1282 Register SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
1283 EmitCopyFromReg(SDValue(Node, 0), IsClone, SrcReg, VRBaseMap);
1284 break;
1285 }
1286 case ISD::EH_LABEL:
1287 case ISD::ANNOTATION_LABEL: {
1288 unsigned Opc = (Node->getOpcode() == ISD::EH_LABEL)
1289 ? TargetOpcode::EH_LABEL
1290 : TargetOpcode::ANNOTATION_LABEL;
1291 MCSymbol *S = cast<LabelSDNode>(Node)->getLabel();
1292 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
1293 TII->get(Opc)).addSym(S);
1294 break;
1295 }
1296
1297 case ISD::LIFETIME_START:
1298 case ISD::LIFETIME_END: {
1299 unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START)
1300 ? TargetOpcode::LIFETIME_START
1301 : TargetOpcode::LIFETIME_END;
1302 auto *FI = cast<FrameIndexSDNode>(Node->getOperand(1));
1303 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
1304 .addFrameIndex(FI->getIndex());
1305 break;
1306 }
1307
1308 case ISD::PSEUDO_PROBE: {
1309 unsigned TarOp = TargetOpcode::PSEUDO_PROBE;
1310 auto Guid = cast<PseudoProbeSDNode>(Node)->getGuid();
1311 auto Index = cast<PseudoProbeSDNode>(Node)->getIndex();
1312 auto Attr = cast<PseudoProbeSDNode>(Node)->getAttributes();
1313
1314 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
1315 .addImm(Guid)
1316 .addImm(Index)
1317        .addImm((uint8_t)PseudoProbeType::Block)
1318        .addImm(Attr);
1319 break;
1320 }
1321
1322 case ISD::INLINEASM:
1323 case ISD::INLINEASM_BR: {
1324 unsigned NumOps = Node->getNumOperands();
1325 if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
1326 --NumOps; // Ignore the glue operand.
1327
1328 // Create the inline asm machine instruction.
1329 unsigned TgtOpc = Node->getOpcode() == ISD::INLINEASM_BR
1330 ? TargetOpcode::INLINEASM_BR
1331 : TargetOpcode::INLINEASM;
1332 MachineInstrBuilder MIB =
1333 BuildMI(*MF, Node->getDebugLoc(), TII->get(TgtOpc));
1334
1335 // Add the asm string as an external symbol operand.
1336 SDValue AsmStrV = Node->getOperand(InlineAsm::Op_AsmString);
1337 const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
1338 MIB.addExternalSymbol(AsmStr);
1339
1340 // Add the HasSideEffect, isAlignStack, AsmDialect, MayLoad and MayStore
1341 // bits.
1342 int64_t ExtraInfo =
1343      cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_ExtraInfo))->
1344                          getZExtValue();
1345 MIB.addImm(ExtraInfo);
1346
1347    // Remember the operand index of the group flags.
1348 SmallVector<unsigned, 8> GroupIdx;
1349
1350    // Remember registers that are part of early-clobber defs.
1351    SmallVector<Register, 8> ECRegs;
1352
1353 // Add all of the operand registers to the instruction.
1354 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
1355 unsigned Flags = Node->getConstantOperandVal(i);
1356 const InlineAsm::Flag F(Flags);
1357 const unsigned NumVals = F.getNumOperandRegisters();
1358
1359 GroupIdx.push_back(MIB->getNumOperands());
1360 MIB.addImm(Flags);
1361 ++i; // Skip the ID value.
1362
1363 switch (F.getKind()) {
1364      case InlineAsm::Kind::RegDef:
1365        for (unsigned j = 0; j != NumVals; ++j, ++i) {
1366 Register Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
1367 // FIXME: Add dead flags for physical and virtual registers defined.
1368 // For now, mark physical register defs as implicit to help fast
1369 // regalloc. This makes inline asm look a lot like calls.
1370          MIB.addReg(Reg, RegState::Define | getImplRegState(Reg.isPhysical()));
1371        }
1372 break;
1373      case InlineAsm::Kind::RegDefEarlyClobber:
1374      case InlineAsm::Kind::Clobber:
1375        for (unsigned j = 0; j != NumVals; ++j, ++i) {
1376 Register Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
1377          MIB.addReg(Reg, RegState::Define | RegState::EarlyClobber |
1378                              getImplRegState(Reg.isPhysical()));
1379          ECRegs.push_back(Reg);
1380 }
1381 break;
1382 case InlineAsm::Kind::RegUse: // Use of register.
1383 case InlineAsm::Kind::Imm: // Immediate.
1384 case InlineAsm::Kind::Mem: // Non-function addressing mode.
1385 // The addressing mode has been selected, just add all of the
1386 // operands to the machine instruction.
1387 for (unsigned j = 0; j != NumVals; ++j, ++i)
1388 AddOperand(MIB, Node->getOperand(i), 0, nullptr, VRBaseMap,
1389 /*IsDebug=*/false, IsClone, IsCloned);
1390
1391 // Manually set isTied bits.
1392 if (F.isRegUseKind()) {
1393 unsigned DefGroup;
1394 if (F.isUseOperandTiedToDef(DefGroup)) {
1395 unsigned DefIdx = GroupIdx[DefGroup] + 1;
1396 unsigned UseIdx = GroupIdx.back() + 1;
1397 for (unsigned j = 0; j != NumVals; ++j)
1398 MIB->tieOperands(DefIdx + j, UseIdx + j);
1399 }
1400 }
1401 break;
1402 case InlineAsm::Kind::Func: // Function addressing mode.
1403 for (unsigned j = 0; j != NumVals; ++j, ++i) {
1404 SDValue Op = Node->getOperand(i);
1405 AddOperand(MIB, Op, 0, nullptr, VRBaseMap,
1406 /*IsDebug=*/false, IsClone, IsCloned);
1407
1408 // Adjust Target Flags for function reference.
1409 if (auto *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
1410 unsigned NewFlags =
1411 MF->getSubtarget().classifyGlobalFunctionReference(
1412 TGA->getGlobal());
1413 unsigned LastIdx = MIB.getInstr()->getNumOperands() - 1;
1414 MIB.getInstr()->getOperand(LastIdx).setTargetFlags(NewFlags);
1415 }
1416 }
1417 }
1418 }
1419
1420 // Add rounding control registers as implicit def for inline asm.
1421 if (MF->getFunction().hasFnAttribute(Attribute::StrictFP)) {
1422 ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters();
1423 for (MCPhysReg Reg : RCRegs)
1424        MIB.addReg(Reg, RegState::ImplicitDefine);
1425    }
1426
1427 // GCC inline assembly allows input operands to also be early-clobber
1428 // output operands (so long as the operand is written only after it's
1429 // used), but this does not match the semantics of our early-clobber flag.
1430 // If an early-clobber operand register is also an input operand register,
1431 // then remove the early-clobber flag.
1432 for (Register Reg : ECRegs) {
1433 if (MIB->readsRegister(Reg, TRI)) {
1434 MachineOperand *MO =
1435 MIB->findRegisterDefOperand(Reg, TRI, false, false);
1436 assert(MO && "No def operand for clobbered register?");
1437 MO->setIsEarlyClobber(false);
1438 }
1439 }
1440
1441 // Get the mdnode from the asm if it exists and add it to the instruction.
1442 SDValue MDV = Node->getOperand(InlineAsm::Op_MDNode);
1443 const MDNode *MD = cast<MDNodeSDNode>(MDV)->getMD();
1444 if (MD)
1445 MIB.addMetadata(MD);
1446
1447 MBB->insert(InsertPos, MIB);
1448 break;
1449 }
1450 }
1451}
1452
1453/// InstrEmitter - Construct an InstrEmitter and set it to start inserting
1454/// at the given position in the given block.
1455InstrEmitter::InstrEmitter(const TargetMachine &TM, MachineBasicBlock *mbb,
1456                           MachineBasicBlock::iterator insertpos)
1457    : MF(mbb->getParent()), MRI(&MF->getRegInfo()),
1458 TII(MF->getSubtarget().getInstrInfo()),
1459 TRI(MF->getSubtarget().getRegisterInfo()),
1460 TLI(MF->getSubtarget().getTargetLowering()), MBB(mbb),
1461 InsertPos(insertpos) {
1462 EmitDebugInstrRefs = mbb->getParent()->useDebugInstrRef();
1463}
unsigned SubReg
MachineInstrBuilder MachineInstrBuilder & DefMI
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const Function * getParent(const Value *V)
This file contains constants used for implementing Dwarf debug support.
IRTranslator LLVM IR MI
static bool isConvergenceCtrlMachineOp(SDValue Op)
MachineOperand GetMOForConstDbgOp(const SDDbgOperand &Op)
const unsigned MinRCSize
MinRCSize - Smallest register class we allow when constraining virtual registers.
static unsigned countOperands(SDNode *Node, unsigned NumExpUses, unsigned &NumImpUses)
countOperands - The inputs to target nodes have any actual inputs first, followed by an optional chai...
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register Reg
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
This file describes how to lower LLVM code to machine code.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:277
This is the shared class of boolean and integer constants.
Definition Constants.h:87
uint64_t getZExtValue() const
DWARF expression.
static LLVM_ABI DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
LLVM_ABI std::pair< DIExpression *, const ConstantInt * > constantFold(const ConstantInt *CI)
Try to shorten an expression with an initial constant operand.
static LLVM_ABI const DIExpression * convertToVariadicExpression(const DIExpression *Expr)
If Expr is a non-variadic expression (i.e.
static LLVM_ABI const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
Base class for variables.
A debug info location.
Definition DebugLoc.h:124
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition DenseMap.h:74
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition DenseMap.h:174
iterator end()
Definition DenseMap.h:81
MachineInstr * EmitDbgValue(SDDbgValue *SD, VRBaseMapType &VRBaseMap)
EmitDbgValue - Generate machine instruction for a dbg_value node.
MachineInstr * EmitDbgInstrRef(SDDbgValue *SD, VRBaseMapType &VRBaseMap)
Emit a dbg_value as a DBG_INSTR_REF.
SmallDenseMap< SDValue, Register, 16 > VRBaseMapType
MachineInstr * EmitDbgLabel(SDDbgLabel *SD)
Generate machine instruction for a dbg_label node.
MachineInstr * EmitDbgNoLocation(SDDbgValue *SD)
Emit a DBG_VALUE $noreg, indicating a variable has no location.
static unsigned CountResults(SDNode *Node)
CountResults - The results of target nodes have register or immediate operands first,...
MachineInstr * EmitDbgValueList(SDDbgValue *SD, VRBaseMapType &VRBaseMap)
Emit a DBG_VALUE_LIST from the operands to SDDbgValue.
InstrEmitter(const TargetMachine &TM, MachineBasicBlock *mbb, MachineBasicBlock::iterator insertpos)
InstrEmitter - Construct an InstrEmitter and set it to start inserting at the given position in the g...
void AddDbgValueLocationOps(MachineInstrBuilder &MIB, const MCInstrDesc &DbgValDesc, ArrayRef< SDDbgOperand > Locations, VRBaseMapType &VRBaseMap)
MachineInstr * EmitDbgValueFromSingleOp(SDDbgValue *SD, VRBaseMapType &VRBaseMap)
Emit a DBG_VALUE from the operands to SDDbgValue.
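Helpers like the ones above ultimately produce DBG_VALUE-family instructions. A hedged sketch of the plain single-location case using BuildMI's debug-value overload; MF, DL, TII, VReg, Var and Expr are assumed parameters, and this is not the emitter's actual code path:
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
// Emit a direct DBG_VALUE stating that variable Var currently lives in VReg.
llvm::MachineInstr *emitSimpleDbgValue(llvm::MachineFunction &MF,
                                       const llvm::DebugLoc &DL,
                                       const llvm::TargetInstrInfo &TII,
                                       llvm::Register VReg,
                                       const llvm::MDNode *Var,
                                       const llvm::MDNode *Expr) {
  return llvm::BuildMI(MF, DL, TII.get(llvm::TargetOpcode::DBG_VALUE),
                       /*IsIndirect=*/false, VReg, Var, Expr)
      .getInstr();
}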
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
ArrayRef< MCPhysReg > implicit_uses() const
Return a list of registers that are potentially read by any instance of this machine instruction.
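getOperandConstraint is how tied-operand (two-address) constraints are queried. A small hedged sketch, with the operand index supplied by the caller as an assumption:
#include "llvm/MC/MCInstrDesc.h"
// True if use operand UseIdx must share a register with an earlier def.
bool isTiedUse(const llvm::MCInstrDesc &Desc, unsigned UseIdx) {
  return Desc.getOperandConstraint(UseIdx, llvm::MCOI::TIED_TO) != -1;
}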
Metadata node.
Definition Metadata.h:1078
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
Machine Value Type.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
const MachineInstrBuilder & addTargetIndex(unsigned Idx, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
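The add* methods above are designed to chain off BuildMI. A hedged sketch of that fluent style; the opcode and the frame-index/immediate operand shape are illustrative assumptions, not a real instruction definition:
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
llvm::MachineInstr *emitFrameIndexOp(llvm::MachineFunction &MF,
                                     const llvm::DebugLoc &DL,
                                     const llvm::TargetInstrInfo &TII,
                                     unsigned Opcode, llvm::Register Dst,
                                     int FI) {
  return llvm::BuildMI(MF, DL, TII.get(Opcode), Dst)
      .addFrameIndex(FI) // stack slot, resolved later by frame lowering
      .addImm(0)         // byte offset within the slot (assumption)
      .getInstr();
}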
Representation of each machine instruction.
LLVM_ABI void setCFIType(MachineFunction &MF, uint32_t Type)
Set the CFI type for the instruction.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
unsigned getNumOperands() const
Returns the total number of operands.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
LLVM_ABI void insert(mop_iterator InsertBefore, ArrayRef< MachineOperand > Ops)
Inserts Ops before InsertBefore; can untie/retie tied operands.
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void setPhysRegsDeadExcept(ArrayRef< Register > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
const MachineOperand & getOperand(unsigned i) const
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
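tieOperands is what records a two-address constraint once both operands exist. A hedged sketch, assuming the instruction was built with its def at operand 0 and the matching use at operand 2:
#include <cassert>
#include "llvm/CodeGen/MachineInstr.h"
void tieTwoAddressUse(llvm::MachineInstr &MI) {
  MI.tieOperands(/*DefIdx=*/0, /*UseIdx=*/2);
  assert(MI.getOperand(2).isReg() && "tied operand must be a register");
}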
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateFPImm(const ConstantFP *CFP)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
static MachineOperand CreateCImm(const ConstantInt *CI)
void setIsEarlyClobber(bool Val=true)
static MachineOperand CreateImm(int64_t Val)
static MachineOperand CreateDbgInstrRef(unsigned InstrIdx, unsigned OpIdx)
void setTargetFlags(unsigned F)
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
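The Create* factories pair naturally with MachineInstr::addOperand. A hedged sketch; MI, MF and Reg are assumed to come from the caller:
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
// Append a register use and a zero immediate to an existing instruction.
void appendUseAndImm(llvm::MachineFunction &MF, llvm::MachineInstr &MI,
                     llvm::Register Reg) {
  MI.addOperand(MF, llvm::MachineOperand::CreateReg(Reg, /*isDef=*/false));
  MI.addOperand(MF, llvm::MachineOperand::CreateImm(0));
}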
Wrapper class representing virtual and physical registers.
Definition Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:74
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:78
Holds the information from a dbg_label node through SDISel.
MDNode * getLabel() const
Returns the MDNode pointer for the label.
const DebugLoc & getDebugLoc() const
Returns the DebugLoc.
Holds the information for a single machine location through SDISel; either an SDNode,...
Register getVReg() const
Returns the Virtual Register for a VReg.
unsigned getResNo() const
Returns the ResNo for a register ref.
static SDDbgOperand fromConst(const Value *Const)
SDNode * getSDNode() const
Returns the SDNode* for a register ref.
@ VREG
Value is a virtual register.
@ FRAMEIX
Value is contents of a stack location.
@ SDNODE
Value is the result of an expression.
@ CONST
Value is a constant.
Kind getKind() const
Holds the information from a dbg_value node through SDISel.
const DebugLoc & getDebugLoc() const
Returns the DebugLoc.
DIVariable * getVariable() const
Returns the DIVariable pointer for the variable.
bool isInvalidated() const
ArrayRef< SDDbgOperand > getLocationOps() const
DIExpression * getExpression() const
Returns the DIExpression pointer for the expression.
bool isIndirect() const
Returns whether this is an indirect value.
void setIsEmitted()
setIsEmitted / isEmitted - Getter/Setter for flag indicating that this SDDbgValue has been emitted to...
bool isVariadic() const
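getLocationOps and getKind are typically used together to classify a dbg_value's locations. A hedged sketch; SDNodeDbgValue.h is the private SelectionDAG header this file already includes, and countVRegLocations is a hypothetical helper:
#include "SDNodeDbgValue.h" // private header declaring SDDbgValue/SDDbgOperand
// Count how many of SD's location operands are virtual registers.
unsigned countVRegLocations(const llvm::SDDbgValue *SD) {
  unsigned N = 0;
  for (const llvm::SDDbgOperand &L : SD->getLocationOps())
    if (L.getKind() == llvm::SDDbgOperand::VREG)
      ++N;
  return N;
}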
Represents one node in the SelectionDAG.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isMachineOpcode() const
unsigned getMachineOpcode() const
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
static LLVM_ABI unsigned getNextMetaArgIdx(const MachineInstr *MI, unsigned CurIdx)
Get index of next meta operand.
Primary interface to the complete machine description for the target machine.
bool isAllocatable() const
Return true if this register class may be used to create virtual registers.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
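These two predicates are the usual guard before constraining a virtual register to a smaller class. A hedged sketch, assuming both class pointers are non-null:
#include "llvm/CodeGen/TargetRegisterInfo.h"
// True if NewRC can be used for vregs and is no wider than CurRC.
bool canNarrowTo(const llvm::TargetRegisterClass *CurRC,
                 const llvm::TargetRegisterClass *NewRC) {
  return NewRC->isAllocatable() && CurRC->hasSubClassEq(NewRC);
}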
LLVM Value Representation.
Definition Value.h:75
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition CallingConv.h:60
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
Definition ISDOpcodes.h:256
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition ISDOpcodes.h:225
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
Definition ISDOpcodes.h:48
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
Definition ISDOpcodes.h:219
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition ISDOpcodes.h:53
@ Define
Register definition.
@ EarlyClobber
Register definition happens before uses.
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
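dyn_cast is the standard checked downcast used on the constant classes listed earlier. A hedged sketch combining it with ConstantInt::getZExtValue; the bit-width guard is a deliberate assumption that keeps the call well-defined:
#include <cstdint>
#include <optional>
#include "llvm/IR/Constants.h"
#include "llvm/Support/Casting.h"
// Return the zero-extended value if V is an integer constant of at most 64 bits.
std::optional<uint64_t> getConstVal(const llvm::Value *V) {
  if (const auto *CI = llvm::dyn_cast<llvm::ConstantInt>(V))
    if (CI->getBitWidth() <= 64)
      return CI->getZExtValue();
  return std::nullopt;
}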
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2136
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
unsigned getImplRegState(bool B)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
unsigned getDebugRegState(bool B)
unsigned getDefRegState(bool B)
unsigned getKillRegState(bool B)
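The get*RegState helpers fold booleans into the flags word that addReg expects. A hedged sketch that marks an implicit physical-register def; MIB and PhysReg are assumptions supplied by the caller:
#include "llvm/CodeGen/MachineInstrBuilder.h"
void addImplicitDef(llvm::MachineInstrBuilder &MIB, llvm::Register PhysReg) {
  MIB.addReg(PhysReg, llvm::getDefRegState(true) | llvm::getImplRegState(true));
}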
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
ArrayRef(const T &OneElt) -> ArrayRef< T >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559