//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;
void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections()}, getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

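// Build and insert a DBG_VALUE instruction expressing the fact that the
// associated Variable lives in Reg (suitably modified by Expr).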
MachineInstrBuilder MachineIRBuilder::buildDirectDbgValue(Register Reg,
                                                          const MDNode *Variable,
                                                          const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

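// Build and insert a DBG_VALUE instruction expressing the fact that the
// associated Variable lives in memory at Reg (suitably modified by Expr).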
MachineInstrBuilder MachineIRBuilder::buildIndirectDbgValue(Register Reg,
                                                            const MDNode *Variable,
                                                            const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

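// Build and insert a DBG_VALUE instruction expressing the fact that the
// associated Variable lives in the stack slot specified by FI (suitably
// modified by Expr).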
MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

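// Build and insert a DBG_VALUE instruction expressing the fact that the
// associated Variable is given by C (suitably modified by Expr).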
MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

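// Build and insert a DBG_LABEL instruction specifying that Label is given.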
MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

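// Build and insert Res = G_DYN_STACKALLOC Size, Align.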
MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

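// Build and insert Res = G_FRAME_INDEX Idx.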
MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

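// Build and insert Res = G_GLOBAL_VALUE GV.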
MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

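// Build and insert Res = G_CONSTANT_POOL Idx.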
MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

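// Build and insert Res = G_JUMP_TABLE JTI.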
MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}

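// Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value).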
std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

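// Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0
//                  Res = G_BUILD_VECTOR a, b, ..., x, undef, ..., undef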
MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }
  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}

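// Build and insert a, b, ..., x, y, z = G_UNMERGE_VALUES Op0
//                  Res = G_BUILD_VECTOR a, b, ..., x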
MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(Op0Ty.isVector() && "Non vector type");
  assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
          (ResTy.isVector() &&
           (ResTy.getElementType() == Op0Ty.getElementType()))) &&
         "Different vector element types");
  assert(
      (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
      "Op0 has fewer elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  if (ResTy.isScalar())
    return buildCopy(Res, Unmerge.getReg(0));
  SmallVector<Register, 8> Regs;
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}

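// Build and insert G_BR Dest.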
MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

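// Build and insert G_BRJT TablePtr, JTI, IndexReg.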
MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
             == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

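// Build and insert G_BRCOND Tst, Dest.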
MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

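// Helper to create a load from a constant offset given a base address.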
MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
                                                            const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

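// Build and insert a vector splat of a scalar Src using a
// G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idiom.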
MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
         "Expected Src to match Dst elt ty");
  return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
         Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  unsigned MinElts) {

  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, MinElts);
  return buildVScale(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const ConstantInt &MinElts) {
  auto VScale = buildInstr(TargetOpcode::G_VSCALE);
  VScale->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), VScale);
  VScale.addCImm(&MinElts);
  return VScale;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const APInt &MinElts) {
  ConstantInt *CI =
      ConstantInt::get(getMF().getFunction().getContext(), MinElts);
  return buildVScale(Res, *CI);
}

static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
                                                           const SrcOp &Src0,
                                                           const SrcOp &Src1,
                                                           unsigned Idx) {
  return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
                    {Src0, Src1, uint64_t(Idx)});
}

MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
                                                            const SrcOp &Src,
                                                            unsigned Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
                    {Src, uint64_t(Idx)});
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

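// Build and insert OldValRes<def>, SuccessRes<def> =
//     G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.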
MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
    const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  SuccessRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &CmpVal, const SrcOp &NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

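// Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.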
MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

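// Build and insert G_FENCE Ordering, Scope.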
MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
                                                    unsigned RW,
                                                    unsigned Locality,
                                                    unsigned CacheType,
                                                    MachineMemOperand &MMO) {
  auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
  Addr.addSrcToMIB(MIB);
  MIB.addImm(RW).addImm(Locality).addImm(CacheType);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getElementCount() == Op0Ty.getElementCount())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getElementCount() == Op0Ty.getElementCount();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
               SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
Function Alias Analysis Results
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Addr
uint64_t Size
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:236
static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent)
This file declares the MachineIRBuilder class.
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This file describes how to lower LLVM code to machine code.
const fltSemantics & getSemantics() const
Definition: APFloat.h:1303
Class for arbitrary precision integers.
Definition: APInt.h:76
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:284
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:154
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
iterator begin() const
Definition: ArrayRef.h:153
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
The address of a basic block.
Definition: Constants.h:889
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:966
bool isFPPredicate() const
Definition: InstrTypes.h:1095
bool isIntPredicate() const
Definition: InstrTypes.h:1096
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:268
const APFloat & getValueAPF() const
Definition: Constants.h:311
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:148
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
Register getReg() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:356
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:182
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:267
constexpr bool isScalar() const
Definition: LowLevelType.h:146
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr bool isPointer() const
Definition: LowLevelType.h:149
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:290
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
Definition: LowLevelType.h:178
constexpr LLT getScalarType() const
Definition: LowLevelType.h:208
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Metadata node.
Definition: Metadata.h:1067
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBundleIterator< MachineInstr > iterator
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO, int64_t Offset)
Helper to create a load from a constant offset given a base address.
MachineInstrBuilder buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO.
MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op, bool IsVector, bool IsFP)
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
LLVMContext & getContext() const
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx)
Build and insert Res = G_CONSTANT_POOL Idx.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
MachineInstrBuilder buildZExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and inserts Res = G_AND Op, LowBitsSet(ImmOp) Since there is no G_ZEXT_INREG like G_SEXT_INREG,...
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)
Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src)
Build and insert a vector splat of a scalar Src using a G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idio...
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MDNode * getPCSections()
Get the current instruction's PC sections metadata.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
void recordInsertion(MachineInstr *InsertedInstr) const
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0 Res = G_BUILD_VECTOR a, b, .....
void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty, const LLT Op1Ty)
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildBuildVectorConstant(const DstOp &Res, ArrayRef< APInt > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ... where each OpN is built with G_CONSTANT.
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1)
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1)
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src, const SrcOp &Op, unsigned Index)
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in the stack slot specified by FI (suitably modified by Expr).
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
MachineInstrBuilder buildMergeValues(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
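A sketch pairing buildExtractVectorElement and buildInsertVectorElement to swap out one lane of a vector. The helper name is hypothetical, and the s64 index type is an assumption; targets pick the index width during legalization:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include <utility>
using namespace llvm;

// Hypothetical helper: return the old value of lane Idx and a new vector
// with that lane replaced by NewElt.
static std::pair<Register, Register>
replaceLane(MachineIRBuilder &B, Register Vec, Register NewElt, unsigned Idx) {
  const LLT VecTy = B.getMRI()->getType(Vec);
  auto IdxC = B.buildConstant(LLT::scalar(64), Idx);
  Register Old =
      B.buildExtractVectorElement(VecTy.getElementType(), Vec, IdxC).getReg(0);
  Register New =
      B.buildInsertVectorElement(VecTy, Vec, NewElt, IdxC).getReg(0);
  return {Old, New};
}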
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
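A sketch of the two-result form (helper name hypothetical). The MMO is expected to carry the atomic ordering and both MOLoad and MOStore flags:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include <utility>
using namespace llvm;

// Hypothetical helper: strong compare-and-swap of an s32 at Addr. The two
// defs mirror the IR-level cmpxchg pair: loaded value plus s1 success flag.
static std::pair<Register, Register>
emitCas32(MachineIRBuilder &B, Register Addr, Register Cmp, Register New,
          MachineMemOperand &MMO) {
  auto CAS = B.buildAtomicCmpXchgWithSuccess(LLT::scalar(32), LLT::scalar(1),
                                             Addr, Cmp, New, MMO);
  return {CAS.getReg(0), CAS.getReg(1)};
}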
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x, y, z = G_UNMERGE_VALUES Op0 Res = G_BUILD_VECTOR a, b, ..., x.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
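A sketch for the Register-based atomic RMW overloads, which take the destination as a plain Register, so the result vreg is created up front via createGenericVirtualRegister (described further down this list). The helper name is hypothetical:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Hypothetical helper: atomic fetch-and-add. The MMO is expected to carry
// the atomic ordering.
static Register emitFetchAdd(MachineIRBuilder &B, Register Addr, Register Val,
                             MachineMemOperand &MMO) {
  MachineRegisterInfo &MRI = *B.getMRI();
  Register Old = MRI.createGenericVirtualRegister(MRI.getType(Val));
  B.buildAtomicRMWAdd(Old, Addr, Val, MMO); // Old <- *Addr; *Addr += Val
  return Old;
}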
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend)
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTRMASK Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
void validateUnaryOp(const LLT Res, const LLT Op0)
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)
Build and insert Res = G_EXTRACT_SUBVECTOR Src, Index.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
void setMF(MachineFunction &MF)
MachineInstrBuilder buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT (1 << NumBits) - 1.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
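A sketch of emitting G_CONSTANT from an IR-level ConstantInt, matching the overload above; convenience overloads taking int64_t or APInt also exist. The helper name is hypothetical:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/IR/Constants.h"
using namespace llvm;

// Hypothetical helper: materialize CI as a G_CONSTANT of matching width.
static Register emitConstant(MachineIRBuilder &B, const ConstantInt &CI) {
  return B.buildConstant(LLT::scalar(CI.getBitWidth()), CI).getReg(0);
}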
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and insert Res = G_SEXT_INREG Op, ImmOp.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
A description of a memory reference used in the backend.
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher, false otherwise.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
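A sketch pairing createGenericVirtualRegister with buildCopy (listed above) to duplicate a value into a fresh vreg. The helper name is hypothetical:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Hypothetical helper: clone Src into a new generic vreg of the same LLT.
static Register cloneVReg(MachineIRBuilder &B, Register Src) {
  MachineRegisterInfo &MRI = *B.getMRI();
  Register Dst = MRI.createGenericVirtualRegister(MRI.getType(Src));
  B.buildCopy(Dst, Src);
  return Dst;
}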
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:679
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
size_t size() const
Definition: SmallVector.h:91
void reserve(size_type N)
Definition: SmallVector.h:676
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
LLT getLLTTy(const MachineRegisterInfo &MRI) const
void addSrcToMIB(MachineInstrBuilder &MIB) const
Register getReg() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
LLVM Value Representation.
Definition: Value.h:74
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:203
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:210
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
DWARFExpression::Operation Op
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:631
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Definition: Metadata.h:760
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:331
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
MachineFunction * MF
MachineFunction under construction.
DebugLoc DL
Debug location to be set to any instruction we create.
const TargetInstrInfo * TII
Information used to access the description of the opcodes.
MDNode * PCSections
PC sections metadata to be set to any instruction we create.
MachineBasicBlock::iterator II
MachineRegisterInfo * MRI
Information used to verify types are consistent and to create virtual registers.
GISelChangeObserver * Observer
This class contains a discriminated union of information about pointers in memory operands, relating them back to the original IR.