//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;
void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}
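
// Usage sketch (editor's addition; `B`, `MF`, and `MBB` are hypothetical
// names): a builder is bound to a function and positioned before any
// instructions are emitted.
//   MachineIRBuilder B;
//   B.setMF(MF);                       // reset all state for this function
//   B.setMBB(MBB);                     // insert at the end of MBB
//   B.setInsertPt(MBB, MBB.begin());   // or at an explicit point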

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections()}, getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}
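
// Editor's note on the cases above: integer constants up to 64 bits are
// encoded as plain immediates, wider integers as ConstantInt operands
// (addCImm), floats as FP immediates, and null pointers as immediate 0;
// any other constant is dropped and the DBG_VALUE carries $noreg instead.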

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}

std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
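
// Example (editor's sketch; `B`, `R`, and `Base` are hypothetical): a zero
// offset emits nothing and aliases the result to the base register, so
// callers must read `Res` rather than assume a new instruction exists.
//   Register R;                                        // initially invalid
//   auto MI = B.materializePtrAdd(R, Base, LLT::scalar(64), 0);
//   // MI == std::nullopt, R == Base
//   auto MI2 = B.materializePtrAdd(R, Base, LLT::scalar(64), 16);
//   // *MI2 is a G_PTR_ADD of Base and a materialized G_CONSTANT 16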

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }
  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}
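
// Example expansion (editor's sketch): padding <2 x s32> into <4 x s32>
//   %a:_(s32), %b:_(s32) = G_UNMERGE_VALUES %src(<2 x s32>)
//   %u:_(s32) = G_IMPLICIT_DEF
//   %res:_(<4 x s32>) = G_BUILD_VECTOR %a, %b, %u, %u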

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(Op0Ty.isVector() && "Non vector type");
  assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
          (ResTy.isVector() &&
           (ResTy.getElementType() == Op0Ty.getElementType()))) &&
         "Different vector element types");
  assert(
      (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
      "Op0 has fewer elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  if (ResTy.isScalar())
    return buildCopy(Res, Unmerge.getReg(0));
  SmallVector<Register, 8> Regs;
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}
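
// Example expansion (editor's sketch): shrinking <4 x s32> to <2 x s32>
//   %a, %b, %c, %d = G_UNMERGE_VALUES %src(<4 x s32>)
//   %res:_(<2 x s32>) = G_BUILD_VECTOR %a, %b
// A scalar result type instead copies the first unmerged element.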

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}
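
// Usage sketch (editor's addition; `B` is a hypothetical builder): the
// destination LLT decides the shape of the result.
//   auto C1 = B.buildConstant(LLT::scalar(32), 42);
//   // -> %c1:_(s32) = G_CONSTANT i32 42
//   auto C2 = B.buildConstant(LLT::fixed_vector(4, 32), 42);
//   // -> one scalar G_CONSTANT, splatted via buildSplatBuildVector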

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
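
// Usage sketch (editor's addition; `B`, `BasePtr`, and `BaseMMO` are
// hypothetical): loading a 32-bit field at byte offset 8 from a base
// pointer with an existing memory operand.
//   auto Val = B.buildLoadFromOffset(LLT::scalar(32), BasePtr, *BaseMMO, 8);
//   // emits G_CONSTANT 8, G_PTR_ADD, then G_LOAD with a derived MMO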

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
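
// Editor's note: the mapping mirrors SelectionDAG's boolean contents.
//   ZeroOrNegativeOneBooleanContent -> G_SEXT   (true == -1)
//   ZeroOrOneBooleanContent         -> G_ZEXT   (true == 1)
//   anything else                   -> G_ANYEXT (upper bits unspecified)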

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
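
// Editor's note: the emitted opcode depends only on the relative sizes of
// Res and Op.
//   s64 <- s32 : the requested ExtOpc (G_SEXT/G_ZEXT/G_ANYEXT)
//   s32 <- s64 : G_TRUNC
//   s32 <- s32 : COPY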

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}
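
// Editor's note: the cast opcode is inferred from the operand kinds, e.g.
//   p0  -> s64        : G_PTRTOINT
//   s64 -> p0         : G_INTTOPTR
//   s64 -> <2 x s32>  : G_BITCAST (same bit width required)
// Identical types degrade to a plain COPY.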

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "insertion past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}
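
// Editor's note, summarizing the choice above for buildMergeLikeInstr:
//   scalar Dst                 -> G_MERGE_VALUES
//   vector Dst, scalar sources -> G_BUILD_VECTOR
//   vector Dst, vector sources -> G_CONCAT_VECTORS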

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
                                                            const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
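
// Example expansion (editor's sketch): splatting %x:_(s32) into <4 x s32>
//   %undef:_(<4 x s32>) = G_IMPLICIT_DEF
//   %zero:_(s64) = G_CONSTANT i64 0
//   %ins:_(<4 x s32>) = G_INSERT_VECTOR_ELT %undef, %x(s32), %zero(s64)
//   %res:_(<4 x s32>) = G_SHUFFLE_VECTOR %ins, %undef, shufflemask(0,0,0,0)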

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
         "Expected Src to match Dst elt ty");
  return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
         Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  unsigned MinElts) {

  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, MinElts);
  return buildVScale(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const ConstantInt &MinElts) {
  auto VScale = buildInstr(TargetOpcode::G_VSCALE);
  VScale->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), VScale);
  VScale.addCImm(&MinElts);
  return VScale;
}

static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects, bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID, ArrayRef<DstOp> Results,
                                 bool HasSideEffects,
                                 bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID, ArrayRef<DstOp> Results) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
                                                           const SrcOp &Src0,
                                                           const SrcOp &Src1,
                                                           unsigned Idx) {
  return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
                    {Src0, Src1, uint64_t(Idx)});
}

MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
                                                            const SrcOp &Src,
                                                            unsigned Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
                    {Src, uint64_t(Idx)});
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
                                                    unsigned RW,
                                                    unsigned Locality,
                                                    unsigned CacheType,
                                                    MachineMemOperand &MMO) {
  auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
  Addr.addSrcToMIB(MIB);
  MIB.addImm(RW).addImm(Locality).addImm(CacheType);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
               SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
Function Alias Analysis Results
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Addr
uint64_t Size
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:236
static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent)
This file declares the MachineIRBuilder class.
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This file describes how to lower LLVM code to machine code.
const fltSemantics & getSemantics() const
Definition: APFloat.h:1303
Class for arbitrary precision integers.
Definition: APInt.h:76
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:284
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:154
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
iterator begin() const
Definition: ArrayRef.h:153
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
The address of a basic block.
Definition: Constants.h:888
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:965
bool isFPPredicate() const
Definition: InstrTypes.h:1083
bool isIntPredicate() const
Definition: InstrTypes.h:1084
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:267
const APFloat & getValueAPF() const
Definition: Constants.h:310
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:147
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
Register getReg() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:342
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:182
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:267
constexpr bool isScalar() const
Definition: LowLevelType.h:146
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr bool isPointer() const
Definition: LowLevelType.h:149
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:290
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
Definition: LowLevelType.h:178
constexpr LLT getScalarType() const
Definition: LowLevelType.h:208
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Metadata node.
Definition: Metadata.h:1067
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBundleIterator< MachineInstr > iterator
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO, int64_t Offset)
Helper to create a load from a constant offset given a base address.
MachineInstrBuilder buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO.
MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op, bool IsVector, bool IsFP)
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
LLVMContext & getContext() const
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx)
Build and insert Res = G_CONSTANT_POOL Idx.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
MachineInstrBuilder buildZExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and inserts Res = G_AND Op, LowBitsSet(ImmOp) Since there is no G_ZEXT_INREG like G_SEXT_INREG,...
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)
Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src)
Build and insert a vector splat of a scalar Src using a G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idio...
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal, Register NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr,...
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MDNode * getPCSections()
Get the current instruction's PC sections metadata.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
void recordInsertion(MachineInstr *InsertedInstr) const
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildAtomicCmpXchg(Register OldValRes, Register Addr, Register CmpVal, Register NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0 Res = G_BUILD_VECTOR a, b, .....
void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty, const LLT Op1Ty)
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildBuildVectorConstant(const DstOp &Res, ArrayRef< APInt > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ... where each OpN is built with G_CONSTANT.
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1)
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1)
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying the given Label.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src, const SrcOp &Op, unsigned Index)
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in the stack slot specified by FI (suitably modified by Expr).
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
MachineInstrBuilder buildMergeValues(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x, y, z = G_UNMERGE_VALUES Op0; Res = G_BUILD_VECTOR a, b, ..., x
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend)
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTRMASK Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
void validateUnaryOp(const LLT Res, const LLT Op0)
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)
Build and insert Res = G_EXTRACT_SUBVECTOR Src, Index.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
void setMF(MachineFunction &MF)
MachineInstrBuilder buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT (1 << NumBits) - 1.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
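Paired with buildBrCond above, a sketch of a floating-point guarded branch; LHS, RHS, and TargetMBB are assumed names:
  // G_FCMP produces an s1 condition that feeds G_BRCOND directly.
  auto Cond = MIRBuilder.buildFCmp(CmpInst::FCMP_OLT, LLT::scalar(1), LHS, RHS);
  MIRBuilder.buildBrCond(Cond, *TargetMBB);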
MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and insert Res = G_SEXT_INREG Op, ImmOp.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
A description of a memory reference used in the backend.
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher, false otherwise.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:679
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
size_t size() const
Definition: SmallVector.h:91
void reserve(size_type N)
Definition: SmallVector.h:676
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
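The reserve-then-push_back pattern these entries describe, as a sketch; NumOps, EltTy, and MRI are assumed names:
  SmallVector<Register, 8> Regs;
  Regs.reserve(NumOps);  // one allocation when the count is known up front
  for (unsigned I = 0; I != NumOps; ++I)
    Regs.push_back(MRI.createGenericVirtualRegister(EltTy));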
LLT getLLTTy(const MachineRegisterInfo &MRI) const
void addSrcToMIB(MachineInstrBuilder &MIB) const
Register getReg() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
LLVM Value Representation.
Definition: Value.h:74
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:203
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:210
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1731
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
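A sketch of this lower-level entry point, which MachineIRBuilder::buildInstr wraps; MF, MIMD, TII, and DstReg are assumed names:
  // Creates the instruction without inserting it into any basic block.
  MachineInstrBuilder MIB =
      BuildMI(MF, MIMD, TII.get(TargetOpcode::G_IMPLICIT_DEF)).addDef(DstReg);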
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
DWARFExpression::Operation Op
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:631
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Definition: Metadata.h:760
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:331
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
MachineFunction * MF
MachineFunction under construction.
DebugLoc DL
Debug location to be set to any instruction we create.
const TargetInstrInfo * TII
Information used to access the description of the opcodes.
MDNode * PCSections
PC sections metadata to be set to any instruction we create.
MachineBasicBlock::iterator II
MachineRegisterInfo * MRI
Information used to verify types are consistent and to create virtual registers.
GISelChangeObserver * Observer
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exclusive to the function.