LLVM 17.0.0git
MachineIRBuilder.cpp
//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;
void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections()}, getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}
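
// Usage sketch (assumes an initialized MachineIRBuilder `B` with a valid
// insertion point): the no-insert/insert pair lets a caller create an
// instruction, attach operands, and only then place it at the insertion point.
//
//   MachineInstrBuilder MIB = B.buildInstrNoInsert(TargetOpcode::DBG_VALUE);
//   // ... add operands to MIB here ...
//   B.insertInstr(MIB); // lands at B's insertion point; observers notified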

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  // Look through an int-to-pointer cast so the underlying numeric constant
  // can still be encoded as an immediate.
  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}
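
// Example (sketch; assumes an initialized MachineIRBuilder `B`, a 64-bit
// address space 0, and a hypothetical virtual register `SizeReg` holding the
// allocation size): allocate a dynamically-sized stack block, 16-byte aligned.
//
//   LLT P0 = LLT::pointer(0, 64);
//   auto Alloc = B.buildDynStackAlloc(P0, SizeReg, Align(16));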

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}
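
// Example (sketch; assumes builder `B` and a hypothetical p0 register
// `BaseReg`): advance a base pointer by a byte offset held in a 64-bit scalar.
//
//   LLT P0 = LLT::pointer(0, 64);
//   LLT S64 = LLT::scalar(64);
//   auto Off = B.buildConstant(S64, 24);
//   auto Ptr = B.buildPtrAdd(P0, BaseReg, Off); // Ptr = BaseReg + 24 bytes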

std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  // A zero offset needs no G_PTR_ADD; reuse the base register directly.
  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
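
// Note on the contract (sketch; `B` and `BaseReg` as above): for a zero
// offset no instruction is created and `Res` aliases the input, so callers
// must treat std::nullopt as "no new instruction", not as failure.
//
//   Register Res; // must be invalid on entry
//   std::optional<MachineInstrBuilder> MIB =
//       B.materializePtrAdd(Res, BaseReg, LLT::scalar(64), 0);
//   // MIB == std::nullopt and Res == BaseReg: no G_PTR_ADD was emitted.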

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}
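
// The mask arithmetic: maskTrailingZeros<uint64_t>(N) produces a value with
// the low N bits clear and all higher bits set, so the resulting G_PTRMASK
// clears the low N bits of the pointer, aligning it down to a 2^N boundary.
// For example (a property of the MathExtras helper, not code in this file):
//
//   maskTrailingZeros<uint64_t>(4) == 0xFFFFFFFFFFFFFFF0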

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
  assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
         "Different vector element types");
  assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
         "Op0 has more elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  SmallVector<Register, 8> Regs;
  for (auto Op : Unmerge.getInstr()->defs())
    Regs.push_back(Op.getReg());
  Register Undef = buildUndef(Op0Ty.getElementType()).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
  assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
         "Different vector element types");
  assert((ResTy.getNumElements() < Op0Ty.getNumElements()) &&
         "Op0 has fewer elements");

  SmallVector<Register, 8> Regs;
  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}
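
// Example (sketch; assumes builder `B` and a hypothetical register `V4` of
// type <4 x s32>): widening to 8 lanes pads with undef; narrowing back to 4
// lanes drops the tail.
//
//   LLT V8S32 = LLT::fixed_vector(8, LLT::scalar(32));
//   LLT V4S32 = LLT::fixed_vector(4, LLT::scalar(32));
//   auto Wide   = B.buildPadVectorWithUndefElements(V8S32, V4);
//   auto Narrow = B.buildDeleteTrailingVectorElements(V4S32, Wide);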

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  // For vector destinations, build a scalar G_CONSTANT and splat it.
  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}
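
// Example (sketch; assumes builder `B`): requesting a vector-typed constant
// transparently becomes a scalar G_CONSTANT plus a splat build-vector.
//
//   LLT S32   = LLT::scalar(32);
//   LLT V4S32 = LLT::fixed_vector(4, S32);
//   auto C  = B.buildConstant(S32, 7);    // single G_CONSTANT
//   auto CV = B.buildConstant(V4S32, 7);  // G_CONSTANT + G_BUILD_VECTOR splat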

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) ==
             EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}
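
// Example (sketch; assumes builder `B`, a hypothetical p0 register `AddrReg`,
// and that the flag/AA-info parameters have defaults in the header): this
// overload creates the MachineMemOperand itself, so the caller only supplies
// pointer info and alignment.
//
//   LLT S32 = LLT::scalar(32);
//   auto Ld = B.buildLoad(S32, AddrReg, MachinePointerInfo(), Align(4));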

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
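
// The mapping encodes how the target materializes a boolean "true" (a summary
// of TargetLowering's BooleanContent, for reference):
//   ZeroOrNegativeOneBooleanContent -> G_SEXT   (true == all-ones)
//   ZeroOrOneBooleanContent         -> G_ZEXT   (true == 1)
//   anything else                   -> G_ANYEXT (high bits unspecified)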

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  // Pick the opcode from the relative sizes: a wider dst extends, a narrower
  // dst truncates, and equal sizes degrade to a plain COPY.
  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
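
// Example (sketch; assumes builder `B` and a hypothetical s32 register `X`):
// the same call site handles widening, narrowing, and no-op cases.
//
//   auto A = B.buildSExtOrTrunc(LLT::scalar(64), X); // emits G_SEXT
//   auto C = B.buildSExtOrTrunc(LLT::scalar(16), X); // emits G_TRUNC
//   auto D = B.buildSExtOrTrunc(LLT::scalar(32), X); // emits COPY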

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}
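
// Since there is no G_ZEXT_INREG opcode (unlike G_SEXT_INREG), zero-extension
// in a register is an AND with a low-bits mask. Example (sketch; builder `B`,
// hypothetical s32 register `X`):
//
//   // Keep the low 8 bits of X: emits G_CONSTANT 0xFF, then G_AND.
//   auto Z = B.buildZExtInReg(LLT::scalar(32), X, 8);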

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  // A full-width extract is just a cast.
  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}
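
// Opcode selection, summarized: a vector dst built from vector srcs is a
// G_CONCAT_VECTORS, a vector dst from scalar srcs is a G_BUILD_VECTOR, and a
// scalar dst is a G_MERGE_VALUES. Example (sketch; builder `B`, hypothetical
// s32 registers `Lo` and `Hi`):
//
//   auto Wide = B.buildMergeLikeInstr(LLT::scalar(64), {Lo, Hi});
//   // scalar dst from scalars -> G_MERGE_VALUES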

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  // If the sources already match the element type, a plain G_BUILD_VECTOR
  // suffices; otherwise the elements are implicitly truncated.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  // An all-zero mask broadcasts lane 0 to every result lane.
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
         Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  // A full-width insert is just a cast of the inserted value.
  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}
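
// Example (sketch; builder `B`, hypothetical registers `DstReg`/`SrcReg`, and
// an illustrative intrinsic ID): results are added as defs first, then the
// intrinsic ID; the caller appends the intrinsic's arguments afterwards.
//
//   auto MIB = B.buildIntrinsic(Intrinsic::ctpop, {DstReg},
//                               /*HasSideEffects=*/false);
//   MIB.addUse(SrcReg); // intrinsic argument, added by the caller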

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
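
// Example (sketch; builder `B`, hypothetical registers `OldReg`, `AddrReg`
// (p0), `ValReg` (s32), and `MMO` a MachineMemOperand with an atomic
// ordering): the typed wrappers below all funnel into this helper with the
// matching opcode.
//
//   auto Old = B.buildAtomicRMWAdd(OldReg, AddrReg, ValReg, MMO);
//   // equivalent to buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, ...)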

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWOr(Register OldValRes, Register Addr,
                                   Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
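
// Example (sketch; builder `B`, hypothetical s32 registers `X` and `Y`): the
// generic entry point validates operand shapes for known opcodes, then
// appends defs, uses, and optional MI flags.
//
//   auto Sum = B.buildInstr(TargetOpcode::G_ADD, {LLT::scalar(32)}, {X, Y},
//                           MachineInstr::NoSWrap);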