LLVM 17.0.0git
MachineIRBuilder.cpp
Go to the documentation of this file.
1//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the MachineIRBuilder class.
10//===----------------------------------------------------------------------===//
21
22using namespace llvm;
23
25 State.MF = &MF;
26 State.MBB = nullptr;
27 State.MRI = &MF.getRegInfo();
28 State.TII = MF.getSubtarget().getInstrInfo();
29 State.DL = DebugLoc();
30 State.PCSections = nullptr;
32 State.Observer = nullptr;
33}
34
35//------------------------------------------------------------------------------
36// Build instruction variants.
37//------------------------------------------------------------------------------
38
40 return BuildMI(getMF(), {getDL(), getPCSections()}, getTII().get(Opcode));
41}
42
44 getMBB().insert(getInsertPt(), MIB);
45 recordInsertion(MIB);
46 return MIB;
47}
48
51 const MDNode *Expr) {
52 assert(isa<DILocalVariable>(Variable) && "not a variable");
53 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
54 assert(
55 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
56 "Expected inlined-at fields to agree");
57 return insertInstr(BuildMI(getMF(), getDL(),
58 getTII().get(TargetOpcode::DBG_VALUE),
59 /*IsIndirect*/ false, Reg, Variable, Expr));
60}
61
64 const MDNode *Expr) {
65 assert(isa<DILocalVariable>(Variable) && "not a variable");
66 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
67 assert(
68 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
69 "Expected inlined-at fields to agree");
70 return insertInstr(BuildMI(getMF(), getDL(),
71 getTII().get(TargetOpcode::DBG_VALUE),
72 /*IsIndirect*/ true, Reg, Variable, Expr));
73}
74
76 const MDNode *Variable,
77 const MDNode *Expr) {
78 assert(isa<DILocalVariable>(Variable) && "not a variable");
79 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
80 assert(
81 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
82 "Expected inlined-at fields to agree");
83 return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
84 .addFrameIndex(FI)
85 .addImm(0)
86 .addMetadata(Variable)
87 .addMetadata(Expr));
88}
89
91 const MDNode *Variable,
92 const MDNode *Expr) {
93 assert(isa<DILocalVariable>(Variable) && "not a variable");
94 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
95 assert(
96 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
97 "Expected inlined-at fields to agree");
98 auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
99
100 auto *NumericConstant = [&] () -> const Constant* {
101 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
102 if (CE->getOpcode() == Instruction::IntToPtr)
103 return CE->getOperand(0);
104 return &C;
105 }();
106
107 if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
108 if (CI->getBitWidth() > 64)
109 MIB.addCImm(CI);
110 else
111 MIB.addImm(CI->getZExtValue());
112 } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
113 MIB.addFPImm(CFP);
114 } else if (isa<ConstantPointerNull>(NumericConstant)) {
115 MIB.addImm(0);
116 } else {
117 // Insert $noreg if we didn't find a usable constant and had to drop it.
118 MIB.addReg(Register());
119 }
120
121 MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
122 return insertInstr(MIB);
123}
124
126 assert(isa<DILabel>(Label) && "not a label");
127 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
128 "Expected inlined-at fields to agree");
129 auto MIB = buildInstr(TargetOpcode::DBG_LABEL);
130
131 return MIB.addMetadata(Label);
132}
133
135 const SrcOp &Size,
136 Align Alignment) {
137 assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
138 auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
139 Res.addDefToMIB(*getMRI(), MIB);
140 Size.addSrcToMIB(MIB);
141 MIB.addImm(Alignment.value());
142 return MIB;
143}
144
146 int Idx) {
147 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
148 auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
149 Res.addDefToMIB(*getMRI(), MIB);
150 MIB.addFrameIndex(Idx);
151 return MIB;
152}
153
155 const GlobalValue *GV) {
156 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
157 assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
158 GV->getType()->getAddressSpace() &&
159 "address space mismatch");
160
161 auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
162 Res.addDefToMIB(*getMRI(), MIB);
163 MIB.addGlobalAddress(GV);
164 return MIB;
165}
166
168 unsigned Idx) {
169 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
170 auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
171 Res.addDefToMIB(*getMRI(), MIB);
172 MIB.addConstantPoolIndex(Idx);
173 return MIB;
174}
175
177 unsigned JTI) {
178 return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
179 .addJumpTableIndex(JTI);
180}
181
182void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
183 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
184 assert((Res == Op0) && "type mismatch");
185}
186
188 const LLT Op1) {
189 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
190 assert((Res == Op0 && Res == Op1) && "type mismatch");
191}
192
193void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
194 const LLT Op1) {
195 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
196 assert((Res == Op0) && "type mismatch");
197}
198
200 const SrcOp &Op0,
201 const SrcOp &Op1) {
202 assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
203 Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
204 assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() && "invalid offset type");
205
206 return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
207}
208
209std::optional<MachineInstrBuilder>
211 const LLT ValueTy, uint64_t Value) {
212 assert(Res == 0 && "Res is a result argument");
213 assert(ValueTy.isScalar() && "invalid offset type");
214
215 if (Value == 0) {
216 Res = Op0;
217 return std::nullopt;
218 }
219
221 auto Cst = buildConstant(ValueTy, Value);
222 return buildPtrAdd(Res, Op0, Cst.getReg(0));
223}
224
226 const SrcOp &Op0,
227 uint32_t NumBits) {
228 LLT PtrTy = Res.getLLTTy(*getMRI());
229 LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
230 Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
231 buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
232 return buildPtrMask(Res, Op0, MaskReg);
233}
234
237 const SrcOp &Op0) {
238 LLT ResTy = Res.getLLTTy(*getMRI());
239 LLT Op0Ty = Op0.getLLTTy(*getMRI());
240
241 assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
242 assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
243 "Different vector element types");
244 assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
245 "Op0 has more elements");
246
247 auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
249 for (auto Op : Unmerge.getInstr()->defs())
250 Regs.push_back(Op.getReg());
251 Register Undef = buildUndef(Op0Ty.getElementType()).getReg(0);
252 unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
253 for (unsigned i = 0; i < NumberOfPadElts; ++i)
254 Regs.push_back(Undef);
255 return buildMergeLikeInstr(Res, Regs);
256}
257
260 const SrcOp &Op0) {
261 LLT ResTy = Res.getLLTTy(*getMRI());
262 LLT Op0Ty = Op0.getLLTTy(*getMRI());
263
264 assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
265 assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
266 "Different vector element types");
267 assert((ResTy.getNumElements() < Op0Ty.getNumElements()) &&
268 "Op0 has fewer elements");
269
271 auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
272 for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
273 Regs.push_back(Unmerge.getReg(i));
274 return buildMergeLikeInstr(Res, Regs);
275}
276
278 return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
279}
280
282 assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
283 return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
284}
285
287 unsigned JTI,
288 Register IndexReg) {
289 assert(getMRI()->getType(TablePtr).isPointer() &&
290 "Table reg must be a pointer");
291 return buildInstr(TargetOpcode::G_BRJT)
292 .addUse(TablePtr)
294 .addUse(IndexReg);
295}
296
298 const SrcOp &Op) {
299 return buildInstr(TargetOpcode::COPY, Res, Op);
300}
301
303 const ConstantInt &Val) {
304 LLT Ty = Res.getLLTTy(*getMRI());
305 LLT EltTy = Ty.getScalarType();
306 assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
307 "creating constant with the wrong size");
308
309 if (Ty.isVector()) {
310 auto Const = buildInstr(TargetOpcode::G_CONSTANT)
311 .addDef(getMRI()->createGenericVirtualRegister(EltTy))
312 .addCImm(&Val);
313 return buildSplatVector(Res, Const);
314 }
315
316 auto Const = buildInstr(TargetOpcode::G_CONSTANT);
317 Const->setDebugLoc(DebugLoc());
318 Res.addDefToMIB(*getMRI(), Const);
319 Const.addCImm(&Val);
320 return Const;
321}
322
324 int64_t Val) {
327 ConstantInt *CI = ConstantInt::get(IntN, Val, true);
328 return buildConstant(Res, *CI);
329}
330
332 const ConstantFP &Val) {
333 LLT Ty = Res.getLLTTy(*getMRI());
334 LLT EltTy = Ty.getScalarType();
335
337 == EltTy.getSizeInBits() &&
338 "creating fconstant with the wrong size");
339
340 assert(!Ty.isPointer() && "invalid operand type");
341
342 if (Ty.isVector()) {
343 auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
344 .addDef(getMRI()->createGenericVirtualRegister(EltTy))
345 .addFPImm(&Val);
346
347 return buildSplatVector(Res, Const);
348 }
349
350 auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
351 Const->setDebugLoc(DebugLoc());
352 Res.addDefToMIB(*getMRI(), Const);
353 Const.addFPImm(&Val);
354 return Const;
355}
356
358 const APInt &Val) {
360 return buildConstant(Res, *CI);
361}
362
364 double Val) {
365 LLT DstTy = Res.getLLTTy(*getMRI());
366 auto &Ctx = getMF().getFunction().getContext();
367 auto *CFP =
369 return buildFConstant(Res, *CFP);
370}
371
373 const APFloat &Val) {
374 auto &Ctx = getMF().getFunction().getContext();
375 auto *CFP = ConstantFP::get(Ctx, Val);
376 return buildFConstant(Res, *CFP);
377}
378
380 MachineBasicBlock &Dest) {
381 assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");
382
383 auto MIB = buildInstr(TargetOpcode::G_BRCOND);
384 Tst.addSrcToMIB(MIB);
385 MIB.addMBB(&Dest);
386 return MIB;
387}
388
391 MachinePointerInfo PtrInfo, Align Alignment,
393 const AAMDNodes &AAInfo) {
394 MMOFlags |= MachineMemOperand::MOLoad;
395 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
396
397 LLT Ty = Dst.getLLTTy(*getMRI());
398 MachineMemOperand *MMO =
399 getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
400 return buildLoad(Dst, Addr, *MMO);
401}
402
404 const DstOp &Res,
405 const SrcOp &Addr,
406 MachineMemOperand &MMO) {
407 assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
408 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
409
410 auto MIB = buildInstr(Opcode);
411 Res.addDefToMIB(*getMRI(), MIB);
412 Addr.addSrcToMIB(MIB);
413 MIB.addMemOperand(&MMO);
414 return MIB;
415}
416
418 const DstOp &Dst, const SrcOp &BasePtr,
419 MachineMemOperand &BaseMMO, int64_t Offset) {
420 LLT LoadTy = Dst.getLLTTy(*getMRI());
421 MachineMemOperand *OffsetMMO =
422 getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);
423
424 if (Offset == 0) // This may be a size or type changing load.
425 return buildLoad(Dst, BasePtr, *OffsetMMO);
426
427 LLT PtrTy = BasePtr.getLLTTy(*getMRI());
428 LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
429 auto ConstOffset = buildConstant(OffsetTy, Offset);
430 auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
431 return buildLoad(Dst, Ptr, *OffsetMMO);
432}
433
435 const SrcOp &Addr,
436 MachineMemOperand &MMO) {
437 assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
438 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
439
440 auto MIB = buildInstr(TargetOpcode::G_STORE);
441 Val.addSrcToMIB(MIB);
442 Addr.addSrcToMIB(MIB);
443 MIB.addMemOperand(&MMO);
444 return MIB;
445}
446
449 MachinePointerInfo PtrInfo, Align Alignment,
451 const AAMDNodes &AAInfo) {
452 MMOFlags |= MachineMemOperand::MOStore;
453 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
454
455 LLT Ty = Val.getLLTTy(*getMRI());
456 MachineMemOperand *MMO =
457 getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
458 return buildStore(Val, Addr, *MMO);
459}
460
462 const SrcOp &Op) {
463 return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
464}
465
467 const SrcOp &Op) {
468 return buildInstr(TargetOpcode::G_SEXT, Res, Op);
469}
470
472 const SrcOp &Op) {
473 return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
474}
475
476unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
477 const auto *TLI = getMF().getSubtarget().getTargetLowering();
478 switch (TLI->getBooleanContents(IsVec, IsFP)) {
480 return TargetOpcode::G_SEXT;
482 return TargetOpcode::G_ZEXT;
483 default:
484 return TargetOpcode::G_ANYEXT;
485 }
486}
487
489 const SrcOp &Op,
490 bool IsFP) {
491 unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
492 return buildInstr(ExtOp, Res, Op);
493}
494
496 const SrcOp &Op,
497 bool IsVector,
498 bool IsFP) {
499 const auto *TLI = getMF().getSubtarget().getTargetLowering();
500 switch (TLI->getBooleanContents(IsVector, IsFP)) {
502 return buildSExtInReg(Res, Op, 1);
504 return buildZExtInReg(Res, Op, 1);
506 return buildCopy(Res, Op);
507 }
508
509 llvm_unreachable("unexpected BooleanContent");
510}
511
513 const DstOp &Res,
514 const SrcOp &Op) {
515 assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
516 TargetOpcode::G_SEXT == ExtOpc) &&
517 "Expecting Extending Opc");
518 assert(Res.getLLTTy(*getMRI()).isScalar() ||
519 Res.getLLTTy(*getMRI()).isVector());
520 assert(Res.getLLTTy(*getMRI()).isScalar() ==
521 Op.getLLTTy(*getMRI()).isScalar());
522
523 unsigned Opcode = TargetOpcode::COPY;
524 if (Res.getLLTTy(*getMRI()).getSizeInBits() >
525 Op.getLLTTy(*getMRI()).getSizeInBits())
526 Opcode = ExtOpc;
527 else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
528 Op.getLLTTy(*getMRI()).getSizeInBits())
529 Opcode = TargetOpcode::G_TRUNC;
530 else
531 assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));
532
533 return buildInstr(Opcode, Res, Op);
534}
535
537 const SrcOp &Op) {
538 return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
539}
540
542 const SrcOp &Op) {
543 return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
544}
545
547 const SrcOp &Op) {
548 return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
549}
550
552 const SrcOp &Op,
553 int64_t ImmOp) {
554 LLT ResTy = Res.getLLTTy(*getMRI());
555 auto Mask = buildConstant(
556 ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
557 return buildAnd(Res, Op, Mask);
558}
559
561 const SrcOp &Src) {
562 LLT SrcTy = Src.getLLTTy(*getMRI());
563 LLT DstTy = Dst.getLLTTy(*getMRI());
564 if (SrcTy == DstTy)
565 return buildCopy(Dst, Src);
566
567 unsigned Opcode;
568 if (SrcTy.isPointer() && DstTy.isScalar())
569 Opcode = TargetOpcode::G_PTRTOINT;
570 else if (DstTy.isPointer() && SrcTy.isScalar())
571 Opcode = TargetOpcode::G_INTTOPTR;
572 else {
573 assert(!SrcTy.isPointer() && !DstTy.isPointer() && "n G_ADDRCAST yet");
574 Opcode = TargetOpcode::G_BITCAST;
575 }
576
577 return buildInstr(Opcode, Dst, Src);
578}
579
581 const SrcOp &Src,
582 uint64_t Index) {
583 LLT SrcTy = Src.getLLTTy(*getMRI());
584 LLT DstTy = Dst.getLLTTy(*getMRI());
585
586#ifndef NDEBUG
587 assert(SrcTy.isValid() && "invalid operand type");
588 assert(DstTy.isValid() && "invalid operand type");
589 assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
590 "extracting off end of register");
591#endif
592
593 if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
594 assert(Index == 0 && "insertion past the end of a register");
595 return buildCast(Dst, Src);
596 }
597
598 auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
599 Dst.addDefToMIB(*getMRI(), Extract);
600 Src.addSrcToMIB(Extract);
601 Extract.addImm(Index);
602 return Extract;
603}
604
606 return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
607}
608
610 ArrayRef<Register> Ops) {
611 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
612 // we need some temporary storage for the DstOp objects. Here we use a
613 // sufficiently large SmallVector to not go through the heap.
614 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
615 assert(TmpVec.size() > 1);
616 return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
617}
618
621 ArrayRef<Register> Ops) {
622 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
623 // we need some temporary storage for the DstOp objects. Here we use a
624 // sufficiently large SmallVector to not go through the heap.
625 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
626 assert(TmpVec.size() > 1);
627 return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
628}
629
632 std::initializer_list<SrcOp> Ops) {
633 assert(Ops.size() > 1);
634 return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
635}
636
637unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
638 ArrayRef<SrcOp> SrcOps) const {
639 if (DstOp.getLLTTy(*getMRI()).isVector()) {
640 if (SrcOps[0].getLLTTy(*getMRI()).isVector())
641 return TargetOpcode::G_CONCAT_VECTORS;
642 return TargetOpcode::G_BUILD_VECTOR;
643 }
644
645 return TargetOpcode::G_MERGE_VALUES;
646}
647
649 const SrcOp &Op) {
650 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
651 // we need some temporary storage for the DstOp objects. Here we use a
652 // sufficiently large SmallVector to not go through the heap.
653 SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
654 assert(TmpVec.size() > 1);
655 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
656}
657
659 const SrcOp &Op) {
660 unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
661 SmallVector<DstOp, 8> TmpVec(NumReg, Res);
662 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
663}
664
666 const SrcOp &Op) {
667 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
668 // we need some temporary storage for the DstOp objects. Here we use a
669 // sufficiently large SmallVector to not go through the heap.
670 SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
671 assert(TmpVec.size() > 1);
672 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
673}
674
676 ArrayRef<Register> Ops) {
677 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
678 // we need some temporary storage for the DstOp objects. Here we use a
679 // sufficiently large SmallVector to not go through the heap.
680 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
681 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
682}
683
686 ArrayRef<APInt> Ops) {
687 SmallVector<SrcOp> TmpVec;
688 TmpVec.reserve(Ops.size());
689 LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
690 for (const auto &Op : Ops)
691 TmpVec.push_back(buildConstant(EltTy, Op));
692 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
693}
694
696 const SrcOp &Src) {
697 SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
698 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
699}
700
703 ArrayRef<Register> Ops) {
704 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
705 // we need some temporary storage for the DstOp objects. Here we use a
706 // sufficiently large SmallVector to not go through the heap.
707 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
708 if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
709 Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
710 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
711 return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
712}
713
715 const SrcOp &Src) {
716 LLT DstTy = Res.getLLTTy(*getMRI());
717 assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
718 "Expected Src to match Dst elt ty");
719 auto UndefVec = buildUndef(DstTy);
720 auto Zero = buildConstant(LLT::scalar(64), 0);
721 auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
722 SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
723 return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
724}
725
727 const SrcOp &Src1,
728 const SrcOp &Src2,
729 ArrayRef<int> Mask) {
730 LLT DstTy = Res.getLLTTy(*getMRI());
731 LLT Src1Ty = Src1.getLLTTy(*getMRI());
732 LLT Src2Ty = Src2.getLLTTy(*getMRI());
733 assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
734 Mask.size());
735 assert(DstTy.getElementType() == Src1Ty.getElementType() &&
736 DstTy.getElementType() == Src2Ty.getElementType());
737 (void)DstTy;
738 (void)Src1Ty;
739 (void)Src2Ty;
740 ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
741 return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
742 .addShuffleMask(MaskAlloc);
743}
744
747 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
748 // we need some temporary storage for the DstOp objects. Here we use a
749 // sufficiently large SmallVector to not go through the heap.
750 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
751 return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
752}
753
755 const SrcOp &Src,
756 const SrcOp &Op,
757 unsigned Index) {
758 assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
759 Res.getLLTTy(*getMRI()).getSizeInBits() &&
760 "insertion past the end of a register");
761
762 if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
763 Op.getLLTTy(*getMRI()).getSizeInBits()) {
764 return buildCast(Res, Op);
765 }
766
767 return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
768}
769
771 ArrayRef<Register> ResultRegs,
772 bool HasSideEffects) {
773 auto MIB =
774 buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
775 : TargetOpcode::G_INTRINSIC);
776 for (unsigned ResultReg : ResultRegs)
777 MIB.addDef(ResultReg);
778 MIB.addIntrinsicID(ID);
779 return MIB;
780}
781
784 bool HasSideEffects) {
785 auto MIB =
786 buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
787 : TargetOpcode::G_INTRINSIC);
788 for (DstOp Result : Results)
789 Result.addDefToMIB(*getMRI(), MIB);
790 MIB.addIntrinsicID(ID);
791 return MIB;
792}
793
795 const SrcOp &Op) {
796 return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
797}
798
801 std::optional<unsigned> Flags) {
802 return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
803}
804
806 const DstOp &Res,
807 const SrcOp &Op0,
808 const SrcOp &Op1) {
809 return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
810}
811
813 const DstOp &Res,
814 const SrcOp &Op0,
815 const SrcOp &Op1,
816 std::optional<unsigned> Flags) {
817
818 return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
819}
820
823 const SrcOp &Op0, const SrcOp &Op1,
824 std::optional<unsigned> Flags) {
825
826 return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
827}
828
831 const SrcOp &Elt, const SrcOp &Idx) {
832 return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
833}
834
837 const SrcOp &Idx) {
838 return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
839}
840
842 Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
843 Register NewVal, MachineMemOperand &MMO) {
844#ifndef NDEBUG
845 LLT OldValResTy = getMRI()->getType(OldValRes);
846 LLT SuccessResTy = getMRI()->getType(SuccessRes);
847 LLT AddrTy = getMRI()->getType(Addr);
848 LLT CmpValTy = getMRI()->getType(CmpVal);
849 LLT NewValTy = getMRI()->getType(NewVal);
850 assert(OldValResTy.isScalar() && "invalid operand type");
851 assert(SuccessResTy.isScalar() && "invalid operand type");
852 assert(AddrTy.isPointer() && "invalid operand type");
853 assert(CmpValTy.isValid() && "invalid operand type");
854 assert(NewValTy.isValid() && "invalid operand type");
855 assert(OldValResTy == CmpValTy && "type mismatch");
856 assert(OldValResTy == NewValTy && "type mismatch");
857#endif
858
859 return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
860 .addDef(OldValRes)
861 .addDef(SuccessRes)
862 .addUse(Addr)
863 .addUse(CmpVal)
864 .addUse(NewVal)
865 .addMemOperand(&MMO);
866}
870 Register CmpVal, Register NewVal,
871 MachineMemOperand &MMO) {
872#ifndef NDEBUG
873 LLT OldValResTy = getMRI()->getType(OldValRes);
874 LLT AddrTy = getMRI()->getType(Addr);
875 LLT CmpValTy = getMRI()->getType(CmpVal);
876 LLT NewValTy = getMRI()->getType(NewVal);
877 assert(OldValResTy.isScalar() && "invalid operand type");
878 assert(AddrTy.isPointer() && "invalid operand type");
879 assert(CmpValTy.isValid() && "invalid operand type");
880 assert(NewValTy.isValid() && "invalid operand type");
881 assert(OldValResTy == CmpValTy && "type mismatch");
882 assert(OldValResTy == NewValTy && "type mismatch");
883#endif
884
885 return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
886 .addDef(OldValRes)
887 .addUse(Addr)
888 .addUse(CmpVal)
889 .addUse(NewVal)
890 .addMemOperand(&MMO);
891}
892
894 unsigned Opcode, const DstOp &OldValRes,
895 const SrcOp &Addr, const SrcOp &Val,
896 MachineMemOperand &MMO) {
897
898#ifndef NDEBUG
899 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
900 LLT AddrTy = Addr.getLLTTy(*getMRI());
901 LLT ValTy = Val.getLLTTy(*getMRI());
902 assert(OldValResTy.isScalar() && "invalid operand type");
903 assert(AddrTy.isPointer() && "invalid operand type");
904 assert(ValTy.isValid() && "invalid operand type");
905 assert(OldValResTy == ValTy && "type mismatch");
906 assert(MMO.isAtomic() && "not atomic mem operand");
907#endif
908
909 auto MIB = buildInstr(Opcode);
910 OldValRes.addDefToMIB(*getMRI(), MIB);
911 Addr.addSrcToMIB(MIB);
912 Val.addSrcToMIB(MIB);
913 MIB.addMemOperand(&MMO);
914 return MIB;
915}
916
919 Register Val, MachineMemOperand &MMO) {
920 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
921 MMO);
922}
925 Register Val, MachineMemOperand &MMO) {
926 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
927 MMO);
928}
931 Register Val, MachineMemOperand &MMO) {
932 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
933 MMO);
934}
937 Register Val, MachineMemOperand &MMO) {
938 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
939 MMO);
940}
943 Register Val, MachineMemOperand &MMO) {
944 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
945 MMO);
946}
949 Register Val,
950 MachineMemOperand &MMO) {
951 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
952 MMO);
953}
956 Register Val, MachineMemOperand &MMO) {
957 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
958 MMO);
959}
962 Register Val, MachineMemOperand &MMO) {
963 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
964 MMO);
965}
968 Register Val, MachineMemOperand &MMO) {
969 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
970 MMO);
971}
974 Register Val, MachineMemOperand &MMO) {
975 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
976 MMO);
977}
980 Register Val, MachineMemOperand &MMO) {
981 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
982 MMO);
983}
984
987 const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
988 MachineMemOperand &MMO) {
989 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
990 MMO);
991}
992
994MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
995 MachineMemOperand &MMO) {
996 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
997 MMO);
998}
999
1002 const SrcOp &Val, MachineMemOperand &MMO) {
1003 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
1004 MMO);
1005}
1006
1009 const SrcOp &Val, MachineMemOperand &MMO) {
1010 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
1011 MMO);
1012}
1013
1015MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
1016 return buildInstr(TargetOpcode::G_FENCE)
1017 .addImm(Ordering)
1018 .addImm(Scope);
1019}
1020
1023#ifndef NDEBUG
1024 assert(getMRI()->getType(Res).isPointer() && "invalid res type");
1025#endif
1026
1027 return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
1028}
1029
1030void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
1031 bool IsExtend) {
1032#ifndef NDEBUG
1033 if (DstTy.isVector()) {
1034 assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
1035 assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
1036 "different number of elements in a trunc/ext");
1037 } else
1038 assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
1039
1040 if (IsExtend)
1041 assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
1042 "invalid narrowing extend");
1043 else
1044 assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
1045 "invalid widening trunc");
1046#endif
1047}
1048
1049void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
1050 const LLT Op0Ty, const LLT Op1Ty) {
1051#ifndef NDEBUG
1052 assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
1053 "invalid operand type");
1054 assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
1055 if (ResTy.isScalar() || ResTy.isPointer())
1056 assert(TstTy.isScalar() && "type mismatch");
1057 else
1058 assert((TstTy.isScalar() ||
1059 (TstTy.isVector() &&
1060 TstTy.getNumElements() == Op0Ty.getNumElements())) &&
1061 "type mismatch");
1062#endif
1063}
1064
1067 ArrayRef<SrcOp> SrcOps,
1068 std::optional<unsigned> Flags) {
1069 switch (Opc) {
1070 default:
1071 break;
1072 case TargetOpcode::G_SELECT: {
1073 assert(DstOps.size() == 1 && "Invalid select");
1074 assert(SrcOps.size() == 3 && "Invalid select");
1076 DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
1077 SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
1078 break;
1079 }
1080 case TargetOpcode::G_FNEG:
1081 case TargetOpcode::G_ABS:
1082 // All these are unary ops.
1083 assert(DstOps.size() == 1 && "Invalid Dst");
1084 assert(SrcOps.size() == 1 && "Invalid Srcs");
1085 validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
1086 SrcOps[0].getLLTTy(*getMRI()));
1087 break;
1088 case TargetOpcode::G_ADD:
1089 case TargetOpcode::G_AND:
1090 case TargetOpcode::G_MUL:
1091 case TargetOpcode::G_OR:
1092 case TargetOpcode::G_SUB:
1093 case TargetOpcode::G_XOR:
1094 case TargetOpcode::G_UDIV:
1095 case TargetOpcode::G_SDIV:
1096 case TargetOpcode::G_UREM:
1097 case TargetOpcode::G_SREM:
1098 case TargetOpcode::G_SMIN:
1099 case TargetOpcode::G_SMAX:
1100 case TargetOpcode::G_UMIN:
1101 case TargetOpcode::G_UMAX:
1102 case TargetOpcode::G_UADDSAT:
1103 case TargetOpcode::G_SADDSAT:
1104 case TargetOpcode::G_USUBSAT:
1105 case TargetOpcode::G_SSUBSAT: {
1106 // All these are binary ops.
1107 assert(DstOps.size() == 1 && "Invalid Dst");
1108 assert(SrcOps.size() == 2 && "Invalid Srcs");
1109 validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
1110 SrcOps[0].getLLTTy(*getMRI()),
1111 SrcOps[1].getLLTTy(*getMRI()));
1112 break;
1113 }
1114 case TargetOpcode::G_SHL:
1115 case TargetOpcode::G_ASHR:
1116 case TargetOpcode::G_LSHR:
1117 case TargetOpcode::G_USHLSAT:
1118 case TargetOpcode::G_SSHLSAT: {
1119 assert(DstOps.size() == 1 && "Invalid Dst");
1120 assert(SrcOps.size() == 2 && "Invalid Srcs");
1121 validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
1122 SrcOps[0].getLLTTy(*getMRI()),
1123 SrcOps[1].getLLTTy(*getMRI()));
1124 break;
1125 }
1126 case TargetOpcode::G_SEXT:
1127 case TargetOpcode::G_ZEXT:
1128 case TargetOpcode::G_ANYEXT:
1129 assert(DstOps.size() == 1 && "Invalid Dst");
1130 assert(SrcOps.size() == 1 && "Invalid Srcs");
1131 validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1132 SrcOps[0].getLLTTy(*getMRI()), true);
1133 break;
1134 case TargetOpcode::G_TRUNC:
1135 case TargetOpcode::G_FPTRUNC: {
1136 assert(DstOps.size() == 1 && "Invalid Dst");
1137 assert(SrcOps.size() == 1 && "Invalid Srcs");
1138 validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1139 SrcOps[0].getLLTTy(*getMRI()), false);
1140 break;
1141 }
1142 case TargetOpcode::G_BITCAST: {
1143 assert(DstOps.size() == 1 && "Invalid Dst");
1144 assert(SrcOps.size() == 1 && "Invalid Srcs");
1145 assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1146 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
1147 break;
1148 }
1149 case TargetOpcode::COPY:
1150 assert(DstOps.size() == 1 && "Invalid Dst");
1151 // If the caller wants to add a subreg source it has to be done separately
1152 // so we may not have any SrcOps at this point yet.
1153 break;
1154 case TargetOpcode::G_FCMP:
1155 case TargetOpcode::G_ICMP: {
1156 assert(DstOps.size() == 1 && "Invalid Dst Operands");
1157 assert(SrcOps.size() == 3 && "Invalid Src Operands");
1158 // For F/ICMP, the first src operand is the predicate, followed by
1159 // the two comparands.
1160 assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
1161 "Expecting predicate");
1162 assert([&]() -> bool {
1163 CmpInst::Predicate Pred = SrcOps[0].getPredicate();
1164 return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
1165 : CmpInst::isFPPredicate(Pred);
1166 }() && "Invalid predicate");
1167 assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1168 "Type mismatch");
1169 assert([&]() -> bool {
1170 LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
1171 LLT DstTy = DstOps[0].getLLTTy(*getMRI());
1172 if (Op0Ty.isScalar() || Op0Ty.isPointer())
1173 return DstTy.isScalar();
1174 else
1175 return DstTy.isVector() &&
1176 DstTy.getNumElements() == Op0Ty.getNumElements();
1177 }() && "Type Mismatch");
1178 break;
1179 }
1180 case TargetOpcode::G_UNMERGE_VALUES: {
1181 assert(!DstOps.empty() && "Invalid trivial sequence");
1182 assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
1183 assert(llvm::all_of(DstOps,
1184 [&, this](const DstOp &Op) {
1185 return Op.getLLTTy(*getMRI()) ==
1186 DstOps[0].getLLTTy(*getMRI());
1187 }) &&
1188 "type mismatch in output list");
1189 assert((TypeSize::ScalarTy)DstOps.size() *
1190 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1191 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1192 "input operands do not cover output register");
1193 break;
1194 }
1195 case TargetOpcode::G_MERGE_VALUES: {
1196 assert(SrcOps.size() >= 2 && "invalid trivial sequence");
1197 assert(DstOps.size() == 1 && "Invalid Dst");
1198 assert(llvm::all_of(SrcOps,
1199 [&, this](const SrcOp &Op) {
1200 return Op.getLLTTy(*getMRI()) ==
1201 SrcOps[0].getLLTTy(*getMRI());
1202 }) &&
1203 "type mismatch in input list");
1204 assert((TypeSize::ScalarTy)SrcOps.size() *
1205 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1206 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1207 "input operands do not cover output register");
1208 assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
1209 "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
1210 break;
1211 }
1212 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1213 assert(DstOps.size() == 1 && "Invalid Dst size");
1214 assert(SrcOps.size() == 2 && "Invalid Src size");
1215 assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1216 assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
1217 DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
1218 "Invalid operand type");
1219 assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
1220 assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
1221 DstOps[0].getLLTTy(*getMRI()) &&
1222 "Type mismatch");
1223 break;
1224 }
1225 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1226 assert(DstOps.size() == 1 && "Invalid dst size");
1227 assert(SrcOps.size() == 3 && "Invalid src size");
1228 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1229 SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1230 assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
1231 SrcOps[1].getLLTTy(*getMRI()) &&
1232 "Type mismatch");
1233 assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
1234 assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
1235 SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
1236 "Type mismatch");
1237 break;
1238 }
1239 case TargetOpcode::G_BUILD_VECTOR: {
1240 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1241 "Must have at least 2 operands");
1242 assert(DstOps.size() == 1 && "Invalid DstOps");
1243 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1244 "Res type must be a vector");
1245 assert(llvm::all_of(SrcOps,
1246 [&, this](const SrcOp &Op) {
1247 return Op.getLLTTy(*getMRI()) ==
1248 SrcOps[0].getLLTTy(*getMRI());
1249 }) &&
1250 "type mismatch in input list");
1251 assert((TypeSize::ScalarTy)SrcOps.size() *
1252 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1253 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1254 "input scalars do not exactly cover the output vector register");
1255 break;
1256 }
1257 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1258 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1259 "Must have at least 2 operands");
1260 assert(DstOps.size() == 1 && "Invalid DstOps");
1261 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1262 "Res type must be a vector");
1263 assert(llvm::all_of(SrcOps,
1264 [&, this](const SrcOp &Op) {
1265 return Op.getLLTTy(*getMRI()) ==
1266 SrcOps[0].getLLTTy(*getMRI());
1267 }) &&
1268 "type mismatch in input list");
1269 break;
1270 }
1271 case TargetOpcode::G_CONCAT_VECTORS: {
1272 assert(DstOps.size() == 1 && "Invalid DstOps");
1273 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1274 "Must have at least 2 operands");
1275 assert(llvm::all_of(SrcOps,
1276 [&, this](const SrcOp &Op) {
1277 return (Op.getLLTTy(*getMRI()).isVector() &&
1278 Op.getLLTTy(*getMRI()) ==
1279 SrcOps[0].getLLTTy(*getMRI()));
1280 }) &&
1281 "type mismatch in input list");
1282 assert((TypeSize::ScalarTy)SrcOps.size() *
1283 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1284 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1285 "input vectors do not exactly cover the output vector register");
1286 break;
1287 }
1288 case TargetOpcode::G_UADDE: {
1289 assert(DstOps.size() == 2 && "Invalid no of dst operands");
1290 assert(SrcOps.size() == 3 && "Invalid no of src operands");
1291 assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1292 assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
1293 (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
1294 "Invalid operand");
1295 assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1296 assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1297 "type mismatch");
1298 break;
1299 }
1300 }
1301
1302 auto MIB = buildInstr(Opc);
1303 for (const DstOp &Op : DstOps)
1304 Op.addDefToMIB(*getMRI(), MIB);
1305 for (const SrcOp &Op : SrcOps)
1306 Op.addSrcToMIB(MIB);
1307 if (Flags)
1308 MIB->setFlags(*Flags);
1309 return MIB;
1310}
Function Alias Analysis Results
Returns the sub-type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
uint64_t Addr
uint64_t Size
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:236
This file declares the MachineIRBuilder class.
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This file describes how to lower LLVM code to machine code.
@ Flags
Definition: TextStubV5.cpp:93
const fltSemantics & getSemantics() const
Definition: APFloat.h:1277
Class for arbitrary precision integers.
Definition: APInt.h:75
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:289
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:152
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:163
iterator begin() const
Definition: ArrayRef.h:151
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:158
The address of a basic block.
Definition: Constants.h:874
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:711
bool isFPPredicate() const
Definition: InstrTypes.h:818
bool isIntPredicate() const
Definition: InstrTypes.h:819
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:260
const APFloat & getValueAPF() const
Definition: Constants.h:296
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
Definition: Constants.cpp:927
This is the shared class of boolean and integer constants.
Definition: Constants.h:78
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:888
unsigned getBitWidth() const
getBitWidth - Return the bitwidth of this constant.
Definition: Constants.h:139
This is an important base class in LLVM.
Definition: Constant.h:41
A debug info location.
Definition: DebugLoc.h:33
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:319
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:290
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:339
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:233
constexpr bool isScalar() const
Definition: LowLevelType.h:123
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr bool isValid() const
Definition: LowLevelType.h:121
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:133
constexpr bool isVector() const
Definition: LowLevelType.h:129
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:159
constexpr bool isPointer() const
Definition: LowLevelType.h:125
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:257
constexpr LLT getScalarType() const
Definition: LowLevelType.h:174
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Metadata node.
Definition: Metadata.h:950
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBundleIterator< MachineInstr > iterator
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO, int64_t Offset)
Helper to create a load from a constant offset given a base address.
MachineInstrBuilder buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO.
MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op, bool IsVector, bool IsFP)
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
LLVMContext & getContext() const
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx)
Build and insert Res = G_CONSTANT_POOL Idx.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
MachineInstrBuilder buildZExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and inserts Res = G_AND Op, LowBitsSet(ImmOp) Since there is no G_ZEXT_INREG like G_SEXT_INREG,...
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src)
Build and insert a vector splat of a scalar Src using a G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idio...
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal, Register NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr,...
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MDNode * getPCSections()
Get the current instruction's PC sections metadata.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
void recordInsertion(MachineInstr *InsertedInstr) const
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildAtomicCmpXchg(Register OldValRes, Register Addr, Register CmpVal, Register NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0 Res = G_BUILD_VECTOR a, b, .....
void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty, const LLT Op1Ty)
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildBuildVectorConstant(const DstOp &Res, ArrayRef< APInt > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ... where each OpN is built with G_CONSTANT.
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1)
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1)
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instructions specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src, const SrcOp &Op, unsigned Index)
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in th...
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of...
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
MachineInstrBuilder buildMergeValues(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects)
Build and insert either a G_INTRINSIC (if HasSideEffects is false) or G_INTRINSIC_W_SIDE_EFFECTS inst...
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x, y, z = G_UNMERGE_VALUES Op0 Res = G_BUILD_VECTOR a,...
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend)
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTRMASK Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
void validateUnaryOp(const LLT Res, const LLT Op0)
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
void setMF(MachineFunction &MF)
MachineInstrBuilder buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT (1 << NumBits) - 1.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and insert Res = G_SEXT_INREG Op, ImmOp.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
A description of a memory reference used in the backend.
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher,...
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:693
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
size_t size() const
Definition: SmallVector.h:91
void reserve(size_type N)
Definition: SmallVector.h:667
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
LLT getLLTTy(const MachineRegisterInfo &MRI) const
void addSrcToMIB(MachineInstrBuilder &MIB) const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
LLVM Value Representation.
Definition: Value.h:74
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:440
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1819
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:478
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:651
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:325
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
MachineFunction * MF
MachineFunction under construction.
DebugLoc DL
Debug location to be set to any instruction we create.
const TargetInstrInfo * TII
Information used to access the description of the opcodes.
MDNode * PCSections
PC sections metadata to be set to any instruction we create.
MachineBasicBlock::iterator II
MachineRegisterInfo * MRI
Information used to verify types are consistent and to create virtual registers.
GISelChangeObserver * Observer
This class contains a discriminated union of information about pointers in memory operands,...