//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.MMRA = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}
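
// Usage sketch (illustrative; assumes the setMBB/setInsertPt helpers declared
// in MachineIRBuilder.h): a client binds the builder to a function, picks a
// block and insertion point, then emits instructions.
//   MachineIRBuilder B;
//   B.setMF(MF);
//   B.setMBB(MBB);
//   B.setInsertPt(MBB, MBB.begin());
//   auto C = B.buildConstant(LLT::scalar(32), 42); // emits G_CONSTANT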

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return MachineInstrBuilder(getMF(), getMF().CreateMachineInstr(
                                          getTII().get(Opcode), getDL()));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else if (CI->getBitWidth() == 1)
      MIB.addImm(CI->getZExtValue());
    else
      MIB.addImm(CI->getSExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}
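
// For example, a debug value whose constant is `inttoptr (i64 1024 to ptr)`
// is emitted as the plain immediate 1024: the lambda above strips the
// ConstantExpr wrapper so the ConstantInt path applies.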

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1,
                              std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildObjectPtrOffset(const DstOp &Res,
                                                           const SrcOp &Op0,
                                                           const SrcOp &Op1) {
  // Reconstructed body (assumption): object-offset addressing is a G_PTR_ADD
  // that cannot wrap, mirroring SelectionDAG::getObjectPtrOffset.
  return buildPtrAdd(Res, Op0, Op1, MachineInstr::MIFlag::NoUWrap);
}

std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value,
                                    std::optional<unsigned> Flags) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0), Flags);
}

std::optional<MachineInstrBuilder> MachineIRBuilder::materializeObjectPtrOffset(
    Register &Res, Register Op0, const LLT ValueTy, uint64_t Value) {
  // Reconstructed flags (assumption): same no-unsigned-wrap semantics as
  // buildObjectPtrOffset above.
  return materializePtrAdd(Res, Op0, ValueTy, Value,
                           MachineInstr::MIFlag::NoUWrap);
}
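
// Usage sketch (illustrative; assumes a configured MachineIRBuilder B and a
// pointer vreg BasePtr): a zero offset creates no instruction and aliases the
// input register.
//   Register NewPtr;
//   if (B.materializePtrAdd(NewPtr, BasePtr, LLT::scalar(64), 16))
//     ; // NewPtr is defined by a fresh G_PTR_ADD
//   else
//     ; // offset was zero, so NewPtr == BasePtr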

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}
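
// For example, buildMaskLowPtrBits(Dst, Ptr, 4) emits a G_PTRMASK whose
// constant mask clears the low four bits, i.e. it rounds Ptr down to a
// 16-byte boundary.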

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }
  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(Op0Ty.isVector() && "Non vector type");
  assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
          (ResTy.isVector() &&
           (ResTy.getElementType() == Op0Ty.getElementType()))) &&
         "Different vector element types");
  assert(
      (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
      "Op0 has fewer elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  if (ResTy.isScalar())
    return buildCopy(Res, Unmerge.getReg(0));
  SmallVector<Register, 8> Regs;
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}
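
// These two helpers are inverses in spirit: padding a <2 x s32> value into a
// <4 x s32> destination unmerges the two source elements, appends undef
// elements, and remerges; deleting trailing elements keeps only the leading
// lanes of the unmerge.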

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}
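
// E.g. building the constant 7 into a <4 x s32> destination first emits a
// scalar s32 G_CONSTANT and then splats it across the vector via
// buildSplatBuildVector (a G_BUILD_VECTOR of the same scalar).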

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  // TODO: Avoid implicit trunc?
  // See https://github.com/llvm/llvm-project/issues/112510.
  ConstantInt *CI = ConstantInt::getSigned(IntN, Val, /*implicitTrunc=*/true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) ==
             EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(
      Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder
MachineIRBuilder::buildConstantPtrAuth(const DstOp &Res,
                                       const ConstantPtrAuth *CPA,
                                       Register Addr, Register AddrDisc) {
  auto MIB = buildInstr(TargetOpcode::G_PTRAUTH_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addUse(Addr);
  MIB.addImm(CPA->getKey()->getZExtValue());
  MIB.addUse(AddrDisc);
  MIB.addImm(CPA->getDiscriminator()->getZExtValue());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
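
// Usage sketch (illustrative; assumes builder B, pointer vreg BasePtr, and an
// existing MachineMemOperand *BaseMMO): loading an s32 from BasePtr + 8
// derives an MMO at offset 8 and loads through a G_PTR_ADD'd pointer.
//   auto Val = B.buildLoadFromOffset(LLT::scalar(32), BasePtr, *BaseMMO, 8);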

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op, Flags);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}
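
// The *OrTrunc helpers pick the opcode purely from the size relationship:
// e.g. buildZExtOrTrunc emits G_ZEXT for s64 <- s32, G_TRUNC for s16 <- s32,
// and degrades to COPY when the two types match.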

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointerOrPointerVector() &&
           !DstTy.isPointerOrPointerVector() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "insertion past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}
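
// Opcode selection by example: building an s64 from two s32 pieces yields
// G_MERGE_VALUES; a <4 x s32> from four s32 scalars yields G_BUILD_VECTOR;
// a <4 x s32> from two <2 x s32> halves yields G_CONCAT_VECTORS.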

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

// Signature reconstructed (assumption): the register-attribute overload
// mirrors the LLT overload above.
MachineInstrBuilder
MachineIRBuilder::buildUnmerge(MachineRegisterInfo::VRegAttrs Attrs,
                               const SrcOp &Op) {
  LLT OpTy = Op.getLLTTy(*getMRI());
  unsigned NumRegs = OpTy.getSizeInBits() / Attrs.Ty.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumRegs, Attrs);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
                                                            const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
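
// The generated splat idiom for a scalar Src and an <N x sK> result is,
// schematically:
//   %undef:_(<N x sK>) = G_IMPLICIT_DEF
//   %zero:_(s64) = G_CONSTANT i64 0
//   %ins:_(<N x sK>) = G_INSERT_VECTOR_ELT %undef, %src, %zero
//   %res:_(<N x sK>) = G_SHUFFLE_VECTOR %ins, %undef, shufflemask(0, ..., 0)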

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
         "Expected Src to match Dst elt ty");
  return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  const LLT DstElemTy = DstTy.getScalarType();
  const LLT ElemTy1 = Src1Ty.getScalarType();
  const LLT ElemTy2 = Src2Ty.getScalarType();
  assert(DstElemTy == ElemTy1 && DstElemTy == ElemTy2);
  assert(Mask.size() > 1 && "Scalar G_SHUFFLE_VECTOR is not supported");
  (void)DstElemTy;
  (void)ElemTy1;
  (void)ElemTy2;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildStepVector(const DstOp &Res,
                                                      unsigned Step) {
  unsigned Bitwidth = Res.getLLTTy(*getMRI()).getElementType().getSizeInBits();
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(),
                                     APInt(Bitwidth, Step));
  auto StepVector = buildInstr(TargetOpcode::G_STEP_VECTOR);
  StepVector->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), StepVector);
  StepVector.addCImm(CI);
  return StepVector;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  unsigned MinElts) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, MinElts);
  return buildVScale(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const ConstantInt &MinElts) {
  auto VScale = buildInstr(TargetOpcode::G_VSCALE);
  VScale->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), VScale);
  VScale.addCImm(&MinElts);
  return VScale;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const APInt &MinElts) {
  ConstantInt *CI =
      ConstantInt::get(getMF().getFunction().getContext(), MinElts);
  return buildVScale(Res, *CI);
}

static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects, bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (Register ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs) {
  // Attribute lookup reconstructed (assumption): derive side effects and
  // convergence from the intrinsic's function attributes.
  AttributeSet Attrs = Intrinsic::getFnAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results) {
  AttributeSet Attrs = Intrinsic::getFnAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}

MachineInstrBuilder
MachineIRBuilder::buildTrunc(const DstOp &Res, const SrcOp &Op,
                             std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_SCMP, Res, {Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildUCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_UCMP, Res, {Op0, Op1});
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
                                                           const SrcOp &Src0,
                                                           const SrcOp &Src1,
                                                           unsigned Idx) {
  return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
                    {Src0, Src1, uint64_t(Idx)});
}

MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
                                                            const SrcOp &Src,
                                                            unsigned Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
                    {Src, uint64_t(Idx)});
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
    const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  SuccessRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &CmpVal, const SrcOp &NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes,
                                 const SrcOp &Addr, const SrcOp &Val,
                                 MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMaximum(const DstOp &OldValRes,
                                         const SrcOp &Addr, const SrcOp &Val,
                                         MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAXIMUM, OldValRes, Addr,
                        Val, MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMinimum(const DstOp &OldValRes,
                                         const SrcOp &Addr, const SrcOp &Val,
                                         MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMINIMUM, OldValRes, Addr,
                        Val, MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
                                                    unsigned RW,
                                                    unsigned Locality,
                                                    unsigned CacheType,
                                                    MachineMemOperand &MMO) {
  auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
  Addr.addSrcToMIB(MIB);
  MIB.addImm(RW).addImm(Locality).addImm(CacheType);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getElementCount() == Op0Ty.getElementCount())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder
MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
                             ArrayRef<SrcOp> SrcOps,
                             std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getElementCount() == Op0Ty.getElementCount();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
               SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
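
// Usage sketch (illustrative; assumes builder B with s32 vregs LHS and RHS):
// this generic entry point covers opcodes without a dedicated helper and
// takes optional MachineInstr flags.
//   auto Add = B.buildInstr(TargetOpcode::G_ADD, {LLT::scalar(32)}, {LHS, RHS},
//                           MachineInstr::MIFlag::NoSWrap);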
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Function Alias Analysis Results
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent)
This file declares the MachineIRBuilder class.
Promote Memory to Register
Definition Mem2Reg.cpp:110
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This file describes how to lower LLVM code to machine code.
static Function * getFunction(FunctionType *Ty, const Twine &Name, Module *M)
static LLVM_ABI unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition APFloat.cpp:354
const fltSemantics & getSemantics() const
Definition APFloat.h:1439
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
This class holds the attributes for a particular argument, parameter, function, or return value.
Definition Attributes.h:361
The address of a basic block.
Definition Constants.h:904
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:282
const APFloat & getValueAPF() const
Definition Constants.h:325
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
Definition Constants.h:135
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
A signed pointer, in the ptrauth sense.
Definition Constants.h:1037
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1068
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1071
This is an important base class in LLVM.
Definition Constant.h:43
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
Register getReg() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
PointerType * getType() const
Global values are always pointers.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
constexpr unsigned getScalarSizeInBits() const
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
constexpr ElementCount getElementCount() const
constexpr bool isPointerOrPointerVector() const
constexpr LLT getScalarType() const
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
Metadata node.
Definition Metadata.h:1078
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBundleIterator< MachineInstr > iterator
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO, int64_t Offset)
Helper to create a load from a constant offset given a base address.
MachineInstrBuilder buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO.
MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op, bool IsVector, bool IsFP)
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
MachineInstrBuilder buildAtomicRMWFMaximum(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAXIMUM Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
LLVMContext & getContext() const
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_UCMP Op0, Op1.
MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx)
Build and insert Res = G_CONSTANT_POOL Idx.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SCMP Op0, Op1.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
MachineInstrBuilder buildZExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and inserts Res = G_AND Op, LowBitsSet(ImmOp) Since there is no G_ZEXT_INREG like G_SEXT_INREG,...
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value, std::optional< unsigned > Flags=std::nullopt)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)
Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src)
Build and insert a vector splat of a scalar Src using a G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idio...
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ZEXT Op.
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MDNode * getPCSections()
Get the current instruction's PC sections metadata.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
MachineInstrBuilder buildObjectPtrOffset(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert an instruction with appropriate flags for addressing some offset of an object,...
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction specifying that Variable is given by C (suitably modified by Expr).
void recordInsertion(MachineInstr *InsertedInstr) const
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
std::optional< MachineInstrBuilder > materializeObjectPtrOffset(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert an instruction with appropriate flags for addressing some offset of an object,...
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ..., Res = G_BUILD_VECTOR Op0, ..., or Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMWFMinimum(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMINIMUM Addr, Val, MMO.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
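A sketch of the load/store pair; both take their size, alignment, and ordering from the MachineMemOperand. The MMOs here are assumed to have been created elsewhere via MachineFunction::getMachineMemOperand, and the helper name is hypothetical.
// Copy one s32 through memory: a G_LOAD followed by a G_STORE.
static void copyWord(MachineIRBuilder &B, Register SrcPtr, Register DstPtr,
                     MachineMemOperand &LoadMMO, MachineMemOperand &StoreMMO) {
  auto Val = B.buildLoad(LLT::scalar(32), SrcPtr, LoadMMO);
  B.buildStore(Val, DstPtr, StoreMMO); // SrcOp accepts the builder directly.
}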
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0 followed by Res = G_BUILD_VECTOR a, b, ..., x, undef, undef.
void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty, const LLT Op1Ty)
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Reg (suitably modified by Expr).
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildBuildVectorConstant(const DstOp &Res, ArrayRef< APInt > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ... where each OpN is built with G_CONSTANT.
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1)
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1)
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying the given Label.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src, const SrcOp &Op, unsigned Index)
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in the stack slot specified by FI (suitably modified by Expr).
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
MachineInstrBuilder buildMergeValues(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
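An illustrative sketch combining the two element accessors; the s64 index type is an assumption (targets legalize toward the index type they prefer), and swapLane0 is a hypothetical helper.
#include <utility>
// Swap lane 0 of Vec with NewElt, returning the old element and new vector.
static std::pair<Register, Register>
swapLane0(MachineIRBuilder &B, Register Vec, Register NewElt, LLT VecTy) {
  auto Zero = B.buildConstant(LLT::scalar(64), 0);
  Register Old =
      B.buildExtractVectorElement(VecTy.getElementType(), Vec, Zero).getReg(0);
  Register New =
      B.buildInsertVectorElement(VecTy, Vec, NewElt, Zero).getReg(0);
  return {Old, New};
}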
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x, y, z = G_UNMERGE_VALUES Op0 followed by Res = G_BUILD_VECTOR a, b, ..., x.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
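A sketch of a single-source shuffle; the unused second source is filled with a G_IMPLICIT_DEF via buildUndef. The helper name is illustrative.
// Reverse a 4-element vector with a constant shuffle mask.
static Register reverse4(MachineIRBuilder &B, Register Vec, LLT VecTy) {
  auto Undef = B.buildUndef(VecTy);
  int Mask[4] = {3, 2, 1, 0};
  return B.buildShuffleVector(VecTy, Vec, Undef, Mask).getReg(0);
}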
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend)
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTRMASK Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
void validateUnaryOp(const LLT Res, const LLT Op0)
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MDNode * getMMRAMetadata()
Get the current instruction's MMRA metadata.
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)
Build and insert Res = G_EXTRACT_SUBVECTOR Src, Index.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
void setMF(MachineFunction &MF)
MachineInstrBuilder buildStepVector(const DstOp &Res, unsigned Step)
Build and insert Res = G_STEP_VECTOR Step.
MachineInstrBuilder buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT (1 << NumBits) - 1.
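A hedged sketch of this pointer-masking convenience wrapper, assuming the mask clears the low NumBits (consistent with the maskTrailingZeros helper referenced further down this page); the helper name is hypothetical.
// Round a pointer down to a 16-byte boundary by masking its low 4 bits.
static Register alignDown16(MachineIRBuilder &B, Register Ptr, LLT PtrTy) {
  return B.buildMaskLowPtrBits(PtrTy, Ptr, /*NumBits=*/4).getReg(0);
}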
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
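An illustrative sketch of materializing an integer constant; buildConstant also has an overload taking a plain int64_t, used here. The helper name is hypothetical.
// Materialize 42 as an s64 G_CONSTANT and add it to X.
static Register addFortyTwo(MachineIRBuilder &B, Register X) {
  LLT S64 = LLT::scalar(64);
  auto C = B.buildConstant(S64, 42);
  return B.buildAdd(S64, X, C).getReg(0);
}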
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and insert Res = G_SEXT_INREG Op, ImmOp.
MachineInstrBuilder buildConstantPtrAuth(const DstOp &Res, const ConstantPtrAuth *CPA, Register Addr, Register AddrDisc)
Build and insert G_PTRAUTH_GLOBAL_VALUE.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
A description of a memory reference used in the backend.
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher,...
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
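Since the flags form a bitmask, an access that both reads and writes simply ORs them together, e.g. (following the file's using-directive):
// A read-modify-write access carries both flags.
MachineMemOperand::Flags RMWFlags =
    MachineMemOperand::MOLoad | MachineMemOperand::MOStore;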
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
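A sketch contrasting explicit register creation with the DstOp-from-LLT shorthand used in the sketches above; defineCopy is a hypothetical helper.
// Create the generic vreg by hand, then define it with a COPY.
static Register defineCopy(MachineIRBuilder &B, Register Src, LLT Ty) {
  Register Dst = B.getMRI()->createGenericVirtualRegister(Ty);
  B.buildCopy(Dst, Src);
  return Dst;
}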
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
LLT getLLTTy(const MachineRegisterInfo &MRI) const
void addSrcToMIB(MachineInstrBuilder &MIB) const
Register getReg() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void addMetadata(unsigned KindID, MDNode &MD)
Add a metadata attachment.
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI AttributeSet getFnAttributes(LLVMContext &C, ID id)
Return the function attributes for an intrinsic.
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
constexpr T maskTrailingZeros(unsigned N)
Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.
Definition MathExtras.h:94
DWARFExpression::Operation Op
LLVM_ABI APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition Utils.cpp:660
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Definition Metadata.h:761
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
This class contains a discriminated union of information about pointers in memory operands, relating them back to the original IR operation.
All attributes (register class or bank and low-level type) a virtual register can have.