//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.MMRA = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections(), getMMRAMetadata()},
                 getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}
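
// Example (illustrative sketch; assumes a MachineIRBuilder `B` that has been
// pointed at a function and block via setMF()/setInsertPt(), plus
// hypothetical vregs): builder calls chain naturally once the insertion
// point is set.
//
//   LLT S32 = LLT::scalar(32);
//   auto C1 = B.buildConstant(S32, 1);  // %c1:_(s32) = G_CONSTANT i32 1
//   auto Sum = B.buildAdd(S32, C1, C1); // %sum:_(s32) = G_ADD %c1, %c1
//   Register SumReg = Sum.getReg(0);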

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else if (CI->getBitWidth() == 1)
      MIB.addImm(CI->getZExtValue());
    else
      MIB.addImm(CI->getSExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}
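
// Example (illustrative sketch; assumes a builder `B` and a stack slot `FI`
// created with MachineFrameInfo::CreateStackObject):
//
//   LLT P0 = LLT::pointer(0, 64);
//   auto Slot = B.buildFrameIndex(P0, FI); // %slot:_(p0) = G_FRAME_INDEX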

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}
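
// Example (illustrative sketch; assumes a builder `B` and a p0 base pointer
// vreg `Base`): the offset operand must be scalar-typed.
//
//   LLT P0 = LLT::pointer(0, 64);
//   auto Off = B.buildConstant(LLT::scalar(64), 16);
//   auto Ptr = B.buildPtrAdd(P0, Base, Off); // %p:_(p0) = G_PTR_ADD %base, %off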

MachineInstrBuilder MachineIRBuilder::buildObjectPtrOffset(const DstOp &Res,
                                                           const SrcOp &Op0,
                                                           const SrcOp &Op1) {
  return buildPtrAdd(Res, Op0, Op1,
                     MachineInstr::MIFlag::NoUWrap |
                         MachineInstr::MIFlag::NoUSWrap);
}

std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value,
                                    std::optional<unsigned> Flags) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0), Flags);
}
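
// Example (illustrative sketch; assumes a builder `B` and a base vreg
// `BaseReg`): a zero offset materializes nothing and forwards the base.
//
//   Register OutReg; // must be invalid on entry
//   auto PtrAdd = B.materializePtrAdd(OutReg, BaseReg, LLT::scalar(64), 8);
//   // PtrAdd is std::nullopt iff the offset was 0 (then OutReg == BaseReg).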

std::optional<MachineInstrBuilder> MachineIRBuilder::materializeObjectPtrOffset(
    Register &Res, Register Op0, const LLT ValueTy, uint64_t Value) {
  return materializePtrAdd(Res, Op0, ValueTy, Value,
                           MachineInstr::MIFlag::NoUWrap |
                               MachineInstr::MIFlag::NoUSWrap);
}

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }
  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(Op0Ty.isVector() && "Non vector type");
  assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
          (ResTy.isVector() &&
           (ResTy.getElementType() == Op0Ty.getElementType()))) &&
         "Different vector element types");
  assert(
      (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
      "Op0 has fewer elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  if (ResTy.isScalar())
    return buildCopy(Res, Unmerge.getReg(0));
  SmallVector<Register, 8> Regs;
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}
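
// Example (illustrative sketch; assumes a builder `B`): for a fixed-vector
// destination the constant is emitted once and splatted via G_BUILD_VECTOR.
//
//   LLT V4S32 = LLT::fixed_vector(4, 32);
//   auto Splat = B.buildConstant(V4S32, 7); // four s32 lanes holding 7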

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
             == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder
MachineIRBuilder::buildConstantPtrAuth(const DstOp &Res,
                                       const ConstantPtrAuth *CPA,
                                       Register Addr, Register AddrDisc) {
  auto MIB = buildInstr(TargetOpcode::G_PTRAUTH_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addUse(Addr);
  MIB.addImm(CPA->getKey()->getZExtValue());
  MIB.addUse(AddrDisc);
  MIB.addImm(CPA->getDiscriminator()->getZExtValue());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}
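
// Example (illustrative sketch; assumes a builder `B`, its MachineFunction
// `MF`, and a p0 pointer vreg `Ptr`): the convenience overloads above create
// the MachineMemOperand themselves; the core overload takes one explicitly.
//
//   LLT S32 = LLT::scalar(32);
//   MachineMemOperand *MMO = MF.getMachineMemOperand(
//       MachinePointerInfo(), MachineMemOperand::MOLoad, S32, Align(4));
//   auto Val = B.buildLoad(S32, Ptr, *MMO); // %v:_(s32) = G_LOAD %ptr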

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op, Flags);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
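
// Example (illustrative sketch; assumes a builder `B` and an s32 vreg
// `Small`): the opcode is chosen purely from the relative bit widths.
//
//   auto Wide = B.buildSExtOrTrunc(LLT::scalar(64), Small);   // G_SEXT
//   auto Same = B.buildSExtOrTrunc(LLT::scalar(32), Small);   // COPY
//   auto Narrow = B.buildSExtOrTrunc(LLT::scalar(16), Small); // G_TRUNC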

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointerOrPointerVector() &&
           !DstTy.isPointerOrPointerVector() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "insertion past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}
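
// Opcode selection above, by example (illustrative sketch):
//   s64       from two s32 regs        -> G_MERGE_VALUES
//   <4 x s32> from four s32 regs       -> G_BUILD_VECTOR
//   <4 x s32> from two <2 x s32> regs  -> G_CONCAT_VECTORS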

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder
MachineIRBuilder::buildUnmerge(MachineRegisterInfo::VRegAttrs Attrs,
                               const SrcOp &Op) {
  LLT OpTy = Op.getLLTTy(*getMRI());
  unsigned NumRegs = OpTy.getSizeInBits() / Attrs.Ty.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumRegs, Attrs);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}
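
// Example (illustrative sketch; assumes a builder `B` and an s64 vreg
// `Val64`): splitting a scalar into equally sized pieces.
//
//   auto Parts = B.buildUnmerge(LLT::scalar(32), Val64);
//   Register Lo = Parts.getReg(0), Hi = Parts.getReg(1);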

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
                                                            const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
         "Expected Src to match Dst elt ty");
  return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  const LLT DstElemTy = DstTy.getScalarType();
  const LLT ElemTy1 = Src1Ty.getScalarType();
  const LLT ElemTy2 = Src2Ty.getScalarType();
  assert(DstElemTy == ElemTy1 && DstElemTy == ElemTy2);
  assert(Mask.size() > 1 && "Scalar G_SHUFFLE_VECTOR are not supported");
  (void)DstElemTy;
  (void)ElemTy1;
  (void)ElemTy2;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder MachineIRBuilder::buildConcatVectors(const DstOp &Res,
                                                         ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildStepVector(const DstOp &Res,
                                                      unsigned Step) {
  unsigned Bitwidth = Res.getLLTTy(*getMRI()).getElementType().getSizeInBits();
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(),
                                     APInt(Bitwidth, Step));
  auto StepVector = buildInstr(TargetOpcode::G_STEP_VECTOR);
  StepVector->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), StepVector);
  StepVector.addCImm(CI);
  return StepVector;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  unsigned MinElts) {

  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, MinElts);
  return buildVScale(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const ConstantInt &MinElts) {
  auto VScale = buildInstr(TargetOpcode::G_VSCALE);
  VScale->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), VScale);
  VScale.addCImm(&MinElts);
  return VScale;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const APInt &MinElts) {
  ConstantInt *CI =
      ConstantInt::get(getMF().getFunction().getContext(), MinElts);
  return buildVScale(Res, *CI);
}

static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects, bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (Register ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs) {
  AttributeSet Attrs = Intrinsic::getFnAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID, ArrayRef<DstOp> Results,
                                 bool HasSideEffects,
                                 bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID, ArrayRef<DstOp> Results) {
  AttributeSet Attrs = Intrinsic::getFnAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}
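
// Example (illustrative sketch): when the attribute-driven overloads are
// used, side effects and convergence are derived from the intrinsic itself.
//
//   B.buildIntrinsic(Intrinsic::trap, ArrayRef<Register>());
//   // -> G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)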

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op,
                                                 std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_SCMP, Res, {Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildUCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_UCMP, Res, {Op0, Op1});
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
                                                           const SrcOp &Src0,
                                                           const SrcOp &Src1,
                                                           unsigned Idx) {
  return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
                    {Src0, Src1, uint64_t(Idx)});
}

MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
                                                            const SrcOp &Src,
                                                            unsigned Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
                    {Src, uint64_t(Idx)});
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
    const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  SuccessRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &CmpVal, const SrcOp &NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
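
// Example (illustrative sketch; assumes a builder `B`, a p0 pointer vreg
// `Ptr`, an s32 vreg `Val`, and an atomic MachineMemOperand `*AtomicMMO`):
//
//   auto Old = B.buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD,
//                               LLT::scalar(32), Ptr, Val, *AtomicMMO);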

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMaximum(const DstOp &OldValRes,
                                         const SrcOp &Addr, const SrcOp &Val,
                                         MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAXIMUM, OldValRes, Addr,
                        Val, MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMinimum(const DstOp &OldValRes,
                                         const SrcOp &Addr, const SrcOp &Val,
                                         MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMINIMUM, OldValRes, Addr,
                        Val, MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}
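
// Example (illustrative sketch): ordering and scope are passed as raw
// immediates, e.g. a sequentially consistent, system-wide fence:
//
//   B.buildFence(static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent),
//                SyncScope::System);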

MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
                                                    unsigned RW,
                                                    unsigned Locality,
                                                    unsigned CacheType,
                                                    MachineMemOperand &MMO) {
  auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
  Addr.addSrcToMIB(MIB);
  MIB.addImm(RW).addImm(Locality).addImm(CacheType);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getElementCount() == Op0Ty.getElementCount())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getElementCount() == Op0Ty.getElementCount();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
               SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
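
// Example (illustrative sketch; assumes a builder `B` and s32 vregs `X` and
// `Y`): this generic entry point validates the operands (in asserts builds)
// and then emits any opcode.
//
//   LLT S32 = LLT::scalar(32);
//   auto And = B.buildInstr(TargetOpcode::G_AND, {S32}, {X, Y});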
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Function Alias Analysis Results
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent)
This file declares the MachineIRBuilder class.
Promote Memory to Register
Definition Mem2Reg.cpp:110
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This file describes how to lower LLVM code to machine code.
static Function * getFunction(FunctionType *Ty, const Twine &Name, Module *M)
static LLVM_ABI unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition APFloat.cpp:354
const fltSemantics & getSemantics() const
Definition APFloat.h:1439
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition ArrayRef.h:143
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:138
This class holds the attributes for a particular argument, parameter, function, or return value.
Definition Attributes.h:361
The address of a basic block.
Definition Constants.h:899
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:277
const APFloat & getValueAPF() const
Definition Constants.h:320
This is the shared class of boolean and integer constants.
Definition Constants.h:87
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
A signed pointer, in the ptrauth sense.
Definition Constants.h:1032
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1062
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1065
This is an important base class in LLVM.
Definition Constant.h:43
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
Register getReg() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
PointerType * getType() const
Global values are always pointers.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
constexpr unsigned getScalarSizeInBits() const
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
constexpr ElementCount getElementCount() const
constexpr bool isPointerOrPointerVector() const
constexpr LLT getScalarType() const
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
Metadata node.
Definition Metadata.h:1078
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBundleIterator< MachineInstr > iterator
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO, int64_t Offset)
Helper to create a load from a constant offset given a base address.
MachineInstrBuilder buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO.
MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op, bool IsVector, bool IsFP)
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
MachineInstrBuilder buildAtomicRMWFMaximum(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAXIMUM Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
LLVMContext & getContext() const
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_UCMP Op0, Op1.
MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx)
Build and insert Res = G_CONSTANT_POOL Idx.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SCMP Op0, Op1.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
MachineInstrBuilder buildZExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and inserts Res = G_AND Op, LowBitsSet(ImmOp) Since there is no G_ZEXT_INREG like G_SEXT_INREG,...
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value, std::optional< unsigned > Flags=std::nullopt)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)
Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src)
Build and insert a vector splat of a scalar Src using a G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idio...
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ZEXT Op.
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MDNode * getPCSections()
Get the current instruction's PC sections metadata.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
MachineInstrBuilder buildObjectPtrOffset(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert an instruction with appropriate flags for addressing some offset of an object,...
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
void recordInsertion(MachineInstr *InsertedInstr) const
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
std::optional< MachineInstrBuilder > materializeObjectPtrOffset(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert an instruction with appropriate flags for addressing some offset of an object, i.e. Res = G_PTR_ADD Op0, (G_CONSTANT Value).
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMWFMinimum(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMINIMUM Addr, Val, MMO.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
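A sketch of byte-offset addressing; indexBuffer and the p0/s64 choices are assumptions about the target, not part of this file:
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
// Illustrative: compute Ptr + ByteOffset, assuming 64-bit pointers in
// address space 0; the offset is materialized as a G_CONSTANT first.
static llvm::Register indexBuffer(llvm::MachineIRBuilder &B,
                                  llvm::Register Ptr, int64_t ByteOffset) {
  using namespace llvm;
  auto Off = B.buildConstant(LLT::scalar(64), ByteOffset);
  return B.buildPtrAdd(LLT::pointer(0, 64), Ptr, Off).getReg(0);
}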
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
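Together with buildLoad above, a sketch of a word copy; copyWord and the chosen alignment are invented for the example:
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
// Illustrative: load an s32 from SrcPtr and store it to DstPtr, with a
// separate memory operand describing each access.
static void copyWord(llvm::MachineIRBuilder &B, llvm::Register SrcPtr,
                     llvm::Register DstPtr) {
  using namespace llvm;
  MachineFunction &MF = B.getMF();
  LLT S32 = LLT::scalar(32);
  MachineMemOperand *LoadMMO = MF.getMachineMemOperand(
      MachinePointerInfo(), MachineMemOperand::MOLoad, S32, Align(4));
  MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
      MachinePointerInfo(), MachineMemOperand::MOStore, S32, Align(4));
  auto Val = B.buildLoad(S32, SrcPtr, *LoadMMO);
  B.buildStore(Val, DstPtr, *StoreMMO);
}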
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0 Res = G_BUILD_VECTOR a, b, ..., x, undef, undef, ...
void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty, const LLT Op1Ty)
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Reg (suitably modified by Expr).
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildBuildVectorConstant(const DstOp &Res, ArrayRef< APInt > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ... where each OpN is built with G_CONSTANT.
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1)
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1)
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src, const SrcOp &Op, unsigned Index)
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in the stack slot specified by FI (suitably modified by Expr).
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
MachineInstrBuilder buildMergeValues(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
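A sketch of consuming both results; emitCAS is a hypothetical name, and the MMO is assumed to already describe an atomic 32-bit access:
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include <utility>
// Illustrative: 32-bit compare-and-swap yielding {old value, s1 success}.
static std::pair<llvm::Register, llvm::Register>
emitCAS(llvm::MachineIRBuilder &B, llvm::Register Addr, llvm::Register Cmp,
        llvm::Register New, llvm::MachineMemOperand &MMO) {
  using namespace llvm;
  auto MIB = B.buildAtomicCmpXchgWithSuccess(LLT::scalar(32), LLT::scalar(1),
                                             Addr, Cmp, New, MMO);
  return {MIB.getReg(0), MIB.getReg(1)};
}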
MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x, y, z = G_UNMERGE_VALUES Op0 Res = G_BUILD_VECTOR a, b, ..., x.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
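As an illustration (reverseVec4 is hypothetical), a mask-driven lane reversal where the unused second source is an implicit-def placeholder:
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
// Illustrative: reverse the four lanes of a <4 x s32> value.
static llvm::Register reverseVec4(llvm::MachineIRBuilder &B,
                                  llvm::Register V) {
  using namespace llvm;
  LLT V4S32 = LLT::fixed_vector(4, 32);
  auto Undef = B.buildUndef(V4S32); // second input is never read
  int Mask[4] = {3, 2, 1, 0};
  return B.buildShuffleVector(V4S32, V, Undef, Mask).getReg(0);
}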
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend)
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTRMASK Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
void validateUnaryOp(const LLT Res, const LLT Op0)
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MDNode * getMMRAMetadata()
Get the current instruction's MMRA metadata.
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)
Build and insert Res = G_EXTRACT_SUBVECTOR Src, Index.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
void setMF(MachineFunction &MF)
MachineInstrBuilder buildStepVector(const DstOp &Res, unsigned Step)
Build and insert Res = G_STEP_VECTOR Step.
MachineInstrBuilder buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT ~((1 << NumBits) - 1), clearing the NumBits lowest bits of Op0.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
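A sketch pairing this with buildFConstant above (emitImmediates is an invented name); both have convenience overloads taking raw values:
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
// Illustrative: materialize an s32 integer and an s64 FP immediate.
static void emitImmediates(llvm::MachineIRBuilder &B) {
  using namespace llvm;
  auto I = B.buildConstant(LLT::scalar(32), 42); // G_CONSTANT i32 42
  auto F = B.buildFConstant(LLT::scalar(64), 2.0); // G_FCONSTANT double 2.0
  (void)I;
  (void)F;
}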
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and insert Res = G_SEXT_INREG Op, ImmOp.
MachineInstrBuilder buildConstantPtrAuth(const DstOp &Res, const ConstantPtrAuth *CPA, Register Addr, Register AddrDisc)
Build and insert G_PTRAUTH_GLOBAL_VALUE.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
A description of a memory reference used in the backend.
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher, false otherwise.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
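A sketch (defineNamedCopy and the "tmp" name are hypothetical) of pre-creating a named generic vreg and then defining it through the builder:
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
// Illustrative: create an s32 generic vreg named "tmp" and define it with
// a COPY from Src.
static llvm::Register defineNamedCopy(llvm::MachineIRBuilder &B,
                                      llvm::Register Src) {
  using namespace llvm;
  MachineRegisterInfo &MRI = *B.getMRI();
  Register Dst = MRI.createGenericVirtualRegister(LLT::scalar(32), "tmp");
  B.buildCopy(Dst, Src);
  return Dst;
}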
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Wrapper class representing virtual and physical registers.
Definition Register.h:19
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
LLT getLLTTy(const MachineRegisterInfo &MRI) const
void addSrcToMIB(MachineInstrBuilder &MIB) const
Register getReg() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void addMetadata(unsigned KindID, MDNode &MD)
Add a metadata attachment.
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:217
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:224
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI AttributeSet getFnAttributes(LLVMContext &C, ID id)
Return the function attributes for an intrinsic.
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:477
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
constexpr T maskTrailingZeros(unsigned N)
Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.
Definition MathExtras.h:94
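A quick compile-time sanity check of the semantics (illustrative only):
#include "llvm/Support/MathExtras.h"
// The low 4 bits are cleared; every bit above them is set.
static_assert(llvm::maskTrailingZeros<uint64_t>(4) == 0xFFFFFFFFFFFFFFF0ULL,
              "expected a mask with four trailing zeros");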
DWARFExpression::Operation Op
LLVM_ABI APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition Utils.cpp:657
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Definition Metadata.h:761
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
All attributes (register class or bank and low-level type) a virtual register can have.