1//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file This file implements the utility functions used by the GlobalISel
9/// pipeline.
10//===----------------------------------------------------------------------===//
11
12#include "llvm/CodeGen/GlobalISel/Utils.h"
13#include "llvm/ADT/APFloat.h"
14#include "llvm/ADT/APInt.h"
33#include "llvm/IR/Constants.h"
36#include <numeric>
37#include <optional>
38
39#define DEBUG_TYPE "globalisel-utils"
40
41using namespace llvm;
42using namespace MIPatternMatch;
43
44Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
45 const TargetInstrInfo &TII,
46 const RegisterBankInfo &RBI, Register Reg,
47 const TargetRegisterClass &RegClass) {
48 if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
49 return MRI.createVirtualRegister(&RegClass);
50
51 return Reg;
52}
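// Illustrative usage sketch (not part of the upstream file; VReg and
// DesiredRC are placeholder names): try to constrain a virtual register and
// let the caller stitch in a COPY when the class is incompatible.
//   Register Out = constrainRegToClass(MRI, TII, RBI, VReg, DesiredRC);
//   if (Out != VReg) {
//     // Out is a fresh vreg of DesiredRC; the caller must connect Out and
//     // VReg with a COPY (constrainOperandRegClass below does exactly that).
//   }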
53
54Register llvm::constrainOperandRegClass(
55 const MachineFunction &MF, const TargetRegisterInfo &TRI,
56 MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
57 const RegisterBankInfo &RBI, MachineInstr &InsertPt,
58 const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
59 Register Reg = RegMO.getReg();
60 // Assume physical registers are properly constrained.
61 assert(Reg.isVirtual() && "PhysReg not implemented");
62
63 // Save the old register class to check whether
64 // the change notifications will be required.
65 // TODO: A better approach would be to pass
66 // the observers to constrainRegToClass().
67 auto *OldRegClass = MRI.getRegClassOrNull(Reg);
68 Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
69 // If we created a new virtual register because the class is not compatible
70 // then create a copy between the new and the old register.
71 if (ConstrainedReg != Reg) {
72 MachineBasicBlock::iterator InsertIt(&InsertPt);
73 MachineBasicBlock &MBB = *InsertPt.getParent();
74 // FIXME: The copy needs to have the classes constrained for its operands.
75 // Use operand's regbank to get the class for old register (Reg).
76 if (RegMO.isUse()) {
77 BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
78 TII.get(TargetOpcode::COPY), ConstrainedReg)
79 .addReg(Reg);
80 } else {
81 assert(RegMO.isDef() && "Must be a definition");
82 BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
83 TII.get(TargetOpcode::COPY), Reg)
84 .addReg(ConstrainedReg);
85 }
86 if (GISelChangeObserver *Observer = MF.getObserver()) {
87 Observer->changingInstr(*RegMO.getParent());
88 }
89 RegMO.setReg(ConstrainedReg);
90 if (GISelChangeObserver *Observer = MF.getObserver()) {
91 Observer->changedInstr(*RegMO.getParent());
92 }
93 } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
94 if (GISelChangeObserver *Observer = MF.getObserver()) {
95 if (!RegMO.isDef()) {
96 MachineInstr *RegDef = MRI.getVRegDef(Reg);
97 Observer->changedInstr(*RegDef);
98 }
99 Observer->changingAllUsesOfReg(MRI, Reg);
100 Observer->finishedChangingAllUsesOfReg();
101 }
102 }
103 return ConstrainedReg;
104}
105
106Register llvm::constrainOperandRegClass(
107 const MachineFunction &MF, const TargetRegisterInfo &TRI,
108 MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
109 const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
110 MachineOperand &RegMO, unsigned OpIdx) {
111 Register Reg = RegMO.getReg();
112 // Assume physical registers are properly constrained.
113 assert(Reg.isVirtual() && "PhysReg not implemented");
114
115 const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
116 // Some of the target independent instructions, like COPY, may not impose any
117 // register class constraints on some of their operands: If it's a use, we can
118 // skip constraining as the instruction defining the register would constrain
119 // it.
120
121 if (OpRC) {
122 // Obtain the RC from incoming regbank if it is a proper sub-class. Operands
123 // can have multiple regbanks for a superclass that combines different
124 // register types (E.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
125 // resolved by targets during regbankselect should not be overridden.
126 if (const auto *SubRC = TRI.getCommonSubClass(
127 OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
128 OpRC = SubRC;
129
130 OpRC = TRI.getAllocatableClass(OpRC);
131 }
132
133 if (!OpRC) {
134 assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
135 "Register class constraint is required unless either the "
136 "instruction is target independent or the operand is a use");
137 // FIXME: Just bailing out like this here could be not enough, unless we
138 // expect the users of this function to do the right thing for PHIs and
139 // COPY:
140 // v1 = COPY v0
141 // v2 = COPY v1
142 // v1 here may end up not being constrained at all. Note that to
143 // reproduce the issue we likely need a destination pattern of a selection
144 // rule producing such extra copies, not just input GMIR containing them, as
145 // every existing target using selectImpl handles copies before calling it,
146 // so they never reach this function.
147 return Reg;
148 }
149 return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
150 RegMO);
151}
152
153bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
154 const TargetInstrInfo &TII,
155 const TargetRegisterInfo &TRI,
156 const RegisterBankInfo &RBI) {
157 assert(!isPreISelGenericOpcode(I.getOpcode()) &&
158 "A selected instruction is expected");
159 MachineBasicBlock &MBB = *I.getParent();
160 MachineFunction &MF = *MBB.getParent();
161 MachineRegisterInfo &MRI = MF.getRegInfo();
162
163 for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
164 MachineOperand &MO = I.getOperand(OpI);
165
166 // There's nothing to be done on non-register operands.
167 if (!MO.isReg())
168 continue;
169
170 LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
171 assert(MO.isReg() && "Unsupported non-reg operand");
172
173 Register Reg = MO.getReg();
174 // Physical registers don't need to be constrained.
175 if (Reg.isPhysical())
176 continue;
177
178 // Register operands with a value of 0 (e.g. predicate operands) don't need
179 // to be constrained.
180 if (Reg == 0)
181 continue;
182
183 // If the operand is a vreg, we should constrain its regclass, and only
184 // insert COPYs if that's impossible.
185 // constrainOperandRegClass does that for us.
186 constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
187
188 // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
189 // done.
190 if (MO.isUse()) {
191 int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
192 if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
193 I.tieOperands(DefIdx, OpI);
194 }
195 }
196 return true;
197}
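// Illustrative sketch (assumed context, not from this file): a target's
// InstructionSelector typically finishes a hand-built instruction like so,
// where MIB is a MachineInstrBuilder for the freshly emitted instruction.
//   MachineInstr &NewI = *MIB;
//   if (!constrainSelectedInstRegOperands(NewI, TII, TRI, RBI))
//     return false; // selection failed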
198
199bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
200 MachineRegisterInfo &MRI) {
201 // Give up if either DstReg or SrcReg is a physical register.
202 if (DstReg.isPhysical() || SrcReg.isPhysical())
203 return false;
204 // Give up if the types don't match.
205 if (MRI.getType(DstReg) != MRI.getType(SrcReg))
206 return false;
207 // Replace if either DstReg has no constraints or the register
208 // constraints match.
209 const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);
210 if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))
211 return true;
212
213 // Otherwise match if the Src is already a regclass that is covered by the Dst
214 // RegBank.
215 return DstRBC.is<const RegisterBank *>() && MRI.getRegClassOrNull(SrcReg) &&
216 DstRBC.get<const RegisterBank *>()->covers(
217 *MRI.getRegClassOrNull(SrcReg));
218}
219
220bool llvm::isTriviallyDead(const MachineInstr &MI,
221 const MachineRegisterInfo &MRI) {
222 // FIXME: This logic is mostly duplicated with
223 // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
224 // MachineInstr::isLabel?
225
226 // Don't delete frame allocation labels.
227 if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
228 return false;
229 // LIFETIME markers should be preserved even if they seem dead.
230 if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
231 MI.getOpcode() == TargetOpcode::LIFETIME_END)
232 return false;
233
234 // If we can move an instruction, we can remove it. Otherwise, it has
235 // a side-effect of some sort.
236 bool SawStore = false;
237 if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
238 return false;
239
240 // Instructions without side-effects are dead iff they only define dead vregs.
241 for (const auto &MO : MI.all_defs()) {
242 Register Reg = MO.getReg();
243 if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
244 return false;
245 }
246 return true;
247}
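// Illustrative sketch of the intended use (a simplified DCE-style sweep, not
// one of the real passes, which iterate more carefully):
//   for (MachineInstr &MI : llvm::make_early_inc_range(MBB))
//     if (isTriviallyDead(MI, MRI))
//       MI.eraseFromParent();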
248
249static void reportGISelDiagnostic(DiagnosticSeverity Severity,
250 MachineFunction &MF,
251 const TargetPassConfig &TPC,
252 MachineOptimizationRemarkEmitter &MORE,
253 MachineOptimizationRemarkMissed &R) {
254 bool IsFatal = Severity == DS_Error &&
255 TPC.isGlobalISelAbortEnabled();
256 // Print the function name explicitly if we don't have a debug location (which
257 // makes the diagnostic less useful) or if we're going to emit a raw error.
258 if (!R.getLocation().isValid() || IsFatal)
259 R << (" (in function: " + MF.getName() + ")").str();
260
261 if (IsFatal)
262 report_fatal_error(Twine(R.getMsg()));
263 else
264 MORE.emit(R);
265}
266
267void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
268 MachineOptimizationRemarkEmitter &MORE,
269 MachineOptimizationRemarkMissed &R) {
270 reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
271}
272
273void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
274 MachineOptimizationRemarkEmitter &MORE,
275 MachineOptimizationRemarkMissed &R) {
276 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
277 reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
278}
279
280void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
281 MachineOptimizationRemarkEmitter &MORE,
282 const char *PassName, StringRef Msg,
283 const MachineInstr &MI) {
284 MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
285 MI.getDebugLoc(), MI.getParent());
286 R << Msg;
287 // Printing MI is expensive; only do it if expensive remarks are enabled.
288 if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
289 R << ": " << ore::MNV("Inst", MI);
290 reportGISelFailure(MF, TPC, MORE, R);
291}
292
293std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
294 const MachineRegisterInfo &MRI) {
295 std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
296 VReg, MRI, /*LookThroughInstrs*/ false);
297 assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
298 "Value found while looking through instrs");
299 if (!ValAndVReg)
300 return std::nullopt;
301 return ValAndVReg->Value;
302}
303
304std::optional<int64_t>
305llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
306 std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
307 if (Val && Val->getBitWidth() <= 64)
308 return Val->getSExtValue();
309 return std::nullopt;
310}
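// Illustrative sketch: reading an immediate operand in a combine (e.g. a
// shift amount) succeeds only when the vreg is defined by a G_CONSTANT.
//   if (std::optional<int64_t> ShAmt =
//           getIConstantVRegSExtVal(MI.getOperand(2).getReg(), MRI)) {
//     // *ShAmt holds the sign-extended constant value.
//   }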
311
312namespace {
313
314typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
315typedef std::function<std::optional<APInt>(const MachineInstr *MI)> GetAPCstFn;
316
317std::optional<ValueAndVReg> getConstantVRegValWithLookThrough(
318 Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
319 GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
320 bool LookThroughAnyExt = false) {
321 SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
322 MachineInstr *MI;
323
324 while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
325 LookThroughInstrs) {
326 switch (MI->getOpcode()) {
327 case TargetOpcode::G_ANYEXT:
328 if (!LookThroughAnyExt)
329 return std::nullopt;
330 [[fallthrough]];
331 case TargetOpcode::G_TRUNC:
332 case TargetOpcode::G_SEXT:
333 case TargetOpcode::G_ZEXT:
334 SeenOpcodes.push_back(std::make_pair(
335 MI->getOpcode(),
336 MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
337 VReg = MI->getOperand(1).getReg();
338 break;
339 case TargetOpcode::COPY:
340 VReg = MI->getOperand(1).getReg();
341 if (VReg.isPhysical())
342 return std::nullopt;
343 break;
344 case TargetOpcode::G_INTTOPTR:
345 VReg = MI->getOperand(1).getReg();
346 break;
347 default:
348 return std::nullopt;
349 }
350 }
351 if (!MI || !IsConstantOpcode(MI))
352 return std::nullopt;
353
354 std::optional<APInt> MaybeVal = getAPCstValue(MI);
355 if (!MaybeVal)
356 return std::nullopt;
357 APInt &Val = *MaybeVal;
358 while (!SeenOpcodes.empty()) {
359 std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
360 switch (OpcodeAndSize.first) {
361 case TargetOpcode::G_TRUNC:
362 Val = Val.trunc(OpcodeAndSize.second);
363 break;
364 case TargetOpcode::G_ANYEXT:
365 case TargetOpcode::G_SEXT:
366 Val = Val.sext(OpcodeAndSize.second);
367 break;
368 case TargetOpcode::G_ZEXT:
369 Val = Val.zext(OpcodeAndSize.second);
370 break;
371 }
372 }
373
374 return ValueAndVReg{Val, VReg};
375}
376
377bool isIConstant(const MachineInstr *MI) {
378 if (!MI)
379 return false;
380 return MI->getOpcode() == TargetOpcode::G_CONSTANT;
381}
382
383bool isFConstant(const MachineInstr *MI) {
384 if (!MI)
385 return false;
386 return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
387}
388
389bool isAnyConstant(const MachineInstr *MI) {
390 if (!MI)
391 return false;
392 unsigned Opc = MI->getOpcode();
393 return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
394}
395
396std::optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
397 const MachineOperand &CstVal = MI->getOperand(1);
398 if (CstVal.isCImm())
399 return CstVal.getCImm()->getValue();
400 return std::nullopt;
401}
402
403std::optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
404 const MachineOperand &CstVal = MI->getOperand(1);
405 if (CstVal.isCImm())
406 return CstVal.getCImm()->getValue();
407 if (CstVal.isFPImm())
408 return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
409 return std::nullopt;
410}
411
412} // end anonymous namespace
413
414std::optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
415 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
416 return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
417 getCImmAsAPInt, LookThroughInstrs);
418}
419
420std::optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
421 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
422 bool LookThroughAnyExt) {
423 return getConstantVRegValWithLookThrough(
424 VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
425 LookThroughAnyExt);
426}
427
428std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
429 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
430 auto Reg = getConstantVRegValWithLookThrough(
431 VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
432 if (!Reg)
433 return std::nullopt;
434 return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
435 Reg->VReg};
436}
437
438const ConstantFP *
439llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
440 MachineInstr *MI = MRI.getVRegDef(VReg);
441 if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
442 return nullptr;
443 return MI->getOperand(1).getFPImm();
444}
445
446std::optional<DefinitionAndSourceRegister>
447llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
448 Register DefSrcReg = Reg;
449 auto *DefMI = MRI.getVRegDef(Reg);
450 auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
451 if (!DstTy.isValid())
452 return std::nullopt;
453 unsigned Opc = DefMI->getOpcode();
454 while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
455 Register SrcReg = DefMI->getOperand(1).getReg();
456 auto SrcTy = MRI.getType(SrcReg);
457 if (!SrcTy.isValid())
458 break;
459 DefMI = MRI.getVRegDef(SrcReg);
460 DefSrcReg = SrcReg;
461 Opc = DefMI->getOpcode();
462 }
463 return DefinitionAndSourceRegister{DefMI, DefSrcReg};
464}
465
466MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
467 const MachineRegisterInfo &MRI) {
468 std::optional<DefinitionAndSourceRegister> DefSrcReg =
469 getDefSrcRegIgnoringCopies(Reg, MRI);
470 return DefSrcReg ? DefSrcReg->MI : nullptr;
471}
472
473Register llvm::getSrcRegIgnoringCopies(Register Reg,
474 const MachineRegisterInfo &MRI) {
475 std::optional<DefinitionAndSourceRegister> DefSrcReg =
476 getDefSrcRegIgnoringCopies(Reg, MRI);
477 return DefSrcReg ? DefSrcReg->Reg : Register();
478}
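// Illustrative sketch: combines usually want the real producer of a value,
// skipping COPYs and optimization hints inserted earlier in the pipeline.
//   if (MachineInstr *Def = getDefIgnoringCopies(UseReg, MRI))
//     if (Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
//       // UseReg is (transitively) undef.
//     }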
479
480void llvm::extractParts(Register Reg, LLT Ty, int NumParts,
481 SmallVectorImpl<Register> &VRegs,
482 MachineIRBuilder &MIRBuilder,
483 MachineRegisterInfo &MRI) {
484 for (int i = 0; i < NumParts; ++i)
485 VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
486 MIRBuilder.buildUnmerge(VRegs, Reg);
487}
488
489bool llvm::extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
490 SmallVectorImpl<Register> &VRegs,
491 SmallVectorImpl<Register> &LeftoverRegs,
492 MachineIRBuilder &MIRBuilder,
493 MachineRegisterInfo &MRI) {
494 assert(!LeftoverTy.isValid() && "this is an out argument");
495
496 unsigned RegSize = RegTy.getSizeInBits();
497 unsigned MainSize = MainTy.getSizeInBits();
498 unsigned NumParts = RegSize / MainSize;
499 unsigned LeftoverSize = RegSize - NumParts * MainSize;
500
501 // Use an unmerge when possible.
502 if (LeftoverSize == 0) {
503 for (unsigned I = 0; I < NumParts; ++I)
504 VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));
505 MIRBuilder.buildUnmerge(VRegs, Reg);
506 return true;
507 }
508
509 // Try to use unmerge for an irregular vector split where possible.
510 // For example, when splitting a <6 x i32> into <4 x i32> with a <2 x i32>
511 // leftover, it becomes:
512 // <2 x i32> %2, <2 x i32> %3, <2 x i32> %4 = G_UNMERGE_VALUES <6 x i32> %1
513 // <4 x i32> %5 = G_CONCAT_VECTORS <2 x i32> %2, <2 x i32> %3
514 if (RegTy.isVector() && MainTy.isVector()) {
515 unsigned RegNumElts = RegTy.getNumElements();
516 unsigned MainNumElts = MainTy.getNumElements();
517 unsigned LeftoverNumElts = RegNumElts % MainNumElts;
518 // If we can unmerge to LeftoverTy, do it.
519 if (MainNumElts % LeftoverNumElts == 0 &&
520 RegNumElts % LeftoverNumElts == 0 &&
521 RegTy.getScalarSizeInBits() == MainTy.getScalarSizeInBits() &&
522 LeftoverNumElts > 1) {
523 LeftoverTy =
524 LLT::fixed_vector(LeftoverNumElts, RegTy.getScalarSizeInBits());
525
526 // Unmerge the SrcReg to LeftoverTy vectors
527 SmallVector<Register, 4> UnmergeValues;
528 extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,
529 MIRBuilder, MRI);
530
531 // Find how many LeftoverTy pieces make up one MainTy.
532 unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;
533 unsigned NumOfLeftoverVal =
534 ((RegNumElts % MainNumElts) / LeftoverNumElts);
535
536 // Create as many MainTy as possible using unmerged value
537 SmallVector<Register, 4> MergeValues;
538 for (unsigned I = 0; I < UnmergeValues.size() - NumOfLeftoverVal; I++) {
539 MergeValues.push_back(UnmergeValues[I]);
540 if (MergeValues.size() == LeftoverPerMain) {
541 VRegs.push_back(
542 MIRBuilder.buildMergeLikeInstr(MainTy, MergeValues).getReg(0));
543 MergeValues.clear();
544 }
545 }
546 // Populate LeftoverRegs with the leftovers
547 for (unsigned I = UnmergeValues.size() - NumOfLeftoverVal;
548 I < UnmergeValues.size(); I++) {
549 LeftoverRegs.push_back(UnmergeValues[I]);
550 }
551 return true;
552 }
553 }
554 // Perform irregular split. Leftover is last element of RegPieces.
555 if (MainTy.isVector()) {
556 SmallVector<Register, 8> RegPieces;
557 extractVectorParts(Reg, MainTy.getNumElements(), RegPieces, MIRBuilder,
558 MRI);
559 for (unsigned i = 0; i < RegPieces.size() - 1; ++i)
560 VRegs.push_back(RegPieces[i]);
561 LeftoverRegs.push_back(RegPieces[RegPieces.size() - 1]);
562 LeftoverTy = MRI.getType(LeftoverRegs[0]);
563 return true;
564 }
565
566 LeftoverTy = LLT::scalar(LeftoverSize);
567 // For irregular sizes, extract the individual parts.
568 for (unsigned I = 0; I != NumParts; ++I) {
569 Register NewReg = MRI.createGenericVirtualRegister(MainTy);
570 VRegs.push_back(NewReg);
571 MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
572 }
573
574 for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
575 Offset += LeftoverSize) {
576 Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
577 LeftoverRegs.push_back(NewReg);
578 MIRBuilder.buildExtract(NewReg, Reg, Offset);
579 }
580
581 return true;
582}
583
584void llvm::extractVectorParts(Register Reg, unsigned NumElts,
585 SmallVectorImpl<Register> &VRegs,
586 MachineIRBuilder &MIRBuilder,
587 MachineRegisterInfo &MRI) {
588 LLT RegTy = MRI.getType(Reg);
589 assert(RegTy.isVector() && "Expected a vector type");
590
591 LLT EltTy = RegTy.getElementType();
592 LLT NarrowTy = (NumElts == 1) ? EltTy : LLT::fixed_vector(NumElts, EltTy);
593 unsigned RegNumElts = RegTy.getNumElements();
594 unsigned LeftoverNumElts = RegNumElts % NumElts;
595 unsigned NumNarrowTyPieces = RegNumElts / NumElts;
596
597 // Perfect split without leftover
598 if (LeftoverNumElts == 0)
599 return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,
600 MRI);
601
602 // Irregular split. Provide direct access to all elements for the artifact
603 // combiner by unmerging to individual elements, then build vectors with
604 // NumElts elements each; the remaining element(s) form the leftover.
605 SmallVector<Register, 8> Elts;
606 extractParts(Reg, EltTy, RegNumElts, Elts, MIRBuilder, MRI);
607
608 unsigned Offset = 0;
609 // Requested sub-vectors of NarrowTy.
610 for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {
611 ArrayRef<Register> Pieces(&Elts[Offset], NumElts);
612 VRegs.push_back(MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
613 }
614
615 // Leftover element(s).
616 if (LeftoverNumElts == 1) {
617 VRegs.push_back(Elts[Offset]);
618 } else {
619 LLT LeftoverTy = LLT::fixed_vector(LeftoverNumElts, EltTy);
620 ArrayRef<Register> Pieces(&Elts[Offset], LeftoverNumElts);
621 VRegs.push_back(
622 MIRBuilder.buildMergeLikeInstr(LeftoverTy, Pieces).getReg(0));
623 }
624}
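// Illustrative sketch: splitting a <6 x s32> register into <4 x s32> pieces
// appends one <4 x s32> part followed by a <2 x s32> leftover part.
//   SmallVector<Register, 4> Parts;
//   extractVectorParts(SrcReg, /*NumElts=*/4, Parts, MIRBuilder, MRI);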
625
626MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
627 const MachineRegisterInfo &MRI) {
628 MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
629 return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
630}
631
632APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
633 if (Size == 32)
634 return APFloat(float(Val));
635 if (Size == 64)
636 return APFloat(Val);
637 if (Size != 16)
638 llvm_unreachable("Unsupported FPConstant size");
639 bool Ignored;
640 APFloat APF(Val);
641 APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
642 return APF;
643}
644
645std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
646 const Register Op1,
647 const Register Op2,
648 const MachineRegisterInfo &MRI) {
649 auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
650 if (!MaybeOp2Cst)
651 return std::nullopt;
652
653 auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
654 if (!MaybeOp1Cst)
655 return std::nullopt;
656
657 const APInt &C1 = MaybeOp1Cst->Value;
658 const APInt &C2 = MaybeOp2Cst->Value;
659 switch (Opcode) {
660 default:
661 break;
662 case TargetOpcode::G_ADD:
663 case TargetOpcode::G_PTR_ADD:
664 return C1 + C2;
665 case TargetOpcode::G_AND:
666 return C1 & C2;
667 case TargetOpcode::G_ASHR:
668 return C1.ashr(C2);
669 case TargetOpcode::G_LSHR:
670 return C1.lshr(C2);
671 case TargetOpcode::G_MUL:
672 return C1 * C2;
673 case TargetOpcode::G_OR:
674 return C1 | C2;
675 case TargetOpcode::G_SHL:
676 return C1 << C2;
677 case TargetOpcode::G_SUB:
678 return C1 - C2;
679 case TargetOpcode::G_XOR:
680 return C1 ^ C2;
681 case TargetOpcode::G_UDIV:
682 if (!C2.getBoolValue())
683 break;
684 return C1.udiv(C2);
685 case TargetOpcode::G_SDIV:
686 if (!C2.getBoolValue())
687 break;
688 return C1.sdiv(C2);
689 case TargetOpcode::G_UREM:
690 if (!C2.getBoolValue())
691 break;
692 return C1.urem(C2);
693 case TargetOpcode::G_SREM:
694 if (!C2.getBoolValue())
695 break;
696 return C1.srem(C2);
697 case TargetOpcode::G_SMIN:
698 return APIntOps::smin(C1, C2);
699 case TargetOpcode::G_SMAX:
700 return APIntOps::smax(C1, C2);
701 case TargetOpcode::G_UMIN:
702 return APIntOps::umin(C1, C2);
703 case TargetOpcode::G_UMAX:
704 return APIntOps::umax(C1, C2);
705 }
706
707 return std::nullopt;
708}
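// Illustrative sketch: folding a binary generic instruction whose operands
// are both G_CONSTANTs, e.g. from a combiner (Builder is assumed in scope).
//   if (std::optional<APInt> Folded =
//           ConstantFoldBinOp(TargetOpcode::G_ADD, MI.getOperand(1).getReg(),
//                             MI.getOperand(2).getReg(), MRI)) {
//     Builder.buildConstant(MI.getOperand(0).getReg(), *Folded);
//     MI.eraseFromParent();
//   }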
709
710std::optional<APFloat>
711llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
712 const Register Op2, const MachineRegisterInfo &MRI) {
713 const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
714 if (!Op2Cst)
715 return std::nullopt;
716
717 const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
718 if (!Op1Cst)
719 return std::nullopt;
720
721 APFloat C1 = Op1Cst->getValueAPF();
722 const APFloat &C2 = Op2Cst->getValueAPF();
723 switch (Opcode) {
724 case TargetOpcode::G_FADD:
725 C1.add(C2, APFloat::rmNearestTiesToEven);
726 return C1;
727 case TargetOpcode::G_FSUB:
728 C1.subtract(C2, APFloat::rmNearestTiesToEven);
729 return C1;
730 case TargetOpcode::G_FMUL:
731 C1.multiply(C2, APFloat::rmNearestTiesToEven);
732 return C1;
733 case TargetOpcode::G_FDIV:
734 C1.divide(C2, APFloat::rmNearestTiesToEven);
735 return C1;
736 case TargetOpcode::G_FREM:
737 C1.mod(C2);
738 return C1;
739 case TargetOpcode::G_FCOPYSIGN:
740 C1.copySign(C2);
741 return C1;
742 case TargetOpcode::G_FMINNUM:
743 return minnum(C1, C2);
744 case TargetOpcode::G_FMAXNUM:
745 return maxnum(C1, C2);
746 case TargetOpcode::G_FMINIMUM:
747 return minimum(C1, C2);
748 case TargetOpcode::G_FMAXIMUM:
749 return maximum(C1, C2);
750 case TargetOpcode::G_FMINNUM_IEEE:
751 case TargetOpcode::G_FMAXNUM_IEEE:
752 // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
753 // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
754 // and currently there isn't a nice wrapper in APFloat for the version with
755 // correct snan handling.
756 break;
757 default:
758 break;
759 }
760
761 return std::nullopt;
762}
763
764SmallVector<APInt>
765llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
766 const Register Op2,
767 const MachineRegisterInfo &MRI) {
768 auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
769 if (!SrcVec2)
770 return SmallVector<APInt>();
771
772 auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
773 if (!SrcVec1)
774 return SmallVector<APInt>();
775
776 SmallVector<APInt> FoldedElements;
777 for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
778 auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
779 SrcVec2->getSourceReg(Idx), MRI);
780 if (!MaybeCst)
781 return SmallVector<APInt>();
782 FoldedElements.push_back(*MaybeCst);
783 }
784 return FoldedElements;
785}
786
787bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
788 bool SNaN) {
789 const MachineInstr *DefMI = MRI.getVRegDef(Val);
790 if (!DefMI)
791 return false;
792
793 const TargetMachine& TM = DefMI->getMF()->getTarget();
794 if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
795 return true;
796
797 // If the value is a constant, we can obviously see if it is a NaN or not.
798 if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
799 return !FPVal->getValueAPF().isNaN() ||
800 (SNaN && !FPVal->getValueAPF().isSignaling());
801 }
802
803 if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
804 for (const auto &Op : DefMI->uses())
805 if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
806 return false;
807 return true;
808 }
809
810 switch (DefMI->getOpcode()) {
811 default:
812 break;
813 case TargetOpcode::G_FADD:
814 case TargetOpcode::G_FSUB:
815 case TargetOpcode::G_FMUL:
816 case TargetOpcode::G_FDIV:
817 case TargetOpcode::G_FREM:
818 case TargetOpcode::G_FSIN:
819 case TargetOpcode::G_FCOS:
820 case TargetOpcode::G_FMA:
821 case TargetOpcode::G_FMAD:
822 if (SNaN)
823 return true;
824
825 // TODO: Need isKnownNeverInfinity
826 return false;
827 case TargetOpcode::G_FMINNUM_IEEE:
828 case TargetOpcode::G_FMAXNUM_IEEE: {
829 if (SNaN)
830 return true;
831 // This can return a NaN if either operand is an sNaN, or if both operands
832 // are NaN.
833 return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
834 isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
835 (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
836 isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
837 }
838 case TargetOpcode::G_FMINNUM:
839 case TargetOpcode::G_FMAXNUM: {
840 // Only one needs to be known not-nan, since it will be returned if the
841 // other ends up being one.
842 return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
843 isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
844 }
845 }
846
847 if (SNaN) {
848 // Most FP operations quiet signaling NaNs. For now, just handle the ones
849 // inserted during legalization.
850 switch (DefMI->getOpcode()) {
851 case TargetOpcode::G_FPEXT:
852 case TargetOpcode::G_FPTRUNC:
853 case TargetOpcode::G_FCANONICALIZE:
854 return true;
855 default:
856 return false;
857 }
858 }
859
860 return false;
861}
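// Illustrative sketch: a combine that is only valid when an operand cannot
// be a NaN would guard itself like this.
//   if (isKnownNeverNaN(MI.getOperand(1).getReg(), MRI, /*SNaN=*/false)) {
//     // safe to assume the operand is never a NaN here
//   }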
862
863Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
864 const MachinePointerInfo &MPO) {
865 auto PSV = dyn_cast_if_present<const PseudoSourceValue *>(MPO.V);
866 if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
867 MachineFrameInfo &MFI = MF.getFrameInfo();
868 return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
869 MPO.Offset);
870 }
871
872 if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
873 const Module *M = MF.getFunction().getParent();
874 return V->getPointerAlignment(M->getDataLayout());
875 }
876
877 return Align(1);
878}
879
880Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
881 const TargetInstrInfo &TII,
882 MCRegister PhysReg,
883 const TargetRegisterClass &RC,
884 const DebugLoc &DL, LLT RegTy) {
885 MachineBasicBlock &EntryMBB = MF.front();
886 MachineRegisterInfo &MRI = MF.getRegInfo();
887 Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
888 if (LiveIn) {
889 MachineInstr *Def = MRI.getVRegDef(LiveIn);
890 if (Def) {
891 // FIXME: Should the verifier check this is in the entry block?
892 assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
893 return LiveIn;
894 }
895
896 // It's possible the incoming argument register and copy was added during
897 // lowering, but later deleted due to being/becoming dead. If this happens,
898 // re-insert the copy.
899 } else {
900 // The live in register was not present, so add it.
901 LiveIn = MF.addLiveIn(PhysReg, &RC);
902 if (RegTy.isValid())
903 MRI.setType(LiveIn, RegTy);
904 }
905
906 BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
907 .addReg(PhysReg);
908 if (!EntryMBB.isLiveIn(PhysReg))
909 EntryMBB.addLiveIn(PhysReg);
910 return LiveIn;
911}
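// Illustrative sketch from argument lowering (PhysArgReg and ArgRegClass are
// placeholders for whatever the target's calling convention assigns):
// materialize the virtual register that carries an incoming physical
// argument register.
//   Register ArgVReg = getFunctionLiveInPhysReg(
//       MF, TII, PhysArgReg, ArgRegClass, DL, LLT::scalar(64));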
912
913std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
914 const Register Op1, uint64_t Imm,
915 const MachineRegisterInfo &MRI) {
916 auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
917 if (MaybeOp1Cst) {
918 switch (Opcode) {
919 default:
920 break;
921 case TargetOpcode::G_SEXT_INREG: {
922 LLT Ty = MRI.getType(Op1);
923 return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
924 }
925 }
926 }
927 return std::nullopt;
928}
929
930std::optional<APInt> llvm::ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
931 const Register Op0,
932 const MachineRegisterInfo &MRI) {
933 std::optional<APInt> Val = getIConstantVRegVal(Op0, MRI);
934 if (!Val)
935 return Val;
936
937 const unsigned DstSize = DstTy.getScalarSizeInBits();
938
939 switch (Opcode) {
940 case TargetOpcode::G_SEXT:
941 return Val->sext(DstSize);
942 case TargetOpcode::G_ZEXT:
943 case TargetOpcode::G_ANYEXT:
944 // TODO: DAG considers target preference when constant folding any_extend.
945 return Val->zext(DstSize);
946 default:
947 break;
948 }
949
950 llvm_unreachable("unexpected cast opcode to constant fold");
951}
952
953std::optional<APFloat>
954llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
955 const MachineRegisterInfo &MRI) {
956 assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
957 if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
958 APFloat DstVal(getFltSemanticForLLT(DstTy));
959 DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
960 APFloat::rmNearestTiesToEven);
961 return DstVal;
962 }
963 return std::nullopt;
964}
965
966std::optional<SmallVector<unsigned>>
967llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
968 LLT Ty = MRI.getType(Src);
969 SmallVector<unsigned> FoldedCTLZs;
970 auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
971 auto MaybeCst = getIConstantVRegVal(R, MRI);
972 if (!MaybeCst)
973 return std::nullopt;
974 return MaybeCst->countl_zero();
975 };
976 if (Ty.isVector()) {
977 // Try to constant fold each element.
978 auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
979 if (!BV)
980 return std::nullopt;
981 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
982 if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
983 FoldedCTLZs.emplace_back(*MaybeFold);
984 continue;
985 }
986 return std::nullopt;
987 }
988 return FoldedCTLZs;
989 }
990 if (auto MaybeCst = tryFoldScalar(Src)) {
991 FoldedCTLZs.emplace_back(*MaybeCst);
992 return FoldedCTLZs;
993 }
994 return std::nullopt;
995}
996
997bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
998 GISelKnownBits *KB) {
999 std::optional<DefinitionAndSourceRegister> DefSrcReg =
1000 getDefSrcRegIgnoringCopies(Reg, MRI);
1001 if (!DefSrcReg)
1002 return false;
1003
1004 const MachineInstr &MI = *DefSrcReg->MI;
1005 const LLT Ty = MRI.getType(Reg);
1006
1007 switch (MI.getOpcode()) {
1008 case TargetOpcode::G_CONSTANT: {
1009 unsigned BitWidth = Ty.getScalarSizeInBits();
1010 const ConstantInt *CI = MI.getOperand(1).getCImm();
1011 return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
1012 }
1013 case TargetOpcode::G_SHL: {
1014 // A left-shift of a constant one will have exactly one bit set because
1015 // shifting the bit off the end is undefined.
1016
1017 // TODO: Constant splat
1018 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1019 if (*ConstLHS == 1)
1020 return true;
1021 }
1022
1023 break;
1024 }
1025 case TargetOpcode::G_LSHR: {
1026 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1027 if (ConstLHS->isSignMask())
1028 return true;
1029 }
1030
1031 break;
1032 }
1033 case TargetOpcode::G_BUILD_VECTOR: {
1034 // TODO: Probably should have a recursion depth guard since you could have
1035 // bitcasted vector elements.
1036 for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
1037 if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
1038 return false;
1039
1040 return true;
1041 }
1042 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1043 // Only handle constants since we would need to know if number of leading
1044 // zeros is greater than the truncation amount.
1045 const unsigned BitWidth = Ty.getScalarSizeInBits();
1046 for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
1047 auto Const = getIConstantVRegVal(MO.getReg(), MRI);
1048 if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
1049 return false;
1050 }
1051
1052 return true;
1053 }
1054 default:
1055 break;
1056 }
1057
1058 if (!KB)
1059 return false;
1060
1061 // More could be done here, though the above checks are enough
1062 // to handle some common cases.
1063
1064 // Fall back to computeKnownBits to catch other known cases.
1065 KnownBits Known = KB->getKnownBits(Reg);
1066 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
1067}
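// Illustrative sketch: a combiner would use this to justify, e.g., turning an
// unsigned division into a shift only when the divisor is provably a power of
// two (KB may be null when known-bits analysis is unavailable).
//   if (isKnownToBeAPowerOfTwo(MI.getOperand(2).getReg(), MRI, KB)) {
//     // rewrite G_UDIV as G_LSHR by log2(divisor)
//   }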
1068
1069void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
1070 AU.addPreserved<StackProtector>();
1071}
1072
1073LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
1074 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1075 return OrigTy;
1076
1077 if (OrigTy.isVector() && TargetTy.isVector()) {
1078 LLT OrigElt = OrigTy.getElementType();
1079 LLT TargetElt = TargetTy.getElementType();
1080
1081 // TODO: The docstring for this function says the intention is to use this
1082 // function to build MERGE/UNMERGE instructions. It won't be the case that
1083 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1084 // could implement getLCMType between the two in the future if there was a
1085 // need, but it is not worth it now as this function should not be used in
1086 // that way.
1087 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1088 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1089 "getLCMType not implemented between fixed and scalable vectors.");
1090
1091 if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
1092 int GCDMinElts = std::gcd(OrigTy.getElementCount().getKnownMinValue(),
1093 TargetTy.getElementCount().getKnownMinValue());
1094 // Prefer the original element type.
1095 ElementCount Mul = OrigTy.getElementCount().multiplyCoefficientBy(
1096 TargetTy.getElementCount().getKnownMinValue());
1097 return LLT::vector(Mul.divideCoefficientBy(GCDMinElts),
1098 OrigTy.getElementType());
1099 }
1100 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getKnownMinValue(),
1101 TargetTy.getSizeInBits().getKnownMinValue());
1102 return LLT::vector(
1103 ElementCount::get(LCM / OrigElt.getSizeInBits(), OrigTy.isScalable()),
1104 OrigElt);
1105 }
1106
1107 // One type is scalar, one type is vector
1108 if (OrigTy.isVector() || TargetTy.isVector()) {
1109 LLT VecTy = OrigTy.isVector() ? OrigTy : TargetTy;
1110 LLT ScalarTy = OrigTy.isVector() ? TargetTy : OrigTy;
1111 LLT EltTy = VecTy.getElementType();
1112 LLT OrigEltTy = OrigTy.isVector() ? OrigTy.getElementType() : OrigTy;
1113
1114 // Prefer scalar type from OrigTy.
1115 if (EltTy.getSizeInBits() == ScalarTy.getSizeInBits())
1116 return LLT::vector(VecTy.getElementCount(), OrigEltTy);
1117
1118 // Different size scalars. Create vector with the same total size.
1119 // LCM will take fixed/scalable from VecTy.
1120 unsigned LCM = std::lcm(EltTy.getSizeInBits().getFixedValue() *
1121 VecTy.getElementCount().getKnownMinValue(),
1122 ScalarTy.getSizeInBits().getFixedValue());
1123 // Prefer type from OrigTy
1124 return LLT::vector(ElementCount::get(LCM / OrigEltTy.getSizeInBits(),
1125 VecTy.getElementCount().isScalable()),
1126 OrigEltTy);
1127 }
1128
1129 // At this point, both types are scalars of different size
1130 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getFixedValue(),
1131 TargetTy.getSizeInBits().getFixedValue());
1132 // Preserve pointer types.
1133 if (LCM == OrigTy.getSizeInBits())
1134 return OrigTy;
1135 if (LCM == TargetTy.getSizeInBits())
1136 return TargetTy;
1137 return LLT::scalar(LCM);
1138}
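// Worked example (traced through the code above, not an upstream comment):
// getLCMType(v3s32, s64) computes lcm(3 * 32, 64) = 192 bits and returns
// v6s32, which both v3s32 and s64 evenly cover, while getLCMType(v2s32, s64)
// returns v2s32 unchanged because the two bit sizes already match.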
1139
1140LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
1141
1142 if ((OrigTy.isScalableVector() && TargetTy.isFixedVector()) ||
1143 (OrigTy.isFixedVector() && TargetTy.isScalableVector()))
1145 "getCoverTy not implemented between fixed and scalable vectors.");
1146
1147 if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
1148 (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
1149 return getLCMType(OrigTy, TargetTy);
1150
1151 unsigned OrigTyNumElts = OrigTy.getElementCount().getKnownMinValue();
1152 unsigned TargetTyNumElts = TargetTy.getElementCount().getKnownMinValue();
1153 if (OrigTyNumElts % TargetTyNumElts == 0)
1154 return OrigTy;
1155
1156 unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
1157 return LLT::scalarOrVector(ElementCount::getFixed(NumElts),
1158 OrigTy.getElementType());
1159}
1160
1161LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
1162 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1163 return OrigTy;
1164
1165 if (OrigTy.isVector() && TargetTy.isVector()) {
1166 LLT OrigElt = OrigTy.getElementType();
1167
1168 // TODO: The docstring for this function says the intention is to use this
1169 // function to build MERGE/UNMERGE instructions. It won't be the case that
1170 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1171 // could implement getGCDType between the two in the future if there was a
1172 // need, but it is not worth it now as this function should not be used in
1173 // that way.
1174 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1175 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1176 "getGCDType not implemented between fixed and scalable vectors.");
1177
1178 unsigned GCD = std::gcd(OrigTy.getSizeInBits().getKnownMinValue(),
1179 TargetTy.getSizeInBits().getKnownMinValue());
1180 if (GCD == OrigElt.getSizeInBits())
1181 return LLT::scalarOrVector(ElementCount::get(1, OrigTy.isScalable()),
1182 OrigElt);
1183
1184 // Cannot produce original element type, but both have vscale in common.
1185 if (GCD < OrigElt.getSizeInBits())
1186 return LLT::scalarOrVector(ElementCount::get(1, OrigTy.isScalable()),
1187 GCD);
1188
1189 return LLT::vector(
1190 ElementCount::get(GCD / OrigElt.getSizeInBits().getFixedValue(),
1191 OrigTy.isScalable()),
1192 OrigElt);
1193 }
1194
1195 // If one type is vector and the element size matches the scalar size, then
1196 // the gcd is the scalar type.
1197 if (OrigTy.isVector() &&
1198 OrigTy.getElementType().getSizeInBits() == TargetTy.getSizeInBits())
1199 return OrigTy.getElementType();
1200 if (TargetTy.isVector() &&
1201 TargetTy.getElementType().getSizeInBits() == OrigTy.getSizeInBits())
1202 return OrigTy;
1203
1204 // At this point, both types are either scalars of different type or one is a
1205 // vector and one is a scalar. If both types are scalars, the GCD type is the
1206 // GCD between the two scalar sizes. If one is vector and one is scalar, then
1207 // the GCD type is the GCD between the scalar and the vector element size.
1208 LLT OrigScalar = OrigTy.getScalarType();
1209 LLT TargetScalar = TargetTy.getScalarType();
1210 unsigned GCD = std::gcd(OrigScalar.getSizeInBits().getFixedValue(),
1211 TargetScalar.getSizeInBits().getFixedValue());
1212 return LLT::scalar(GCD);
1213}
1214
1215std::optional<int> llvm::getSplatIndex(MachineInstr &MI) {
1216 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
1217 "Only G_SHUFFLE_VECTOR can have a splat index!");
1218 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
1219 auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
1220
1221 // If all elements are undefined, this shuffle can be considered a splat.
1222 // Return 0 to give callers a better chance to simplify.
1223 if (FirstDefinedIdx == Mask.end())
1224 return 0;
1225
1226 // Make sure all remaining elements are either undef or the same
1227 // as the first non-undef value.
1228 int SplatValue = *FirstDefinedIdx;
1229 if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
1230 [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
1231 return std::nullopt;
1232
1233 return SplatValue;
1234}
1235
1236static bool isBuildVectorOp(unsigned Opcode) {
1237 return Opcode == TargetOpcode::G_BUILD_VECTOR ||
1238 Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
1239}
1240
1241namespace {
1242
1243std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
1244 const MachineRegisterInfo &MRI,
1245 bool AllowUndef) {
1246 MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
1247 if (!MI)
1248 return std::nullopt;
1249
1250 bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
1251 if (!isBuildVectorOp(MI->getOpcode()) && !isConcatVectorsOp)
1252 return std::nullopt;
1253
1254 std::optional<ValueAndVReg> SplatValAndReg;
1255 for (MachineOperand &Op : MI->uses()) {
1256 Register Element = Op.getReg();
1257 // If we have a G_CONCAT_VECTOR, we recursively look into the
1258 // vectors that we're concatenating to see if they're splats.
1259 auto ElementValAndReg =
1260 isConcatVectorsOp
1261 ? getAnyConstantSplat(Element, MRI, AllowUndef)
1262 : getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);
1263
1264 // If AllowUndef, treat undef as a value that will result in a constant splat.
1265 if (!ElementValAndReg) {
1266 if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1267 continue;
1268 return std::nullopt;
1269 }
1270
1271 // Record splat value
1272 if (!SplatValAndReg)
1273 SplatValAndReg = ElementValAndReg;
1274
1275 // Different constant than the one already recorded, not a constant splat.
1276 if (SplatValAndReg->Value != ElementValAndReg->Value)
1277 return std::nullopt;
1278 }
1279
1280 return SplatValAndReg;
1281}
1282
1283} // end anonymous namespace
1284
1285bool llvm::isBuildVectorConstantSplat(const Register Reg,
1286 const MachineRegisterInfo &MRI,
1287 int64_t SplatValue, bool AllowUndef) {
1288 if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1289 return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
1290 return false;
1291}
1292
1293bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
1294 const MachineRegisterInfo &MRI,
1295 int64_t SplatValue, bool AllowUndef) {
1296 return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1297 AllowUndef);
1298}
1299
1300std::optional<APInt>
1301llvm::getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI) {
1302 if (auto SplatValAndReg =
1303 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
1304 if (std::optional<ValueAndVReg> ValAndVReg =
1305 getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI))
1306 return ValAndVReg->Value;
1307 }
1308
1309 return std::nullopt;
1310}
1311
1312std::optional<APInt>
1313llvm::getIConstantSplatVal(const MachineInstr &MI,
1314 const MachineRegisterInfo &MRI) {
1315 return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
1316}
1317
1318std::optional<int64_t>
1319llvm::getIConstantSplatSExtVal(const Register Reg,
1320 const MachineRegisterInfo &MRI) {
1321 if (auto SplatValAndReg =
1322 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
1323 return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1324 return std::nullopt;
1325}
1326
1327std::optional<int64_t>
1328llvm::getIConstantSplatSExtVal(const MachineInstr &MI,
1329 const MachineRegisterInfo &MRI) {
1330 return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
1331}
1332
1333std::optional<FPValueAndVReg>
1334llvm::getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI,
1335 bool AllowUndef) {
1336 if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1337 return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1338 return std::nullopt;
1339}
1340
1341bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
1342 const MachineRegisterInfo &MRI,
1343 bool AllowUndef) {
1344 return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1345}
1346
1347bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
1348 const MachineRegisterInfo &MRI,
1349 bool AllowUndef) {
1350 return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1351}
1352
1353std::optional<RegOrConstant>
1354llvm::getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI) {
1355 unsigned Opc = MI.getOpcode();
1356 if (!isBuildVectorOp(Opc))
1357 return std::nullopt;
1358 if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
1359 return RegOrConstant(*Splat);
1360 auto Reg = MI.getOperand(1).getReg();
1361 if (any_of(drop_begin(MI.operands(), 2),
1362 [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1363 return std::nullopt;
1364 return RegOrConstant(Reg);
1365}
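// Illustrative sketch: detecting that a G_BUILD_VECTOR is a splat, whether of
// a constant or of a single register value.
//   if (std::optional<RegOrConstant> Splat = getVectorSplat(MI, MRI)) {
//     // MI builds a uniform vector; Splat holds either the constant or the
//     // register being broadcast.
//   }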
1366
1367static bool isConstantScalar(const MachineInstr &MI,
1368 const MachineRegisterInfo &MRI,
1369 bool AllowFP = true,
1370 bool AllowOpaqueConstants = true) {
1371 switch (MI.getOpcode()) {
1372 case TargetOpcode::G_CONSTANT:
1373 case TargetOpcode::G_IMPLICIT_DEF:
1374 return true;
1375 case TargetOpcode::G_FCONSTANT:
1376 return AllowFP;
1377 case TargetOpcode::G_GLOBAL_VALUE:
1378 case TargetOpcode::G_FRAME_INDEX:
1379 case TargetOpcode::G_BLOCK_ADDR:
1380 case TargetOpcode::G_JUMP_TABLE:
1381 return AllowOpaqueConstants;
1382 default:
1383 return false;
1384 }
1385}
1386
1387bool llvm::isConstantOrConstantVector(MachineInstr &MI,
1388 const MachineRegisterInfo &MRI) {
1389 Register Def = MI.getOperand(0).getReg();
1390 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1391 return true;
1392 GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
1393 if (!BV)
1394 return false;
1395 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1396 if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
1397 getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
1398 continue;
1399 return false;
1400 }
1401 return true;
1402}
1403
1404bool llvm::isConstantOrConstantVector(const MachineInstr &MI,
1405 const MachineRegisterInfo &MRI,
1406 bool AllowFP, bool AllowOpaqueConstants) {
1407 if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
1408 return true;
1409
1410 if (!isBuildVectorOp(MI.getOpcode()))
1411 return false;
1412
1413 const unsigned NumOps = MI.getNumOperands();
1414 for (unsigned I = 1; I != NumOps; ++I) {
1415 const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
1416 if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
1417 return false;
1418 }
1419
1420 return true;
1421}
1422
1423std::optional<APInt>
1424llvm::isConstantOrConstantSplatVector(const MachineInstr &MI,
1425 const MachineRegisterInfo &MRI) {
1426 Register Def = MI.getOperand(0).getReg();
1427 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1428 return C->Value;
1429 auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
1430 if (!MaybeCst)
1431 return std::nullopt;
1432 const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
1433 return APInt(ScalarSize, *MaybeCst, true);
1434}
1435
1436bool llvm::isNullOrNullSplat(const MachineInstr &MI,
1437 const MachineRegisterInfo &MRI, bool AllowUndefs) {
1438 switch (MI.getOpcode()) {
1439 case TargetOpcode::G_IMPLICIT_DEF:
1440 return AllowUndefs;
1441 case TargetOpcode::G_CONSTANT:
1442 return MI.getOperand(1).getCImm()->isNullValue();
1443 case TargetOpcode::G_FCONSTANT: {
1444 const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
1445 return FPImm->isZero() && !FPImm->isNegative();
1446 }
1447 default:
1448 if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
1449 return false;
1450 return isBuildVectorAllZeros(MI, MRI);
1451 }
1452}
1453
1454bool llvm::isAllOnesOrAllOnesSplat(const MachineInstr &MI,
1455 const MachineRegisterInfo &MRI,
1456 bool AllowUndefs) {
1457 switch (MI.getOpcode()) {
1458 case TargetOpcode::G_IMPLICIT_DEF:
1459 return AllowUndefs;
1460 case TargetOpcode::G_CONSTANT:
1461 return MI.getOperand(1).getCImm()->isAllOnesValue();
1462 default:
1463 if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
1464 return false;
1465 return isBuildVectorAllOnes(MI, MRI);
1466 }
1467}
1468
1469bool llvm::matchUnaryPredicate(
1470 const MachineRegisterInfo &MRI, Register Reg,
1471 std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1472
1473 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1474 if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1475 return Match(nullptr);
1476
1477 // TODO: Also handle fconstant
1478 if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1479 return Match(Def->getOperand(1).getCImm());
1480
1481 if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1482 return false;
1483
1484 for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1485 Register SrcElt = Def->getOperand(I).getReg();
1486 const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1487 if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1488 if (!Match(nullptr))
1489 return false;
1490 continue;
1491 }
1492
1493 if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1494 !Match(SrcDef->getOperand(1).getCImm()))
1495 return false;
1496 }
1497
1498 return true;
1499}
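// Illustrative sketch: checking that every lane of a constant (or splat)
// operand satisfies a predicate, e.g. "all shift amounts are non-zero".
//   bool AllNonZero = matchUnaryPredicate(
//       MRI, MI.getOperand(2).getReg(),
//       [](const Constant *C) { return C && !C->isNullValue(); });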
1500
1501bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1502 bool IsFP) {
1503 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1504 case TargetLowering::UndefinedBooleanContent:
1505 return Val & 0x1;
1506 case TargetLowering::ZeroOrOneBooleanContent:
1507 return Val == 1;
1508 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1509 return Val == -1;
1510 }
1511 llvm_unreachable("Invalid boolean contents");
1512}
1513
1514bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
1515 bool IsVector, bool IsFP) {
1516 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1517 case TargetLowering::UndefinedBooleanContent:
1518 return ~Val & 0x1;
1519 case TargetLowering::ZeroOrOneBooleanContent:
1520 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1521 return Val == 0;
1522 }
1523 llvm_unreachable("Invalid boolean contents");
1524}
1525
1526int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1527 bool IsFP) {
1528 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1529 case TargetLowering::UndefinedBooleanContent:
1530 case TargetLowering::ZeroOrOneBooleanContent:
1531 return 1;
1532 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1533 return -1;
1534 }
1535 llvm_unreachable("Invalid boolean contents");
1536}
1537
1538bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
1539 ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
1540 const auto &F = MBB.getParent()->getFunction();
1541 return F.hasOptSize() || F.hasMinSize() ||
1542 llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
1543}
1544
1545void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
1546 LostDebugLocObserver *LocObserver,
1547 SmallInstListTy &DeadInstChain) {
1548 for (MachineOperand &Op : MI.uses()) {
1549 if (Op.isReg() && Op.getReg().isVirtual())
1550 DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1551 }
1552 LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
1553 DeadInstChain.remove(&MI);
1554 MI.eraseFromParent();
1555 if (LocObserver)
1556 LocObserver->checkpoint(false);
1557}
1558
1559void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
1560 MachineRegisterInfo &MRI,
1561 LostDebugLocObserver *LocObserver) {
1562 SmallInstListTy DeadInstChain;
1563 for (MachineInstr *MI : DeadInstrs)
1564 saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1565
1566 while (!DeadInstChain.empty()) {
1567 MachineInstr *Inst = DeadInstChain.pop_back_val();
1568 if (!isTriviallyDead(*Inst, MRI))
1569 continue;
1570 saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1571 }
1572}
1573
1574void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
1575 LostDebugLocObserver *LocObserver) {
1576 return eraseInstrs({&MI}, MRI, LocObserver);
1577}
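// Illustrative sketch: after rewriting an instruction in a combine, erase the
// original and let its now-unused constant/def chain be cleaned up as well
// (LocObserver may be null).
//   eraseInstr(MI, MRI, LocObserver);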
1578
1579void llvm::salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI) {
1580 for (auto &Def : MI.defs()) {
1581 assert(Def.isReg() && "Must be a reg");
1582
1583 SmallVector<MachineOperand *, 8> DbgUsers;
1584 for (auto &MOUse : MRI.use_operands(Def.getReg())) {
1585 MachineInstr *DbgValue = MOUse.getParent();
1586 // Ignore partially formed DBG_VALUEs.
1587 if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
1588 DbgUsers.push_back(&MOUse);
1589 }
1590 }
1591
1592 if (!DbgUsers.empty()) {
1593 salvageDebugInfoForDbgValue(MRI, MI, DbgUsers);
1594 }
1595 }
1596}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
unsigned RegSize
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
basic Basic Alias true
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static void reportGISelDiagnostic(DiagnosticSeverity Severity, MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Definition: Utils.cpp:249
static bool isBuildVectorOp(unsigned Opcode)
Definition: Utils.cpp:1236
static bool isConstantScalar(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Definition: Utils.cpp:1367
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Size
This contains common code to allow clients to notify changes to machine instr.
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Tracks DebugLocs between checkpoints and verifies that they are transferred.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
===- MachineOptimizationRemarkEmitter.h - Opt Diagnostics -*- C++ -*-—===//
unsigned const TargetRegisterInfo * TRI
const char LLVMTargetMachineRef TM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
static const char PassName[]
BinaryOperator * Mul
Class recording the (high level) value of a variable.
opStatus divide(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1069
void copySign(const APFloat &RHS)
Definition: APFloat.h:1163
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition: APFloat.cpp:5196
opStatus subtract(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1051
opStatus add(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1042
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
Definition: APFloat.h:1193
opStatus multiply(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1060
APInt bitcastToAPInt() const
Definition: APFloat.h:1210
opStatus mod(const APFloat &RHS)
Definition: APFloat.h:1087
Class for arbitrary precision integers.
Definition: APInt.h:76
APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition: APInt.cpp:1579
APInt zext(unsigned width) const
Zero extend to a new width.
Definition: APInt.cpp:981
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:1002
APInt trunc(unsigned width) const
Truncate to new width.
Definition: APInt.cpp:906
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition: APInt.cpp:1672
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition: APInt.cpp:1650
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition: APInt.h:805
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition: APInt.cpp:1742
APInt sext(unsigned width) const
Sign extend to a new width.
Definition: APInt.cpp:954
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:418
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:829
Represent the analysis usage information of a pass.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:260
const APFloat & getValueAPF() const
Definition: Constants.h:296
bool isNegative() const
Return true if the sign bit is set.
Definition: Constants.h:303
bool isZero() const
Return true if the value is positive or negative zero.
Definition: Constants.h:300
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:137
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:296
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition: TypeSize.h:302
Represents a G_BUILD_VECTOR.
Abstract class that contains various methods for clients to notify about changes.
KnownBits getKnownBits(Register R)
void insert(MachineInstr *I)
Add the specified instruction to the worklist if it isn't already in it.
Definition: GISelWorkList.h:74
MachineInstr * pop_back_val()
bool empty() const
Definition: GISelWorkList.h:38
void remove(const MachineInstr *I)
Remove I from the worklist if it exists.
Definition: GISelWorkList.h:83
Register getSourceReg(unsigned I) const
Returns the I'th source register.
unsigned getNumSources() const
Returns the number of source registers.
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:655
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:174
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:259
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelType.h:56
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr bool isValid() const
Definition: LowLevelType.h:137
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:151
constexpr bool isVector() const
Definition: LowLevelType.h:147
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:162
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:185
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:282
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:176
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelType.h:92
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
Definition: LowLevelType.h:170
constexpr LLT getScalarType() const
Definition: LowLevelType.h:200
static constexpr LLT scalarOrVector(ElementCount EC, LLT ScalarTy)
Definition: LowLevelType.h:116
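An illustrative sketch of constructing and querying LLTs with the factory functions above (the header lives at llvm/CodeGenTypes/LowLevelType.h in recent trees; older releases used llvm/CodeGen/LowLevelType.h):
#include "llvm/CodeGenTypes/LowLevelType.h"
using namespace llvm;
void lltSketch() {
  LLT S32 = LLT::scalar(32);                                   // plain 32-bit "bag of bits"
  LLT V4S32 = LLT::fixed_vector(4, 32);                        // <4 x s32>
  LLT NxV2S64 = LLT::vector(ElementCount::getScalable(2), 64); // <vscale x 2 x s64>
  bool IsVec = V4S32.isVector();                        // true
  unsigned Elts = V4S32.getNumElements();               // 4 (fixed vectors only)
  LLT Elt = V4S32.getElementType();                     // s32
  unsigned ScalarBits = NxV2S64.getScalarSizeInBits();  // 64
  bool Scalable = NxV2S64.isScalable();                 // true
  (void)S32; (void)IsVec; (void)Elts; (void)Elt; (void)ScalarBits; (void)Scalable;
}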
void checkpoint(bool CheckDebugLocs=true)
Call this to indicate that it's a good point to assess whether locations have been lost.
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:230
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
MachineFunctionProperties & set(Property P)
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
GISelChangeObserver * getObserver() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
Helper class to build MachineInstr.
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
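As a rough illustration (not taken from this file), a GlobalISel pass might use these builder helpers as follows; MIRBuilder, DstReg, and SrcReg are assumed to be set up by the surrounding pass, with SrcReg an s64 and DstReg a compatible result register:
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;
// Split a 64-bit value into two 32-bit halves and reassemble them.
static void rebuildHalves(MachineIRBuilder &MIRBuilder, Register DstReg,
                          Register SrcReg) {
  LLT S32 = LLT::scalar(32);
  auto Unmerge = MIRBuilder.buildUnmerge(S32, SrcReg); // G_UNMERGE_VALUES
  Register Lo = Unmerge.getReg(0);
  Register Hi = Unmerge.getReg(1);
  // Emits G_MERGE_VALUES for scalar pieces (or G_BUILD_VECTOR/G_CONCAT_VECTORS
  // when the destination is a vector).
  MIRBuilder.buildMergeLikeInstr(DstReg, {Lo, Hi});
}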
Representation of each machine instruction.
Definition: MachineInstr.h:68
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:543
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:326
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
Definition: MachineInstr.h:376
iterator_range< mop_iterator > uses()
Returns a range that includes all operands that are register uses.
Definition: MachineInstr.h:707
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:472
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:553
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setReg(Register Reg)
Change the register this operand corresponds to.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
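A sketch of how the operand predicates above are commonly used when walking an instruction's operands (illustrative only):
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/IR/Constants.h"
using namespace llvm;
static void inspectOperands(MachineInstr &MI) {
  for (MachineOperand &MO : MI.operands()) {
    if (MO.isReg()) {
      Register R = MO.getReg();                         // virtual or physical register
      (void)R;
    } else if (MO.isCImm()) {
      const APInt &V = MO.getCImm()->getValue();        // integer immediate
      (void)V;
    } else if (MO.isFPImm()) {
      const APFloat &F = MO.getFPImm()->getValueAPF();  // FP immediate
      (void)F;
    }
  }
}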
Diagnostic information for missed-optimization remarks.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Analysis providing profile information.
Represents a value which can be a Register or a constant.
Definition: Utils.h:385
Holds all the information related to register banks.
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
This class implements the register bank concept.
Definition: RegisterBank.h:28
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:78
Target-Independent Code Generator Pass Configuration Options.
bool isGlobalISelAbortEnabled() const
Check whether or not GlobalISel should abort on error.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
LLVM Value Representation.
Definition: Value.h:74
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:187
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition: TypeSize.h:243
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
Definition: APInt.h:2171
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
Definition: APInt.h:2176
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition: APInt.h:2181
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
Definition: APInt.h:2186
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
SpecificConstantMatch m_SpecificICst(int64_t RequestedValue)
Matches a constant equal to RequestedValue.
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
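For context, mi_match and m_SpecificICst are typically combined like this inside a combiner (a hedged sketch; Reg and MRI come from the surrounding pass):
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;
using namespace MIPatternMatch;
static bool isConstantOne(Register Reg, const MachineRegisterInfo &MRI) {
  // Succeeds if Reg is an integer constant equal to 1.
  return mi_match(Reg, MRI, m_SpecificICst(1));
}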
DiagnosticInfoMIROptimization::MachineArgument MNV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII, MCRegister PhysReg, const TargetRegisterClass &RC, const DebugLoc &DL, LLT RegTy=LLT())
Return a virtual register corresponding to the incoming argument register PhysReg.
Definition: Utils.cpp:880
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:456
bool isBuildVectorAllZeros(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition: Utils.cpp:1341
Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
Definition: Utils.cpp:54
MachineInstr * getOpcodeDef(unsigned Opcode, Register Reg, const MachineRegisterInfo &MRI)
See if Reg is defined by a single def instruction that is Opcode.
Definition: Utils.cpp:626
const ConstantFP * getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:439
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
Definition: Utils.cpp:293
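A brief sketch of the difference between the plain and look-through constant queries (assumed usage; Reg and MRI come from the caller):
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include <optional>
using namespace llvm;
static void constantQueries(Register Reg, const MachineRegisterInfo &MRI) {
  // Only matches when Reg is directly defined by a G_CONSTANT.
  if (std::optional<APInt> Cst = getIConstantVRegVal(Reg, MRI))
    (void)*Cst;
  // Also walks through copies and extension/truncation chains to the constant.
  if (std::optional<ValueAndVReg> VC =
          getIConstantVRegValWithLookThrough(Reg, MRI))
    (void)VC->Value; // the APInt, plus VC->VReg for the defining vreg
}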
std::optional< APFloat > ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:954
std::optional< APInt > getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1301
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
Definition: Utils.cpp:1454
const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
std::optional< APFloat > ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:711
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1579
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition: Utils.cpp:153
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
std::optional< APInt > ConstantFoldExtOp(unsigned Opcode, const Register Op1, uint64_t Imm, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:913
std::optional< RegOrConstant > getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1354
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2018 maximum semantics.
Definition: APFloat.h:1430
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
std::optional< APInt > isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a splat vector of constant integers.
Definition: Utils.cpp:1424
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
Definition: Utils.cpp:1436
MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
Definition: Utils.cpp:466
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg, std::function< bool(const Constant *ConstVal)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant G_B...
Definition: Utils.cpp:1469
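An illustrative use of matchUnaryPredicate, checking that a scalar constant or every build-vector element is a power of two (the helper name is an assumption, not from this file):
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/IR/Constants.h"
using namespace llvm;
static bool allElementsPowerOf2(Register Reg, const MachineRegisterInfo &MRI) {
  return matchUnaryPredicate(
      MRI, Reg,
      [](const Constant *C) {
        const auto *CI = dyn_cast_or_null<ConstantInt>(C);
        return CI && CI->getValue().isPowerOf2();
      },
      /*AllowUndefs=*/false);
}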
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Returns true if given the TargetLowering's boolean contents information, the value Val contains a tru...
Definition: Utils.cpp:1501
LLVM_READNONE LLT getLCMType(LLT OrigTy, LLT TargetTy)
Return the least common multiple type of OrigTy and TargetTy, by changing the number of vector elemen...
Definition: Utils.cpp:1073
std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT that fits in int64_t, returns it.
Definition: Utils.cpp:305
std::optional< APInt > ConstantFoldBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:645
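A hedged sketch of constant-folding a generic binary op and materializing the result (MIRBuilder is assumed to be positioned at MI, and MI is assumed to be a two-source binary op such as G_ADD):
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
using namespace llvm;
// Try to replace e.g. a G_ADD of two G_CONSTANTs with a single G_CONSTANT.
static bool tryFoldBinOp(MachineInstr &MI, MachineRegisterInfo &MRI,
                         MachineIRBuilder &MIRBuilder) {
  Register Dst = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  std::optional<APInt> Folded =
      ConstantFoldBinOp(MI.getOpcode(), LHS, RHS, MRI);
  if (!Folded)
    return false;
  MIRBuilder.buildConstant(Dst, *Folded);
  MI.eraseFromParent();
  return true;
}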
bool shouldOptForSize(const MachineBasicBlock &MBB, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
Returns true if the given block should be optimized for size.
Definition: Utils.cpp:1538
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1738
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE maxNum semantics.
Definition: APFloat.h:1406
bool isConstantOrConstantVector(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Return true if the specified instruction is known to be a constant, or a vector of constants.
Definition: Utils.cpp:1404
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI)
Check if DstReg can be replaced with SrcReg depending on the register constraints.
Definition: Utils.cpp:199
void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver, SmallInstListTy &DeadInstChain)
Definition: Utils.cpp:1545
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition: Utils.cpp:273
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
std::optional< ValueAndVReg > getAnyConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true, bool LookThroughAnyExt=false)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT or G_FCONST...
Definition: Utils.cpp:420
bool isBuildVectorAllOnes(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition: Utils.cpp:1347
SmallVector< APInt > ConstantFoldVectorBinop(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Tries to constant fold a vector binop with sources Op1 and Op2.
Definition: Utils.cpp:765
std::optional< FPValueAndVReg > getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI, bool AllowUndef=true)
Returns a floating point scalar constant of a build vector splat if it exists.
Definition: Utils.cpp:1334
std::optional< APInt > ConstantFoldCastOp(unsigned Opcode, LLT DstTy, const Register Op0, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:930
void extractParts(Register Reg, LLT Ty, int NumParts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Helper function to split a wide generic register into bitwise blocks with the given Type (which impli...
Definition: Utils.cpp:480
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
Definition: Utils.cpp:1069
LLVM_READNONE LLT getCoverTy(LLT OrigTy, LLT TargetTy)
Return the smallest type that covers both OrigTy and TargetTy and is a multiple of TargetTy.
Definition: Utils.cpp:1140
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE minNum semantics.
Definition: APFloat.h:1395
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
bool isTargetSpecificOpcode(unsigned Opcode)
Check whether the given Opcode is a target-specific opcode.
Definition: TargetOpcodes.h:36
std::optional< FPValueAndVReg > getFConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_FCONSTANT returns it...
Definition: Utils.cpp:428
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Definition: Utils.cpp:1514
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:632
bool isBuildVectorConstantSplat(const Register Reg, const MachineRegisterInfo &MRI, int64_t SplatValue, bool AllowUndef)
Return true if the specified register is defined by G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all ...
Definition: Utils.cpp:1285
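An illustrative check for splat constants built from the helpers above (Reg and MRI are assumed to come from the caller):
#include "llvm/CodeGen/GlobalISel/Utils.h"
using namespace llvm;
static void splatQueries(Register Reg, const MachineRegisterInfo &MRI) {
  // True when Reg is a G_BUILD_VECTOR(_TRUNC) whose elements all equal 0.
  bool IsZeroSplat =
      isBuildVectorConstantSplat(Reg, MRI, /*SplatValue=*/0, /*AllowUndef=*/true);
  // Retrieve the splatted value itself, if there is one.
  if (std::optional<APInt> Splat = getIConstantSplatVal(Reg, MRI))
    (void)*Splat;
  (void)IsZeroSplat;
}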
void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition: Utils.cpp:1574
DiagnosticSeverity
Defines the different supported severity of a diagnostic.
@ DS_Warning
@ DS_Error
Register constrainRegToClass(MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, Register Reg, const TargetRegisterClass &RegClass)
Try to constrain Reg to the specified register class.
Definition: Utils.cpp:44
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP)
Returns an integer representing true, as defined by the TargetBooleanContents.
Definition: Utils.cpp:1526
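A short sketch showing how the boolean-contents helpers interact (the TargetLowering reference would normally come from the subtarget; values are illustrative):
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;
static void booleanContentsSketch(const TargetLowering &TLI) {
  // The integer a G_ICMP produces for "true" on this target
  // (typically 1 or -1, depending on getBooleanContents()).
  int64_t TrueVal = getICmpTrueVal(TLI, /*IsVector=*/false, /*IsFP=*/false);
  // Check a concrete constant against the same convention.
  bool IsTrue = isConstTrueVal(TLI, TrueVal, /*IsVector=*/false, /*IsFP=*/false);
  (void)IsTrue;
}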
std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
Definition: Utils.cpp:414
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1758
bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI)
Returns true if Val can be assumed to never be a signaling NaN.
Definition: Utils.h:328
std::optional< DefinitionAndSourceRegister > getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, and underlying value Register folding away any copies.
Definition: Utils.cpp:447
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:212
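A worked example of the two alignment helpers (values illustrative):
#include "llvm/Support/Alignment.h"
using namespace llvm;
void alignmentSketch() {
  Align A(16);
  uint64_t Padded = alignTo(40, A);        // 48: next multiple of 16 >= 40
  Align AtOffset = commonAlignment(A, 8);  // 8: alignment still valid 8 bytes in
  (void)Padded; (void)AtOffset;
}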
void eraseInstrs(ArrayRef< MachineInstr * > DeadInstrs, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition: Utils.cpp:1559
void salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI, MachineInstr &MI, ArrayRef< MachineOperand * > DbgUsers)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
bool isKnownNeverNaN(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the source register for Reg, folding away any trivial copies.
Definition: Utils.cpp:473
LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
Definition: Utils.cpp:1161
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2018 minimum semantics.
Definition: APFloat.h:1417
std::optional< int64_t > getIConstantSplatSExtVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1319
void extractVectorParts(Register Reg, unsigned NumElts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Version which handles irregular sub-vector splits.
Definition: Utils.cpp:584
std::optional< SmallVector< unsigned > > ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI)
Tries to constant fold a G_CTLZ operation on Src.
Definition: Utils.cpp:967
int getSplatIndex(ArrayRef< int > Mask)
If all non-negative Mask elements are the same value, return that value.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...
Definition: Utils.cpp:220
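A minimal dead-instruction cleanup sketch combining the helpers above (MI and MRI are assumed to come from a combiner loop):
#include "llvm/CodeGen/GlobalISel/Utils.h"
using namespace llvm;
static bool eraseIfDead(MachineInstr &MI, MachineRegisterInfo &MRI) {
  // Defines only dead vregs and has no other side effects: safe to drop.
  if (!isTriviallyDead(MI, MRI))
    return false;
  eraseInstr(MI, MRI, /*LocObserver=*/nullptr);
  return true;
}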
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
Definition: Utils.cpp:863
void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel warning as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition: Utils.cpp:267
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Simple struct used to hold a Register value and the instruction which defines it.
Definition: Utils.h:224
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition: KnownBits.h:280
unsigned countMinPopulation() const
Returns the number of bits known to be one.
Definition: KnownBits.h:277
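An illustrative query against GISelKnownBits using the population-count bounds above (KB and Reg are assumed to come from a combiner; the helper name is hypothetical):
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
using namespace llvm;
static bool hasExactlyOneSetBit(GISelKnownBits &KB, Register Reg) {
  KnownBits Known = KB.getKnownBits(Reg);
  // At most one bit can possibly be set, and at least one bit is known set.
  return Known.countMaxPopulation() == 1 && Known.countMinPopulation() == 1;
}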
This class contains a discriminated union of information about pointers in memory operands,...
int64_t Offset
Offset - This is an offset from the base Value*.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
Simple struct used to hold a constant integer value and a virtual register.
Definition: Utils.h:183