LLVM 19.0.0git
Utils.cpp
Go to the documentation of this file.
1//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file This file implements the utility functions used by the GlobalISel
9/// pipeline.
10//===----------------------------------------------------------------------===//
11
13#include "llvm/ADT/APFloat.h"
14#include "llvm/ADT/APInt.h"
35#include "llvm/IR/Constants.h"
38#include <numeric>
39#include <optional>
40
41#define DEBUG_TYPE "globalisel-utils"
42
43using namespace llvm;
44using namespace MIPatternMatch;
45
// llvm::constrainRegToClass: constrain the virtual register Reg to RegClass.
// If the existing constraints are incompatible, a brand-new virtual register
// of RegClass is created and returned instead of Reg.
// NOTE(review): the opening signature line is missing from this extract
// (doxygen artifact); presumably "(MachineRegisterInfo &MRI," precedes the
// parameters below — confirm against the checked-in source.
47 const TargetInstrInfo &TII,
48 const RegisterBankInfo &RBI, Register Reg,
49 const TargetRegisterClass &RegClass) {
// Try to apply RegClass in place; on failure fall back to a fresh vreg.
50 if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
51 return MRI.createVirtualRegister(&RegClass);
52
53 return Reg;
54}
55
// llvm::constrainOperandRegClass (explicit-RegClass overload): constrain the
// register operand RegMO to RegClass, inserting a COPY before/after InsertPt
// when the classes are incompatible, and notifying the function's
// GISelChangeObserver of any instruction mutation.
// NOTE(review): the first signature line and one parameter line are missing
// from this extract — confirm the full parameter list against the source.
57 const MachineFunction &MF, const TargetRegisterInfo &TRI,
59 const RegisterBankInfo &RBI, MachineInstr &InsertPt,
60 const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
61 Register Reg = RegMO.getReg();
62 // Assume physical registers are properly constrained.
63 assert(Reg.isVirtual() && "PhysReg not implemented");
64
65 // Save the old register class to check whether
66 // the change notifications will be required.
67 // TODO: A better approach would be to pass
68 // the observers to constrainRegToClass().
69 auto *OldRegClass = MRI.getRegClassOrNull(Reg);
70 Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
71 // If we created a new virtual register because the class is not compatible
72 // then create a copy between the new and the old register.
73 if (ConstrainedReg != Reg) {
74 MachineBasicBlock::iterator InsertIt(&InsertPt);
75 MachineBasicBlock &MBB = *InsertPt.getParent();
76 // FIXME: The copy needs to have the classes constrained for its operands.
77 // Use operand's regbank to get the class for old register (Reg).
// For a use, copy the old value into the new constrained vreg before the
// instruction; for a def, copy the constrained result back out after it.
78 if (RegMO.isUse()) {
79 BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
80 TII.get(TargetOpcode::COPY), ConstrainedReg)
81 .addReg(Reg);
82 } else {
83 assert(RegMO.isDef() && "Must be a definition");
84 BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
85 TII.get(TargetOpcode::COPY), Reg)
86 .addReg(ConstrainedReg);
87 }
// Rewriting the operand mutates its parent instruction: bracket the change
// with changing/changed notifications for any registered observer.
88 if (GISelChangeObserver *Observer = MF.getObserver()) {
89 Observer->changingInstr(*RegMO.getParent());
90 }
91 RegMO.setReg(ConstrainedReg);
92 if (GISelChangeObserver *Observer = MF.getObserver()) {
93 Observer->changedInstr(*RegMO.getParent());
94 }
95 } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
// Same vreg but its class changed in place: notify the def (when this
// operand is a use) and all users of the register.
96 if (GISelChangeObserver *Observer = MF.getObserver()) {
97 if (!RegMO.isDef()) {
98 MachineInstr *RegDef = MRI.getVRegDef(Reg);
99 Observer->changedInstr(*RegDef);
100 }
101 Observer->changingAllUsesOfReg(MRI, Reg);
102 Observer->finishedChangingAllUsesOfReg();
103 }
104 }
105 return ConstrainedReg;
106}
107
// llvm::constrainOperandRegClass (MCInstrDesc overload): derive the required
// register class for operand OpIdx from the instruction description II, then
// delegate to the explicit-RegClass overload above. Returns the (possibly
// new) register for the operand.
// NOTE(review): the first signature line is missing from this extract —
// confirm the full parameter list against the checked-in source.
109 const MachineFunction &MF, const TargetRegisterInfo &TRI,
111 const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
112 MachineOperand &RegMO, unsigned OpIdx) {
113 Register Reg = RegMO.getReg();
114 // Assume physical registers are properly constrained.
115 assert(Reg.isVirtual() && "PhysReg not implemented");
116
117 const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
118 // Some of the target independent instructions, like COPY, may not impose any
119 // register class constraints on some of their operands: If it's a use, we can
120 // skip constraining as the instruction defining the register would constrain
121 // it.
122
123 if (OpRC) {
124 // Obtain the RC from incoming regbank if it is a proper sub-class. Operands
125 // can have multiple regbanks for a superclass that combine different
126 // register types (E.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
127 // resolved by targets during regbankselect should not be overridden.
128 if (const auto *SubRC = TRI.getCommonSubClass(
129 OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
130 OpRC = SubRC;
131
132 OpRC = TRI.getAllocatableClass(OpRC);
133 }
134
// No class constraint available: bail out, returning the register unchanged.
135 if (!OpRC) {
136 assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
137 "Register class constraint is required unless either the "
138 "instruction is target independent or the operand is a use");
139 // FIXME: Just bailing out like this here could be not enough, unless we
140 // expect the users of this function to do the right thing for PHIs and
141 // COPY:
142 // v1 = COPY v0
143 // v2 = COPY v1
144 // v1 here may end up not being constrained at all. Please notice that to
145 // reproduce the issue we likely need a destination pattern of a selection
146 // rule producing such extra copies, not just an input GMIR with them as
147 // every existing target using selectImpl handles copies before calling it
148 // and they never reach this function.
149 return Reg;
150 }
151 return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
152 RegMO);
153}
154
// llvm::constrainSelectedInstRegOperands: after instruction selection,
// constrain every explicit virtual register operand of I to the class
// required by its MCInstrDesc, and establish any TIED_TO constraints.
// Always returns true.
// NOTE(review): the first signature line (taking MachineInstr &I) and the
// local MF/MRI declarations are missing from this extract — confirm against
// the checked-in source.
156 const TargetInstrInfo &TII,
157 const TargetRegisterInfo &TRI,
158 const RegisterBankInfo &RBI) {
159 assert(!isPreISelGenericOpcode(I.getOpcode()) &&
160 "A selected instruction is expected");
161 MachineBasicBlock &MBB = *I.getParent();
164
165 for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
166 MachineOperand &MO = I.getOperand(OpI);
167
168 // There's nothing to be done on non-register operands.
169 if (!MO.isReg())
170 continue;
171
172 LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
173 assert(MO.isReg() && "Unsupported non-reg operand");
174
175 Register Reg = MO.getReg();
176 // Physical registers don't need to be constrained.
177 if (Reg.isPhysical())
178 continue;
179
180 // Register operands with a value of 0 (e.g. predicate operands) don't need
181 // to be constrained.
182 if (Reg == 0)
183 continue;
184
185 // If the operand is a vreg, we should constrain its regclass, and only
186 // insert COPYs if that's impossible.
187 // constrainOperandRegClass does that for us.
188 constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
189
190 // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
191 // done.
192 if (MO.isUse()) {
193 int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
194 if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
195 I.tieOperands(DefIdx, OpI);
196 }
197 }
198 return true;
199}
200
// llvm::canReplaceReg: decide whether every use of DstReg could be rewritten
// to SrcReg without violating type or register-class/bank constraints.
// NOTE(review): the signature lines are missing from this extract —
// presumably (Register DstReg, Register SrcReg, MachineRegisterInfo &MRI).
203 // Give up if either DstReg or SrcReg is a physical register.
204 if (DstReg.isPhysical() || SrcReg.isPhysical())
205 return false;
206 // Give up if the types don't match.
207 if (MRI.getType(DstReg) != MRI.getType(SrcReg))
208 return false;
209 // Replace if either DstReg has no constraints or the register
210 // constraints match.
211 const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);
212 if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))
213 return true;
214
215 // Otherwise match if the Src is already a regclass that is covered by the Dst
216 // RegBank.
217 return DstRBC.is<const RegisterBank *>() && MRI.getRegClassOrNull(SrcReg) &&
218 DstRBC.get<const RegisterBank *>()->covers(
219 *MRI.getRegClassOrNull(SrcReg));
220}
221
// llvm::isTriviallyDead: true when MI has no side effects and every register
// it defines is a virtual register with no non-debug uses, so the instruction
// can be deleted.
// NOTE(review): the first signature line (taking const MachineInstr &MI) is
// missing from this extract.
223 const MachineRegisterInfo &MRI) {
224 // FIXME: This logical is mostly duplicated with
225 // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
226 // MachineInstr::isLabel?
227
228 // Don't delete frame allocation labels.
229 if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
230 return false;
231 // LIFETIME markers should be preserved even if they seem dead.
232 if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
233 MI.getOpcode() == TargetOpcode::LIFETIME_END)
234 return false;
235
236 // If we can move an instruction, we can remove it. Otherwise, it has
237 // a side-effect of some sort.
238 bool SawStore = false;
239 if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
240 return false;
241
242 // Instructions without side-effects are dead iff they only define dead vregs.
243 for (const auto &MO : MI.all_defs()) {
244 Register Reg = MO.getReg();
245 if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
246 return false;
247 }
248 return true;
249}
250
// reportGISelDiagnostic: common helper for emitting GlobalISel remarks.
// Emits R through MORE, or aborts via report_fatal_error when the diagnostic
// is an error and abort-on-failure is in effect.
// NOTE(review): the first signature line and part of the IsFatal condition
// are missing from this extract (the condition presumably also tests
// TPC.isGlobalISelAbortEnabled()) — confirm against the checked-in source.
252 MachineFunction &MF,
253 const TargetPassConfig &TPC,
256 bool IsFatal = Severity == DS_Error &&
258 // Print the function name explicitly if we don't have a debug location (which
259 // makes the diagnostic less useful) or if we're going to emit a raw error.
260 if (!R.getLocation().isValid() || IsFatal)
261 R << (" (in function: " + MF.getName() + ")").str();
262
263 if (IsFatal)
264 report_fatal_error(Twine(R.getMsg()));
265 else
266 MORE.emit(R);
267}
268
// NOTE(review): the body of reportGISelWarning is missing from this extract;
// only its closing brace survives below.
273}
274
// reportGISelFailure: mark the function as having failed instruction
// selection, then report R as an error diagnostic.
// NOTE(review): the signature lines are missing from this extract.
278 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
279 reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
280}
281
// reportGISelFailure (message overload): build a MachineOptimizationRemarkMissed
// from PassName/Msg/MI and forward it to the remark-based overload above.
// NOTE(review): the first signature line is missing from this extract.
284 const char *PassName, StringRef Msg,
285 const MachineInstr &MI) {
286 MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
287 MI.getDebugLoc(), MI.getParent());
288 R << Msg;
289 // Printing MI is expensive; only do it if expensive remarks are enabled.
290 if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
291 R << ": " << ore::MNV("Inst", MI);
292 reportGISelFailure(MF, TPC, MORE, R);
293}
294
295std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
296 const MachineRegisterInfo &MRI) {
297 std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
298 VReg, MRI, /*LookThroughInstrs*/ false);
299 assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
300 "Value found while looking through instrs");
301 if (!ValAndVReg)
302 return std::nullopt;
303 return ValAndVReg->Value;
304}
305
// Return the sign-extended int64_t value of the G_CONSTANT defining VReg,
// when the constant fits in 64 bits; std::nullopt otherwise.
// NOTE(review): the name line is missing from this extract — presumably
// llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &).
306std::optional<int64_t>
308 std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
// Constants wider than 64 bits cannot be returned as int64_t.
309 if (Val && Val->getBitWidth() <= 64)
310 return Val->getSExtValue();
311 return std::nullopt;
312}
313
314namespace {
315
316typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
317typedef std::function<std::optional<APInt>(const MachineInstr *MI)> GetAPCstFn;
318
// Core look-through walk shared by the public get*ConstantVRegVal* helpers:
// chase VReg through COPY / G_INTTOPTR and (optionally) ext/trunc until a
// constant-producing instruction (per IsConstantOpcode) is found, then
// extract its value via getAPCstValue and replay the recorded ext/trunc
// chain on the APInt in reverse order.
319std::optional<ValueAndVReg> getConstantVRegValWithLookThrough(
320 Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
321 GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
322 bool LookThroughAnyExt = false) {
// NOTE(review): two declaration lines are missing from this extract here —
// presumably "MachineInstr *MI;" and a SmallVector of (opcode, size) pairs
// named SeenOpcodes, as used below. Confirm against the checked-in source.
325
326 while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
327 LookThroughInstrs) {
328 switch (MI->getOpcode()) {
329 case TargetOpcode::G_ANYEXT:
330 if (!LookThroughAnyExt)
331 return std::nullopt;
332 [[fallthrough]];
333 case TargetOpcode::G_TRUNC:
334 case TargetOpcode::G_SEXT:
335 case TargetOpcode::G_ZEXT:
// Record the opcode and destination width so the transform can be replayed
// on the constant value after the walk terminates.
336 SeenOpcodes.push_back(std::make_pair(
337 MI->getOpcode(),
338 MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
339 VReg = MI->getOperand(1).getReg();
340 break;
341 case TargetOpcode::COPY:
342 VReg = MI->getOperand(1).getReg();
// Copies from physical registers are opaque: give up.
343 if (VReg.isPhysical())
344 return std::nullopt;
345 break;
346 case TargetOpcode::G_INTTOPTR:
347 VReg = MI->getOperand(1).getReg();
348 break;
349 default:
350 return std::nullopt;
351 }
352 }
353 if (!MI || !IsConstantOpcode(MI))
354 return std::nullopt;
355
356 std::optional<APInt> MaybeVal = getAPCstValue(MI);
357 if (!MaybeVal)
358 return std::nullopt;
359 APInt &Val = *MaybeVal;
// Replay the recorded conversions innermost-first to reconstruct the value
// as seen at the original query register.
360 for (auto [Opcode, Size] : reverse(SeenOpcodes)) {
361 switch (Opcode) {
362 case TargetOpcode::G_TRUNC:
363 Val = Val.trunc(Size);
364 break;
365 case TargetOpcode::G_ANYEXT:
366 case TargetOpcode::G_SEXT:
367 Val = Val.sext(Size);
368 break;
369 case TargetOpcode::G_ZEXT:
370 Val = Val.zext(Size);
371 break;
372 }
373 }
374
375 return ValueAndVReg{Val, VReg};
376}
377
378bool isIConstant(const MachineInstr *MI) {
379 if (!MI)
380 return false;
381 return MI->getOpcode() == TargetOpcode::G_CONSTANT;
382}
383
384bool isFConstant(const MachineInstr *MI) {
385 if (!MI)
386 return false;
387 return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
388}
389
390bool isAnyConstant(const MachineInstr *MI) {
391 if (!MI)
392 return false;
393 unsigned Opc = MI->getOpcode();
394 return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
395}
396
397std::optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
398 const MachineOperand &CstVal = MI->getOperand(1);
399 if (CstVal.isCImm())
400 return CstVal.getCImm()->getValue();
401 return std::nullopt;
402}
403
404std::optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
405 const MachineOperand &CstVal = MI->getOperand(1);
406 if (CstVal.isCImm())
407 return CstVal.getCImm()->getValue();
408 if (CstVal.isFPImm())
409 return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
410 return std::nullopt;
411}
412
413} // end anonymous namespace
414
// Public wrapper: look through copies/extensions for an integer G_CONSTANT.
// NOTE(review): the name line is missing from this extract — this is
// llvm::getIConstantVRegValWithLookThrough.
416 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
417 return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
418 getCImmAsAPInt, LookThroughInstrs);
419}
420
// Public wrapper: look through copies/extensions for either a G_CONSTANT or
// a G_FCONSTANT (FP payload bitcast to APInt).
// NOTE(review): the name line is missing from this extract — this is
// llvm::getAnyConstantVRegValWithLookThrough.
422 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
423 bool LookThroughAnyExt) {
424 return getConstantVRegValWithLookThrough(
425 VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
426 LookThroughAnyExt);
427}
428
// Public wrapper: look through copies/extensions for a G_FCONSTANT and
// return its APFloat value together with the defining vreg.
429std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
430 Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
431 auto Reg = getConstantVRegValWithLookThrough(
432 VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
433 if (!Reg)
434 return std::nullopt;
// NOTE(review): the first line of this return statement is missing from the
// extract — presumably it builds FPValueAndVReg from the found constant's
// APFloat value and the line below supplies the vreg member.
436 Reg->VReg};
437}
438
// Return the ConstantFP of the G_FCONSTANT directly defining VReg, or
// nullptr. No look-through is performed.
// NOTE(review): the name line is missing from this extract — this is
// llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &).
439const ConstantFP *
441 MachineInstr *MI = MRI.getVRegDef(VReg);
442 if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
443 return nullptr;
444 return MI->getOperand(1).getFPImm();
445}
446
// Walk def chains through COPY and pre-ISel optimization hints, returning
// both the ultimate defining instruction and the last register in the chain
// whose type is still valid.
// NOTE(review): the name line is missing from this extract — this is
// llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &).
447std::optional<DefinitionAndSourceRegister>
449 Register DefSrcReg = Reg;
450 auto *DefMI = MRI.getVRegDef(Reg);
451 auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
452 if (!DstTy.isValid())
453 return std::nullopt;
454 unsigned Opc = DefMI->getOpcode();
455 while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
456 Register SrcReg = DefMI->getOperand(1).getReg();
457 auto SrcTy = MRI.getType(SrcReg);
// Stop at the first source without a valid LLT (e.g. a physreg copy source).
458 if (!SrcTy.isValid())
459 break;
460 DefMI = MRI.getVRegDef(SrcReg);
461 DefSrcReg = SrcReg;
462 Opc = DefMI->getOpcode();
463 }
464 return DefinitionAndSourceRegister{DefMI, DefSrcReg};
465}
466
// Convenience wrapper: the defining MachineInstr ignoring copies, or nullptr.
// NOTE(review): the signature line and the call line invoking
// getDefSrcRegIgnoringCopies are missing from this extract — this is
// llvm::getDefIgnoringCopies(Register Reg, ...).
468 const MachineRegisterInfo &MRI) {
469 std::optional<DefinitionAndSourceRegister> DefSrcReg =
471 return DefSrcReg ? DefSrcReg->MI : nullptr;
472}
473
// Convenience wrapper: the ultimate source register ignoring copies, or an
// invalid Register().
// NOTE(review): the signature line and the call line invoking
// getDefSrcRegIgnoringCopies are missing from this extract — presumably
// llvm::getSrcRegIgnoringCopies(Register Reg, ...).
475 const MachineRegisterInfo &MRI) {
476 std::optional<DefinitionAndSourceRegister> DefSrcReg =
478 return DefSrcReg ? DefSrcReg->Reg : Register();
479}
480
// Split Reg into NumParts new generic vregs of type Ty via a single
// G_UNMERGE_VALUES; the created registers are appended to VRegs.
// NOTE(review): the VRegs and MRI parameter lines are missing from this
// extract — confirm the full signature against the checked-in source.
481void llvm::extractParts(Register Reg, LLT Ty, int NumParts,
483 MachineIRBuilder &MIRBuilder,
485 for (int i = 0; i < NumParts; ++i)
486 VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
487 MIRBuilder.buildUnmerge(VRegs, Reg);
488}
489
// Split Reg (of type RegTy) into as many MainTy pieces as fit, placing them
// in VRegs, with any remainder placed in LeftoverRegs; LeftoverTy is an out
// parameter describing the remainder type. Returns true on success.
// NOTE(review): the VRegs and MRI parameter lines are missing from this
// extract — confirm the full signature against the checked-in source.
490bool llvm::extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
492 SmallVectorImpl<Register> &LeftoverRegs,
493 MachineIRBuilder &MIRBuilder,
495 assert(!LeftoverTy.isValid() && "this is an out argument");
496
497 unsigned RegSize = RegTy.getSizeInBits();
498 unsigned MainSize = MainTy.getSizeInBits();
499 unsigned NumParts = RegSize / MainSize;
500 unsigned LeftoverSize = RegSize - NumParts * MainSize;
501
502 // Use an unmerge when possible.
503 if (LeftoverSize == 0) {
504 for (unsigned I = 0; I < NumParts; ++I)
505 VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));
506 MIRBuilder.buildUnmerge(VRegs, Reg);
507 return true;
508 }
509
510 // Try to use unmerge for irregular vector split where possible
511 // For example when splitting a <6 x i32> into <4 x i32> with <2 x i32>
512 // leftover, it becomes:
513 // <2 x i32> %2, <2 x i32>%3, <2 x i32> %4 = G_UNMERGE_VALUE <6 x i32> %1
514 // <4 x i32> %5 = G_CONCAT_VECTOR <2 x i32> %2, <2 x i32> %3
515 if (RegTy.isVector() && MainTy.isVector()) {
516 unsigned RegNumElts = RegTy.getNumElements();
517 unsigned MainNumElts = MainTy.getNumElements();
518 unsigned LeftoverNumElts = RegNumElts % MainNumElts;
519 // If can unmerge to LeftoverTy, do it
520 if (MainNumElts % LeftoverNumElts == 0 &&
521 RegNumElts % LeftoverNumElts == 0 &&
522 RegTy.getScalarSizeInBits() == MainTy.getScalarSizeInBits() &&
523 LeftoverNumElts > 1) {
524 LeftoverTy =
525 LLT::fixed_vector(LeftoverNumElts, RegTy.getScalarSizeInBits());
526
527 // Unmerge the SrcReg to LeftoverTy vectors
528 SmallVector<Register, 4> UnmergeValues;
529 extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,
530 MIRBuilder, MRI);
531
532 // Find how many LeftoverTy makes one MainTy
533 unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;
534 unsigned NumOfLeftoverVal =
535 ((RegNumElts % MainNumElts) / LeftoverNumElts);
536
537 // Create as many MainTy as possible using unmerged value
538 SmallVector<Register, 4> MergeValues;
539 for (unsigned I = 0; I < UnmergeValues.size() - NumOfLeftoverVal; I++) {
540 MergeValues.push_back(UnmergeValues[I]);
541 if (MergeValues.size() == LeftoverPerMain) {
542 VRegs.push_back(
543 MIRBuilder.buildMergeLikeInstr(MainTy, MergeValues).getReg(0));
544 MergeValues.clear();
545 }
546 }
547 // Populate LeftoverRegs with the leftovers
548 for (unsigned I = UnmergeValues.size() - NumOfLeftoverVal;
549 I < UnmergeValues.size(); I++) {
550 LeftoverRegs.push_back(UnmergeValues[I]);
551 }
552 return true;
553 }
554 }
555 // Perform irregular split. Leftover is last element of RegPieces.
556 if (MainTy.isVector()) {
557 SmallVector<Register, 8> RegPieces;
558 extractVectorParts(Reg, MainTy.getNumElements(), RegPieces, MIRBuilder,
559 MRI);
560 for (unsigned i = 0; i < RegPieces.size() - 1; ++i)
561 VRegs.push_back(RegPieces[i]);
562 LeftoverRegs.push_back(RegPieces[RegPieces.size() - 1]);
563 LeftoverTy = MRI.getType(LeftoverRegs[0]);
564 return true;
565 }
566
567 LeftoverTy = LLT::scalar(LeftoverSize);
568 // For irregular sizes, extract the individual parts.
569 for (unsigned I = 0; I != NumParts; ++I) {
570 Register NewReg = MRI.createGenericVirtualRegister(MainTy);
571 VRegs.push_back(NewReg);
572 MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
573 }
574
// The scalar tail: extract leftover-sized pieces after the main parts.
575 for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
576 Offset += LeftoverSize) {
577 Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
578 LeftoverRegs.push_back(NewReg);
579 MIRBuilder.buildExtract(NewReg, Reg, Offset);
580 }
581
582 return true;
583}
584
// Split the vector register Reg into NumElts-wide sub-vectors appended to
// VRegs; a trailing sub-vector (or single element) holds any remainder.
// NOTE(review): the VRegs and MRI parameter lines, and the declaration of
// the Elts vector used below, are missing from this extract — confirm
// against the checked-in source.
585void llvm::extractVectorParts(Register Reg, unsigned NumElts,
587 MachineIRBuilder &MIRBuilder,
589 LLT RegTy = MRI.getType(Reg);
590 assert(RegTy.isVector() && "Expected a vector type");
591
592 LLT EltTy = RegTy.getElementType();
593 LLT NarrowTy = (NumElts == 1) ? EltTy : LLT::fixed_vector(NumElts, EltTy);
594 unsigned RegNumElts = RegTy.getNumElements();
595 unsigned LeftoverNumElts = RegNumElts % NumElts;
596 unsigned NumNarrowTyPieces = RegNumElts / NumElts;
597
598 // Perfect split without leftover
599 if (LeftoverNumElts == 0)
600 return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,
601 MRI);
602
603 // Irregular split. Provide direct access to all elements for artifact
604 // combiner using unmerge to elements. Then build vectors with NumElts
605 // elements. Remaining element(s) will be (used to build vector) Leftover.
607 extractParts(Reg, EltTy, RegNumElts, Elts, MIRBuilder, MRI);
608
609 unsigned Offset = 0;
610 // Requested sub-vectors of NarrowTy.
611 for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {
612 ArrayRef<Register> Pieces(&Elts[Offset], NumElts);
613 VRegs.push_back(MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
614 }
615
616 // Leftover element(s).
617 if (LeftoverNumElts == 1) {
618 VRegs.push_back(Elts[Offset]);
619 } else {
620 LLT LeftoverTy = LLT::fixed_vector(LeftoverNumElts, EltTy);
621 ArrayRef<Register> Pieces(&Elts[Offset], LeftoverNumElts);
622 VRegs.push_back(
623 MIRBuilder.buildMergeLikeInstr(LeftoverTy, Pieces).getReg(0));
624 }
625}
626
// Fragment of llvm::getOpcodeDef: returns the defining instruction (ignoring
// copies) when its opcode matches, else nullptr.
// NOTE(review): the signature line and the line obtaining DefMI (presumably
// via getDefIgnoringCopies) are missing from this extract.
628 const MachineRegisterInfo &MRI) {
630 return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
631}
632
633APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
634 if (Size == 32)
635 return APFloat(float(Val));
636 if (Size == 64)
637 return APFloat(Val);
638 if (Size != 16)
639 llvm_unreachable("Unsupported FPConstant size");
640 bool Ignored;
641 APFloat APF(Val);
642 APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
643 return APF;
644}
645
646std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
647 const Register Op1,
648 const Register Op2,
649 const MachineRegisterInfo &MRI) {
650 auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
651 if (!MaybeOp2Cst)
652 return std::nullopt;
653
654 auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
655 if (!MaybeOp1Cst)
656 return std::nullopt;
657
658 const APInt &C1 = MaybeOp1Cst->Value;
659 const APInt &C2 = MaybeOp2Cst->Value;
660 switch (Opcode) {
661 default:
662 break;
663 case TargetOpcode::G_ADD:
664 return C1 + C2;
665 case TargetOpcode::G_PTR_ADD:
666 // Types can be of different width here.
667 // Result needs to be the same width as C1, so trunc or sext C2.
668 return C1 + C2.sextOrTrunc(C1.getBitWidth());
669 case TargetOpcode::G_AND:
670 return C1 & C2;
671 case TargetOpcode::G_ASHR:
672 return C1.ashr(C2);
673 case TargetOpcode::G_LSHR:
674 return C1.lshr(C2);
675 case TargetOpcode::G_MUL:
676 return C1 * C2;
677 case TargetOpcode::G_OR:
678 return C1 | C2;
679 case TargetOpcode::G_SHL:
680 return C1 << C2;
681 case TargetOpcode::G_SUB:
682 return C1 - C2;
683 case TargetOpcode::G_XOR:
684 return C1 ^ C2;
685 case TargetOpcode::G_UDIV:
686 if (!C2.getBoolValue())
687 break;
688 return C1.udiv(C2);
689 case TargetOpcode::G_SDIV:
690 if (!C2.getBoolValue())
691 break;
692 return C1.sdiv(C2);
693 case TargetOpcode::G_UREM:
694 if (!C2.getBoolValue())
695 break;
696 return C1.urem(C2);
697 case TargetOpcode::G_SREM:
698 if (!C2.getBoolValue())
699 break;
700 return C1.srem(C2);
701 case TargetOpcode::G_SMIN:
702 return APIntOps::smin(C1, C2);
703 case TargetOpcode::G_SMAX:
704 return APIntOps::smax(C1, C2);
705 case TargetOpcode::G_UMIN:
706 return APIntOps::umin(C1, C2);
707 case TargetOpcode::G_UMAX:
708 return APIntOps::umax(C1, C2);
709 }
710
711 return std::nullopt;
712}
713
714std::optional<APFloat>
715llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
716 const Register Op2, const MachineRegisterInfo &MRI) {
717 const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
718 if (!Op2Cst)
719 return std::nullopt;
720
721 const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
722 if (!Op1Cst)
723 return std::nullopt;
724
725 APFloat C1 = Op1Cst->getValueAPF();
726 const APFloat &C2 = Op2Cst->getValueAPF();
727 switch (Opcode) {
728 case TargetOpcode::G_FADD:
729 C1.add(C2, APFloat::rmNearestTiesToEven);
730 return C1;
731 case TargetOpcode::G_FSUB:
732 C1.subtract(C2, APFloat::rmNearestTiesToEven);
733 return C1;
734 case TargetOpcode::G_FMUL:
735 C1.multiply(C2, APFloat::rmNearestTiesToEven);
736 return C1;
737 case TargetOpcode::G_FDIV:
738 C1.divide(C2, APFloat::rmNearestTiesToEven);
739 return C1;
740 case TargetOpcode::G_FREM:
741 C1.mod(C2);
742 return C1;
743 case TargetOpcode::G_FCOPYSIGN:
744 C1.copySign(C2);
745 return C1;
746 case TargetOpcode::G_FMINNUM:
747 return minnum(C1, C2);
748 case TargetOpcode::G_FMAXNUM:
749 return maxnum(C1, C2);
750 case TargetOpcode::G_FMINIMUM:
751 return minimum(C1, C2);
752 case TargetOpcode::G_FMAXIMUM:
753 return maximum(C1, C2);
754 case TargetOpcode::G_FMINNUM_IEEE:
755 case TargetOpcode::G_FMAXNUM_IEEE:
756 // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
757 // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
758 // and currently there isn't a nice wrapper in APFloat for the version with
759 // correct snan handling.
760 break;
761 default:
762 break;
763 }
764
765 return std::nullopt;
766}
767
// Element-wise constant folding of an integer binop over two G_BUILD_VECTOR
// operands. An empty SmallVector signals failure (non-build-vector operand or
// any element that fails to fold).
// NOTE(review): the return-type line is missing from this extract —
// presumably SmallVector<APInt>.
769llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
770 const Register Op2,
771 const MachineRegisterInfo &MRI) {
772 auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
773 if (!SrcVec2)
774 return SmallVector<APInt>();
775
776 auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
777 if (!SrcVec1)
778 return SmallVector<APInt>();
779
780 SmallVector<APInt> FoldedElements;
781 for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
782 auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
783 SrcVec2->getSourceReg(Idx), MRI);
// One unfoldable lane poisons the whole fold.
784 if (!MaybeCst)
785 return SmallVector<APInt>();
786 FoldedElements.push_back(*MaybeCst);
787 }
788 return FoldedElements;
789}
790
// llvm::isKnownNeverNaN: conservatively determine that Val can never be a
// NaN (or, with SNaN set, never a signaling NaN), based on fast-math flags,
// constants, and the semantics of the defining opcode.
// NOTE(review): the first signature line is missing from this extract —
// presumably (Register Val, const MachineRegisterInfo &MRI, ...).
792 bool SNaN) {
793 const MachineInstr *DefMI = MRI.getVRegDef(Val);
794 if (!DefMI)
795 return false;
796
// nnan flag or global no-NaNs math: the value is defined to be NaN-free.
797 const TargetMachine& TM = DefMI->getMF()->getTarget();
798 if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
799 return true;
800
801 // If the value is a constant, we can obviously see if it is a NaN or not.
802 if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
803 return !FPVal->getValueAPF().isNaN() ||
804 (SNaN && !FPVal->getValueAPF().isSignaling());
805 }
806
// A build_vector is NaN-free iff every element is.
807 if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
808 for (const auto &Op : DefMI->uses())
809 if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
810 return false;
811 return true;
812 }
813
814 switch (DefMI->getOpcode()) {
815 default:
816 break;
817 case TargetOpcode::G_FADD:
818 case TargetOpcode::G_FSUB:
819 case TargetOpcode::G_FMUL:
820 case TargetOpcode::G_FDIV:
821 case TargetOpcode::G_FREM:
822 case TargetOpcode::G_FSIN:
823 case TargetOpcode::G_FCOS:
824 case TargetOpcode::G_FMA:
825 case TargetOpcode::G_FMAD:
// Arithmetic ops quiet their inputs, so they never produce signaling NaNs.
826 if (SNaN)
827 return true;
828
829 // TODO: Need isKnownNeverInfinity
830 return false;
831 case TargetOpcode::G_FMINNUM_IEEE:
832 case TargetOpcode::G_FMAXNUM_IEEE: {
833 if (SNaN)
834 return true;
835 // This can return a NaN if either operand is an sNaN, or if both operands
836 // are NaN.
// NOTE(review): the second half of this return expression is missing from
// the extract — it presumably also tests operand 2, mirroring operand 1.
837 return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
841 }
842 case TargetOpcode::G_FMINNUM:
843 case TargetOpcode::G_FMAXNUM: {
844 // Only one needs to be known not-nan, since it will be returned if the
845 // other ends up being one.
// NOTE(review): the second disjunct (testing operand 2) is missing from the
// extract.
846 return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
848 }
849 }
850
851 if (SNaN) {
852 // FP operations quiet. For now, just handle the ones inserted during
853 // legalization.
854 switch (DefMI->getOpcode()) {
855 case TargetOpcode::G_FPEXT:
856 case TargetOpcode::G_FPTRUNC:
857 case TargetOpcode::G_FCANONICALIZE:
858 return true;
859 default:
860 return false;
861 }
862 }
863
864 return false;
865}
866
// llvm::inferAlignFromPtrInfo: derive a conservative alignment for a memory
// access described by MPO — from the frame object for fixed-stack accesses,
// from the IR pointer value when present, otherwise Align(1).
// NOTE(review): the first signature line is missing from this extract —
// presumably (MachineFunction &MF, ...).
868 const MachinePointerInfo &MPO) {
869 auto PSV = dyn_cast_if_present<const PseudoSourceValue *>(MPO.V);
870 if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
// Frame-index accesses: combine the object's alignment with the offset.
871 MachineFrameInfo &MFI = MF.getFrameInfo();
872 return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
873 MPO.Offset);
874 }
875
876 if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
877 const Module *M = MF.getFunction().getParent();
878 return V->getPointerAlignment(M->getDataLayout());
879 }
880
881 return Align(1);
882}
883
// llvm::getFunctionLiveInPhysReg: return the virtual register holding the
// live-in value of PhysReg, creating the live-in and/or re-inserting the
// entry-block COPY from PhysReg when needed.
// NOTE(review): the first signature line and the local MRI declaration
// (presumably MF.getRegInfo()) are missing from this extract.
885 const TargetInstrInfo &TII,
886 MCRegister PhysReg,
887 const TargetRegisterClass &RC,
888 const DebugLoc &DL, LLT RegTy) {
889 MachineBasicBlock &EntryMBB = MF.front();
891 Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
892 if (LiveIn) {
893 MachineInstr *Def = MRI.getVRegDef(LiveIn);
// An existing def means the live-in copy is already in place: reuse it.
894 if (Def) {
895 // FIXME: Should the verifier check this is in the entry block?
896 assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
897 return LiveIn;
898 }
899
900 // It's possible the incoming argument register and copy was added during
901 // lowering, but later deleted due to being/becoming dead. If this happens,
902 // re-insert the copy.
903 } else {
904 // The live in register was not present, so add it.
905 LiveIn = MF.addLiveIn(PhysReg, &RC);
906 if (RegTy.isValid())
907 MRI.setType(LiveIn, RegTy);
908 }
909
// (Re)materialize the COPY at the top of the entry block and make sure the
// physreg is recorded as a block live-in.
910 BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
911 .addReg(PhysReg);
912 if (!EntryMBB.isLiveIn(PhysReg))
913 EntryMBB.addLiveIn(PhysReg);
914 return LiveIn;
915}
916
917std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
918 const Register Op1, uint64_t Imm,
919 const MachineRegisterInfo &MRI) {
920 auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
921 if (MaybeOp1Cst) {
922 switch (Opcode) {
923 default:
924 break;
925 case TargetOpcode::G_SEXT_INREG: {
926 LLT Ty = MRI.getType(Op1);
927 return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
928 }
929 }
930 }
931 return std::nullopt;
932}
933
934std::optional<APInt> llvm::ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
935 const Register Op0,
936 const MachineRegisterInfo &MRI) {
937 std::optional<APInt> Val = getIConstantVRegVal(Op0, MRI);
938 if (!Val)
939 return Val;
940
941 const unsigned DstSize = DstTy.getScalarSizeInBits();
942
943 switch (Opcode) {
944 case TargetOpcode::G_SEXT:
945 return Val->sext(DstSize);
946 case TargetOpcode::G_ZEXT:
947 case TargetOpcode::G_ANYEXT:
948 // TODO: DAG considers target preference when constant folding any_extend.
949 return Val->zext(DstSize);
950 default:
951 break;
952 }
953
954 llvm_unreachable("unexpected cast opcode to constant fold");
955}
956
957std::optional<APFloat>
958llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
959 const MachineRegisterInfo &MRI) {
960 assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
961 if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
962 APFloat DstVal(getFltSemanticForLLT(DstTy));
963 DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
964 APFloat::rmNearestTiesToEven);
965 return DstVal;
966 }
967 return std::nullopt;
968}
969
// Constant-fold a count-zeros style operation: apply CB to each constant
// scalar (or each element of a G_BUILD_VECTOR) defining Src. std::nullopt
// when any element is not a G_CONSTANT.
// NOTE(review): the name line is missing from this extract — presumably
// llvm::ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI,
// ...).
970std::optional<SmallVector<unsigned>>
972 std::function<unsigned(APInt)> CB) {
973 LLT Ty = MRI.getType(Src);
974 SmallVector<unsigned> FoldedCTLZs;
975 auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
976 auto MaybeCst = getIConstantVRegVal(R, MRI);
977 if (!MaybeCst)
978 return std::nullopt;
979 return CB(*MaybeCst);
980 };
981 if (Ty.isVector()) {
982 // Try to constant fold each element.
983 auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
984 if (!BV)
985 return std::nullopt;
986 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
987 if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
988 FoldedCTLZs.emplace_back(*MaybeFold);
989 continue;
990 }
// One unfoldable lane aborts the whole vector fold.
991 return std::nullopt;
992 }
993 return FoldedCTLZs;
994 }
995 if (auto MaybeCst = tryFoldScalar(Src)) {
996 FoldedCTLZs.emplace_back(*MaybeCst);
997 return FoldedCTLZs;
998 }
999 return std::nullopt;
1000}
1001
1002std::optional<SmallVector<APInt>>
1003llvm::ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
1004 const MachineRegisterInfo &MRI) {
1005 LLT Ty = MRI.getType(Op1);
1006 if (Ty != MRI.getType(Op2))
1007 return std::nullopt;
1008
1009 auto TryFoldScalar = [&MRI, Pred](Register LHS,
1010 Register RHS) -> std::optional<APInt> {
1011 auto LHSCst = getIConstantVRegVal(LHS, MRI);
1012 auto RHSCst = getIConstantVRegVal(RHS, MRI);
1013 if (!LHSCst || !RHSCst)
1014 return std::nullopt;
1015
1016 switch (Pred) {
1017 case CmpInst::Predicate::ICMP_EQ:
1018 return APInt(/*numBits=*/1, LHSCst->eq(*RHSCst));
1019 case CmpInst::Predicate::ICMP_NE:
1020 return APInt(/*numBits=*/1, LHSCst->ne(*RHSCst));
1021 case CmpInst::Predicate::ICMP_UGT:
1022 return APInt(/*numBits=*/1, LHSCst->ugt(*RHSCst));
1023 case CmpInst::Predicate::ICMP_UGE:
1024 return APInt(/*numBits=*/1, LHSCst->uge(*RHSCst));
1025 case CmpInst::Predicate::ICMP_ULT:
1026 return APInt(/*numBits=*/1, LHSCst->ult(*RHSCst));
1027 case CmpInst::Predicate::ICMP_ULE:
1028 return APInt(/*numBits=*/1, LHSCst->ule(*RHSCst));
1029 case CmpInst::Predicate::ICMP_SGT:
1030 return APInt(/*numBits=*/1, LHSCst->sgt(*RHSCst));
1031 case CmpInst::Predicate::ICMP_SGE:
1032 return APInt(/*numBits=*/1, LHSCst->sge(*RHSCst));
1033 case CmpInst::Predicate::ICMP_SLT:
1034 return APInt(/*numBits=*/1, LHSCst->slt(*RHSCst));
1035 case CmpInst::Predicate::ICMP_SLE:
1036 return APInt(/*numBits=*/1, LHSCst->sle(*RHSCst));
1037 default:
1038 return std::nullopt;
1039 }
1040 };
1041
1042 SmallVector<APInt> FoldedICmps;
1043
1044 if (Ty.isVector()) {
1045 // Try to constant fold each element.
1046 auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);
1047 auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);
1048 if (!BV1 || !BV2)
1049 return std::nullopt;
1050 assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");
1051 for (unsigned I = 0; I < BV1->getNumSources(); ++I) {
1052 if (auto MaybeFold =
1053 TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {
1054 FoldedICmps.emplace_back(*MaybeFold);
1055 continue;
1056 }
1057 return std::nullopt;
1058 }
1059 return FoldedICmps;
1060 }
1061
1062 if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {
1063 FoldedICmps.emplace_back(*MaybeCst);
1064 return FoldedICmps;
1065 }
1066
1067 return std::nullopt;
1068}
1069
1071 GISelKnownBits *KB) {
1072 std::optional<DefinitionAndSourceRegister> DefSrcReg =
1074 if (!DefSrcReg)
1075 return false;
1076
1077 const MachineInstr &MI = *DefSrcReg->MI;
1078 const LLT Ty = MRI.getType(Reg);
1079
1080 switch (MI.getOpcode()) {
1081 case TargetOpcode::G_CONSTANT: {
1082 unsigned BitWidth = Ty.getScalarSizeInBits();
1083 const ConstantInt *CI = MI.getOperand(1).getCImm();
1084 return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
1085 }
1086 case TargetOpcode::G_SHL: {
1087 // A left-shift of a constant one will have exactly one bit set because
1088 // shifting the bit off the end is undefined.
1089
1090 // TODO: Constant splat
1091 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1092 if (*ConstLHS == 1)
1093 return true;
1094 }
1095
1096 break;
1097 }
1098 case TargetOpcode::G_LSHR: {
1099 if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
1100 if (ConstLHS->isSignMask())
1101 return true;
1102 }
1103
1104 break;
1105 }
1106 case TargetOpcode::G_BUILD_VECTOR: {
1107 // TODO: Probably should have a recursion depth guard since you could have
1108 // bitcasted vector elements.
1109 for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
1110 if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
1111 return false;
1112
1113 return true;
1114 }
1115 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1116 // Only handle constants since we would need to know if number of leading
1117 // zeros is greater than the truncation amount.
1118 const unsigned BitWidth = Ty.getScalarSizeInBits();
1119 for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
1120 auto Const = getIConstantVRegVal(MO.getReg(), MRI);
1121 if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
1122 return false;
1123 }
1124
1125 return true;
1126 }
1127 default:
1128 break;
1129 }
1130
1131 if (!KB)
1132 return false;
1133
1134 // More could be done here, though the above checks are enough
1135 // to handle some common cases.
1136
1137 // Fall back to computeKnownBits to catch other known cases.
1138 KnownBits Known = KB->getKnownBits(Reg);
1139 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
1140}
1141
1144}
1145
// Computes a "least common multiple" type of OrigTy and TargetTy, intended
// for building MERGE/UNMERGE sequences. Prefers OrigTy's element/scalar type
// and preserves pointer types when sizes already match.
// NOTE(review): the extraction dropped doxygen source lines 1168 (the
// initializer of `Mul` — presumably an ElementCount::get(...) product of the
// two known-min element counts) and 1194 (the second std::lcm argument's
// continuation — presumably the vector's known-min element count factor).
// Restore both from upstream before compiling.
1146LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
1147 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1148 return OrigTy;
1149
1150 if (OrigTy.isVector() && TargetTy.isVector()) {
1151 LLT OrigElt = OrigTy.getElementType();
1152 LLT TargetElt = TargetTy.getElementType();
1153
1154 // TODO: The docstring for this function says the intention is to use this
1155 // function to build MERGE/UNMERGE instructions. It won't be the case that
1156 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1157 // could implement getLCMType between the two in the future if there was a
1158 // need, but it is not worth it now as this function should not be used in
1159 // that way.
1160 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1161 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1162 "getLCMType not implemented between fixed and scalable vectors.");
1163
1164 if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
1165 int GCDMinElts = std::gcd(OrigTy.getElementCount().getKnownMinValue(),
1166 TargetTy.getElementCount().getKnownMinValue());
1167 // Prefer the original element type.
1169 TargetTy.getElementCount().getKnownMinValue());
1170 return LLT::vector(Mul.divideCoefficientBy(GCDMinElts),
1171 OrigTy.getElementType());
1172 }
1173 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getKnownMinValue(),
1174 TargetTy.getSizeInBits().getKnownMinValue());
1175 return LLT::vector(
1176 ElementCount::get(LCM / OrigElt.getSizeInBits(), OrigTy.isScalable()),
1177 OrigElt);
1178 }
1179
1180 // One type is scalar, one type is vector
1181 if (OrigTy.isVector() || TargetTy.isVector()) {
1182 LLT VecTy = OrigTy.isVector() ? OrigTy : TargetTy;
1183 LLT ScalarTy = OrigTy.isVector() ? TargetTy : OrigTy;
1184 LLT EltTy = VecTy.getElementType();
1185 LLT OrigEltTy = OrigTy.isVector() ? OrigTy.getElementType() : OrigTy;
1186
1187 // Prefer scalar type from OrigTy.
1188 if (EltTy.getSizeInBits() == ScalarTy.getSizeInBits())
1189 return LLT::vector(VecTy.getElementCount(), OrigEltTy);
1190
1191 // Different size scalars. Create vector with the same total size.
1192 // LCM will take fixed/scalable from VecTy.
1193 unsigned LCM = std::lcm(EltTy.getSizeInBits().getFixedValue() *
1195 ScalarTy.getSizeInBits().getFixedValue());
1196 // Prefer type from OrigTy
1197 return LLT::vector(ElementCount::get(LCM / OrigEltTy.getSizeInBits(),
1198 VecTy.getElementCount().isScalable()),
1199 OrigEltTy);
1200 }
1201
1202 // At this point, both types are scalars of different size
1203 unsigned LCM = std::lcm(OrigTy.getSizeInBits().getFixedValue(),
1204 TargetTy.getSizeInBits().getFixedValue());
1205 // Preserve pointer types.
1206 if (LCM == OrigTy.getSizeInBits())
1207 return OrigTy;
1208 if (LCM == TargetTy.getSizeInBits())
1209 return TargetTy;
1210 return LLT::scalar(LCM);
1211}
1212
// Returns a type that covers both OrigTy and TargetTy for splitting/merging.
// Same-element-size vectors get the element count rounded up to a multiple of
// TargetTy's count; everything else defers to getLCMType.
// NOTE(review): the extraction dropped doxygen source lines 1217 (the
// statement preceding the "not implemented between fixed and scalable"
// message — presumably a report_fatal_error(...) call) and 1230 (the return
// expression consuming NumElts — presumably an LLT::vector/scalarOrVector
// construction). Restore both from upstream before compiling.
1213LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
1214
1215 if ((OrigTy.isScalableVector() && TargetTy.isFixedVector()) ||
1216 (OrigTy.isFixedVector() && TargetTy.isScalableVector()))
1218 "getCoverTy not implemented between fixed and scalable vectors.");
1219
1220 if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
1221 (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
1222 return getLCMType(OrigTy, TargetTy);
1223
1224 unsigned OrigTyNumElts = OrigTy.getElementCount().getKnownMinValue();
1225 unsigned TargetTyNumElts = TargetTy.getElementCount().getKnownMinValue();
1226 if (OrigTyNumElts % TargetTyNumElts == 0)
1227 return OrigTy;
1228
  // Round the element count up so TargetTy-sized chunks tile it exactly.
1229 unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
1231 OrigTy.getElementType());
1232}
1233
// Computes a "greatest common divisor" type of OrigTy and TargetTy, intended
// for building MERGE/UNMERGE sequences. Prefers OrigTy's element type when the
// GCD of the bit sizes allows it.
// NOTE(review): the extraction dropped doxygen source lines 1254, 1259 and
// 1263 — the first halves of three return expressions (presumably
// LLT::scalarOrVector / LLT::scalar / ElementCount::get constructions using
// GCD). Restore them from upstream before compiling.
1234LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
1235 if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
1236 return OrigTy;
1237
1238 if (OrigTy.isVector() && TargetTy.isVector()) {
1239 LLT OrigElt = OrigTy.getElementType();
1240
1241 // TODO: The docstring for this function says the intention is to use this
1242 // function to build MERGE/UNMERGE instructions. It won't be the case that
1243 // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
1244 // could implement getGCDType between the two in the future if there was a
1245 // need, but it is not worth it now as this function should not be used in
1246 // that way.
1247 assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
1248 (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
1249 "getGCDType not implemented between fixed and scalable vectors.");
1250
1251 unsigned GCD = std::gcd(OrigTy.getSizeInBits().getKnownMinValue(),
1252 TargetTy.getSizeInBits().getKnownMinValue());
1253 if (GCD == OrigElt.getSizeInBits())
1255 OrigElt);
1256
1257 // Cannot produce original element type, but both have vscale in common.
1258 if (GCD < OrigElt.getSizeInBits())
1260 GCD);
1261
1262 return LLT::vector(
1264 OrigTy.isScalable()),
1265 OrigElt);
1266 }
1267
1268 // If one type is vector and the element size matches the scalar size, then
1269 // the gcd is the scalar type.
1270 if (OrigTy.isVector() &&
1271 OrigTy.getElementType().getSizeInBits() == TargetTy.getSizeInBits())
1272 return OrigTy.getElementType();
1273 if (TargetTy.isVector() &&
1274 TargetTy.getElementType().getSizeInBits() == OrigTy.getSizeInBits())
1275 return OrigTy;
1276
1277 // At this point, both types are either scalars of different type or one is a
1278 // vector and one is a scalar. If both types are scalars, the GCD type is the
1279 // GCD between the two scalar sizes. If one is vector and one is scalar, then
1280 // the GCD type is the GCD between the scalar and the vector element size.
1281 LLT OrigScalar = OrigTy.getScalarType();
1282 LLT TargetScalar = TargetTy.getScalarType();
1283 unsigned GCD = std::gcd(OrigScalar.getSizeInBits().getFixedValue(),
1284 TargetScalar.getSizeInBits().getFixedValue());
1285 return LLT::scalar(GCD);
1286}
1287
// Returns the splat index of a G_SHUFFLE_VECTOR's mask, 0 if every lane is
// undef, or nullopt if the mask is not a splat.
// NOTE(review): the extraction dropped doxygen source line 1288 — the
// declarator (presumably std::optional<int> llvm::getSplatIndex(
// MachineInstr &MI)). Restore it from upstream before compiling.
1289 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
1290 "Only G_SHUFFLE_VECTOR can have a splat index!");
1291 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
1292 auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
1293
1294 // If all elements are undefined, this shuffle can be considered a splat.
1295 // Return 0 for better potential for callers to simplify.
1296 if (FirstDefinedIdx == Mask.end())
1297 return 0;
1298
1299 // Make sure all remaining elements are either undef or the same
1300 // as the first non-undef value.
1301 int SplatValue = *FirstDefinedIdx;
1302 if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
1303 [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
1304 return std::nullopt;
1305
1306 return SplatValue;
1307}
1308
1309static bool isBuildVectorOp(unsigned Opcode) {
1310 return Opcode == TargetOpcode::G_BUILD_VECTOR ||
1311 Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
1312}
1313
1314namespace {
1315
// Returns the common constant (and its vreg) if VReg is a constant splat
// built by G_BUILD_VECTOR(_TRUNC) or, recursively, G_CONCAT_VECTORS; nullopt
// otherwise. With AllowUndef, undef elements are treated as matching.
// NOTE(review): the extraction dropped doxygen source lines 1319 (the
// definition of `MI` — presumably getDefIgnoringCopies(VReg, MRI)) and 1335
// (the non-concat branch of the conditional — presumably a
// getAnyConstantVRegValWithLookThrough(Element, MRI) call). Restore both from
// upstream before compiling.
1316std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
1317 const MachineRegisterInfo &MRI,
1318 bool AllowUndef) {
1320 if (!MI)
1321 return std::nullopt;
1322
1323 bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
1324 if (!isBuildVectorOp(MI->getOpcode()) && !isConcatVectorsOp)
1325 return std::nullopt;
1326
1327 std::optional<ValueAndVReg> SplatValAndReg;
1328 for (MachineOperand &Op : MI->uses()) {
1329 Register Element = Op.getReg();
1330 // If we have a G_CONCAT_VECTOR, we recursively look into the
1331 // vectors that we're concatenating to see if they're splats.
1332 auto ElementValAndReg =
1333 isConcatVectorsOp
1334 ? getAnyConstantSplat(Element, MRI, AllowUndef)
1336
1337 // If AllowUndef, treat undef as value that will result in a constant splat.
1338 if (!ElementValAndReg) {
1339 if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1340 continue;
1341 return std::nullopt;
1342 }
1343
1344 // Record splat value
1345 if (!SplatValAndReg)
1346 SplatValAndReg = ElementValAndReg;
1347
1348 // Different constant than the one already recorded, not a constant splat.
1349 if (SplatValAndReg->Value != ElementValAndReg->Value)
1350 return std::nullopt;
1351 }
1352
1353 return SplatValAndReg;
1354}
1355
1356} // end anonymous namespace
1357
// Returns true if Reg is a build-vector whose elements all equal SplatValue.
// NOTE(review): the extraction dropped doxygen source line 1358 — the
// declarator (presumably bool llvm::isBuildVectorConstantSplat(
// const Register Reg,). Restore it from upstream before compiling.
1359 const MachineRegisterInfo &MRI,
1360 int64_t SplatValue, bool AllowUndef) {
1361 if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1362 return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
1363 return false;
1364}
1365
// Convenience overload: checks MI's def register for a SplatValue splat.
// NOTE(review): the extraction dropped doxygen source line 1366 — the
// declarator (presumably bool llvm::isBuildVectorConstantSplat(
// const MachineInstr &MI,). Restore it from upstream before compiling.
1367 const MachineRegisterInfo &MRI,
1368 int64_t SplatValue, bool AllowUndef) {
1369 return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1370 AllowUndef);
1371}
1372
// Returns the splat constant of Reg as an APInt, or nullopt if Reg is not a
// constant splat. Undef elements are NOT allowed here.
// NOTE(review): the extraction dropped doxygen source line 1374 — the
// declarator (presumably llvm::getIConstantSplatVal(const Register Reg,
// const MachineRegisterInfo &MRI) {). Restore it from upstream.
1373std::optional<APInt>
1375 if (auto SplatValAndReg =
1376 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
1377 if (std::optional<ValueAndVReg> ValAndVReg =
1378 getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI))
1379 return ValAndVReg->Value;
1380 }
1381
1382 return std::nullopt;
1383}
1384
// Convenience overload: reads the splat constant off MI's def register.
// NOTE(review): the extraction dropped doxygen source line 1386 — the
// declarator's first half (presumably llvm::getIConstantSplatVal(
// const MachineInstr &MI,). Restore it from upstream.
1385std::optional<APInt>
1387 const MachineRegisterInfo &MRI) {
1388 return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
1389}
1390
// Returns the splat constant of Reg sign-extended to int64_t, or nullopt.
// NOTE(review): the extraction dropped doxygen source line 1392 — the
// declarator's first half (presumably llvm::getIConstantSplatSExtVal(
// const Register Reg,). Restore it from upstream.
1391std::optional<int64_t>
1393 const MachineRegisterInfo &MRI) {
1394 if (auto SplatValAndReg =
1395 getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
1396 return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1397 return std::nullopt;
1398}
1399
// Convenience overload: sign-extended splat constant of MI's def register.
// NOTE(review): the extraction dropped doxygen source line 1401 — the
// declarator's first half (presumably llvm::getIConstantSplatSExtVal(
// const MachineInstr &MI,). Restore it from upstream.
1400std::optional<int64_t>
1402 const MachineRegisterInfo &MRI) {
1403 return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
1404}
1405
// Returns the FP splat constant (and its vreg) of VReg, or nullopt if VReg is
// not a floating-point constant splat.
// NOTE(review): the extraction dropped doxygen source line 1407 — the
// declarator's first half (presumably llvm::getFConstantSplat(Register VReg,
// const MachineRegisterInfo &MRI,). Restore it from upstream.
1406std::optional<FPValueAndVReg>
1408 bool AllowUndef) {
1409 if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1410 return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1411 return std::nullopt;
1412}
1413
// Returns true if MI is a build-vector all of whose elements are 0.
// NOTE(review): the extraction dropped doxygen source line 1414 — the
// declarator's first half (presumably bool llvm::isBuildVectorAllZeros(
// const MachineInstr &MI,). Restore it from upstream.
1415 const MachineRegisterInfo &MRI,
1416 bool AllowUndef) {
1417 return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1418}
1419
// Returns true if MI is a build-vector all of whose elements are -1 (all-ones).
// NOTE(review): the extraction dropped doxygen source line 1420 — the
// declarator's first half (presumably bool llvm::isBuildVectorAllOnes(
// const MachineInstr &MI,). Restore it from upstream.
1421 const MachineRegisterInfo &MRI,
1422 bool AllowUndef) {
1423 return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1424}
1425
// If MI is a build-vector splat, returns it either as a constant (when the
// elements fold to one integer) or as the common source register; nullopt when
// the operands differ.
// NOTE(review): the extraction dropped doxygen source line 1427 — the
// declarator (presumably llvm::getVectorSplat(const MachineInstr &MI,
// const MachineRegisterInfo &MRI) {). Restore it from upstream.
1426std::optional<RegOrConstant>
1428 unsigned Opc = MI.getOpcode();
1429 if (!isBuildVectorOp(Opc))
1430 return std::nullopt;
1431 if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
1432 return RegOrConstant(*Splat);
  // Not a constant splat: all source operands must be the very same register.
1433 auto Reg = MI.getOperand(1).getReg();
1434 if (any_of(drop_begin(MI.operands(), 2),
1435 [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1436 return std::nullopt;
1437 return RegOrConstant(Reg);
1438}
1439
// Returns true if MI defines a scalar constant: G_CONSTANT/G_IMPLICIT_DEF
// always, G_FCONSTANT when AllowFP, and address-like opaque constants
// (global/frame-index/block-addr/jump-table) when AllowOpaqueConstants.
// NOTE(review): the extraction dropped doxygen source line 1440 — the
// declarator's first half (static bool isConstantScalar(const MachineInstr
// &MI, per the doxygen member index). Restore it from upstream.
1441 const MachineRegisterInfo &MRI,
1442 bool AllowFP = true,
1443 bool AllowOpaqueConstants = true) {
1444 switch (MI.getOpcode()) {
1445 case TargetOpcode::G_CONSTANT:
1446 case TargetOpcode::G_IMPLICIT_DEF:
1447 return true;
1448 case TargetOpcode::G_FCONSTANT:
1449 return AllowFP;
1450 case TargetOpcode::G_GLOBAL_VALUE:
1451 case TargetOpcode::G_FRAME_INDEX:
1452 case TargetOpcode::G_BLOCK_ADDR:
1453 case TargetOpcode::G_JUMP_TABLE:
1454 return AllowOpaqueConstants;
1455 default:
1456 return false;
1457 }
1458}
1459
// Returns true if MI's def is an integer constant (with look-through) or a
// build-vector whose every element is an integer constant or undef.
// NOTE(review): the extraction dropped doxygen source lines 1460 (the
// declarator — presumably bool llvm::isConstantOrConstantVector(
// MachineInstr &MI,) and 1469 (the first half of the element test —
// presumably `if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx),
// MRI) ||`). Restore both from upstream before compiling.
1461 const MachineRegisterInfo &MRI) {
1462 Register Def = MI.getOperand(0).getReg();
1463 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1464 return true;
1465 GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
1466 if (!BV)
1467 return false;
1468 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1470 getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
1471 continue;
1472 return false;
1473 }
1474 return true;
1475}
1476
// Returns true if MI is a constant scalar, or a build-vector whose every
// element's defining instruction is a constant scalar (per isConstantScalar's
// AllowFP/AllowOpaqueConstants flags).
// NOTE(review): the extraction dropped doxygen source line 1477 — the
// declarator's first half (presumably bool llvm::isConstantOrConstantVector(
// const MachineInstr &MI,). Restore it from upstream.
1478 const MachineRegisterInfo &MRI,
1479 bool AllowFP, bool AllowOpaqueConstants) {
1480 if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
1481 return true;
1482
1483 if (!isBuildVectorOp(MI.getOpcode()))
1484 return false;
1485
  // Operand 0 is the def; elements start at operand 1.
1486 const unsigned NumOps = MI.getNumOperands();
1487 for (unsigned I = 1; I != NumOps; ++I) {
1488 const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
1489 if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
1490 return false;
1491 }
1492
1493 return true;
1494}
1495
// Returns MI's value as an APInt if it is an integer constant or an integer
// constant splat; the splat value is sign-extended to the scalar width.
// NOTE(review): the extraction dropped doxygen source line 1497 — the
// declarator (presumably llvm::getConstantOrConstantSplatVector(
// MachineInstr &MI,). Restore it from upstream.
1496std::optional<APInt>
1498 const MachineRegisterInfo &MRI) {
1499 Register Def = MI.getOperand(0).getReg();
1500 if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1501 return C->Value;
1502 auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
1503 if (!MaybeCst)
1504 return std::nullopt;
1505 const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
  // isSigned=true: rebuild the APInt by sign-extending the int64_t splat.
1506 return APInt(ScalarSize, *MaybeCst, true);
1507}
1508
// Returns true if MI produces zero: integer 0, positive FP zero (-0.0 is
// rejected), undef when AllowUndefs, or (with AllowUndefs) an all-zeros
// build-vector.
// NOTE(review): the extraction dropped doxygen source line 1509 — the
// declarator's first half (presumably bool llvm::isNullOrNullSplat(
// const MachineInstr &MI,). Restore it from upstream.
1510 const MachineRegisterInfo &MRI, bool AllowUndefs) {
1511 switch (MI.getOpcode()) {
1512 case TargetOpcode::G_IMPLICIT_DEF:
1513 return AllowUndefs;
1514 case TargetOpcode::G_CONSTANT:
1515 return MI.getOperand(1).getCImm()->isNullValue();
1516 case TargetOpcode::G_FCONSTANT: {
1517 const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
1518 return FPImm->isZero() && !FPImm->isNegative();
1519 }
1520 default:
1521 if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
1522 return false;
1523 return isBuildVectorAllZeros(MI, MRI);
1524 }
1525}
1526
// Returns true if MI produces all-ones: integer -1, undef when AllowUndefs,
// or (with AllowUndefs) an all-ones build-vector.
// NOTE(review): the extraction dropped doxygen source line 1527 — the
// declarator's first half (presumably bool llvm::isAllOnesOrAllOnesSplat(
// const MachineInstr &MI,). Restore it from upstream.
1528 const MachineRegisterInfo &MRI,
1529 bool AllowUndefs) {
1530 switch (MI.getOpcode()) {
1531 case TargetOpcode::G_IMPLICIT_DEF:
1532 return AllowUndefs;
1533 case TargetOpcode::G_CONSTANT:
1534 return MI.getOperand(1).getCImm()->isAllOnesValue();
1535 default:
1536 if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
1537 return false;
1538 return isBuildVectorAllOnes(MI, MRI);
1539 }
1540}
1541
// Applies Match to the constant behind Reg: directly for G_CONSTANT, and per
// element for G_BUILD_VECTOR. Undef defs invoke Match(nullptr) when
// AllowUndefs is set. Returns true only if every checked element matches.
// NOTE(review): the extraction dropped doxygen source line 1542 — the
// declarator's first half (presumably bool llvm::matchUnaryPredicate().
// Restore it from upstream.
1543 const MachineRegisterInfo &MRI, Register Reg,
1544 std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1545
1546 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1547 if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1548 return Match(nullptr);
1549
1550 // TODO: Also handle fconstant
1551 if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1552 return Match(Def->getOperand(1).getCImm());
1553
1554 if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1555 return false;
1556
1557 for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1558 Register SrcElt = Def->getOperand(I).getReg();
1559 const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1560 if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1561 if (!Match(nullptr))
1562 return false;
1563 continue;
1564 }
1565
1566 if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1567 !Match(SrcDef->getOperand(1).getCImm()))
1568 return false;
1569 }
1570
1571 return true;
1572}
1573
1574bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1575 bool IsFP) {
1576 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1577 case TargetLowering::UndefinedBooleanContent:
1578 return Val & 0x1;
1579 case TargetLowering::ZeroOrOneBooleanContent:
1580 return Val == 1;
1581 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1582 return Val == -1;
1583 }
1584 llvm_unreachable("Invalid boolean contents");
1585}
1586
1587bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
1588 bool IsVector, bool IsFP) {
1589 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1590 case TargetLowering::UndefinedBooleanContent:
1591 return ~Val & 0x1;
1592 case TargetLowering::ZeroOrOneBooleanContent:
1593 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1594 return Val == 0;
1595 }
1596 llvm_unreachable("Invalid boolean contents");
1597}
1598
1599int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1600 bool IsFP) {
1601 switch (TLI.getBooleanContents(IsVector, IsFP)) {
1602 case TargetLowering::UndefinedBooleanContent:
1603 case TargetLowering::ZeroOrOneBooleanContent:
1604 return 1;
1605 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1606 return -1;
1607 }
1608 llvm_unreachable("Invalid boolean contents");
1609}
1610
// Body fragment of an optimize-for-size query on MBB's parent function.
// NOTE(review): the extraction dropped doxygen source lines 1611-1612 (the
// declarator of this function) and 1615 (the third disjunct of the return —
// presumably a profile-guided size query taking MBB.getBasicBlock()).
// Restore them from upstream before compiling.
1613 const auto &F = MBB.getParent()->getFunction();
1614 return F.hasOptSize() || F.hasMinSize() ||
1616}
1617
// Erases MI after queueing the defining instructions of its virtual-register
// uses into DeadInstChain (so callers can re-check them for deadness), and
// notifies the debug-loc observer if one is provided.
// NOTE(review): the extraction dropped doxygen source line 1618 — the
// declarator's first half (saveUsesAndErase taking MachineInstr &MI and
// MachineRegisterInfo &MRI, per the callers below). Restore it from upstream.
1619 LostDebugLocObserver *LocObserver,
1620 SmallInstListTy &DeadInstChain) {
1621 for (MachineOperand &Op : MI.uses()) {
1622 if (Op.isReg() && Op.getReg().isVirtual())
1623 DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1624 }
1625 LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
  // MI itself must not remain in the worklist it may have been queued in.
1626 DeadInstChain.remove(&MI);
1627 MI.eraseFromParent();
1628 if (LocObserver)
1629 LocObserver->checkpoint(false);
1630}
1631
// Erases every instruction in DeadInstrs, then transitively erases any
// feeding instruction that became trivially dead as a result.
// NOTE(review): the extraction dropped doxygen source lines 1632-1633 — the
// declarator (eraseInstrs taking the dead-instruction list and
// MachineRegisterInfo &MRI, per the call in eraseInstr below). Restore it
// from upstream.
1634 LostDebugLocObserver *LocObserver) {
1635 SmallInstListTy DeadInstChain;
1636 for (MachineInstr *MI : DeadInstrs)
1637 saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1638
  // Worklist loop: producers queued by saveUsesAndErase may now be dead too.
1639 while (!DeadInstChain.empty()) {
1640 MachineInstr *Inst = DeadInstChain.pop_back_val();
1641 if (!isTriviallyDead(*Inst, MRI))
1642 continue;
1643 saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1644 }
1645}
1646
// Single-instruction convenience wrapper around eraseInstrs.
// NOTE(review): the extraction dropped doxygen source line 1647 — the
// declarator's first half (eraseInstr taking MachineInstr &MI and
// MachineRegisterInfo &MRI, per the forwarded call). Restore it from upstream.
1648 LostDebugLocObserver *LocObserver) {
1649 return eraseInstrs({&MI}, MRI, LocObserver);
1650}
1651
// Collects, for each register defined by MI, the fully-formed DBG_VALUE
// operands using it, so their debug info can be salvaged before MI goes away.
// NOTE(review): the extraction dropped doxygen source lines 1652 (the
// declarator), 1656 (the declaration of the DbgUsers container populated
// below) and 1666 (the salvage call consuming DbgUsers inside the if).
// Restore all three from upstream before compiling.
1653 for (auto &Def : MI.defs()) {
1654 assert(Def.isReg() && "Must be a reg");
1655
1657 for (auto &MOUse : MRI.use_operands(Def.getReg())) {
1658 MachineInstr *DbgValue = MOUse.getParent();
1659 // Ignore partially formed DBG_VALUEs.
1660 if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
1661 DbgUsers.push_back(&MOUse);
1662 }
1663 }
1664
1665 if (!DbgUsers.empty()) {
1667 }
1668 }
1669}
1670
// Returns true if Opc is one of the generic (pre-ISel) floating-point
// opcodes listed below.
// NOTE(review): the extraction dropped doxygen source line 1671 — the
// declarator (presumably bool llvm::isPreISelGenericFloatingPointOpcode(
// unsigned Opc) {). Restore it from upstream before compiling.
1672 switch (Opc) {
1673 case TargetOpcode::G_FABS:
1674 case TargetOpcode::G_FADD:
1675 case TargetOpcode::G_FCANONICALIZE:
1676 case TargetOpcode::G_FCEIL:
1677 case TargetOpcode::G_FCONSTANT:
1678 case TargetOpcode::G_FCOPYSIGN:
1679 case TargetOpcode::G_FCOS:
1680 case TargetOpcode::G_FDIV:
1681 case TargetOpcode::G_FEXP2:
1682 case TargetOpcode::G_FEXP:
1683 case TargetOpcode::G_FFLOOR:
1684 case TargetOpcode::G_FLOG10:
1685 case TargetOpcode::G_FLOG2:
1686 case TargetOpcode::G_FLOG:
1687 case TargetOpcode::G_FMA:
1688 case TargetOpcode::G_FMAD:
1689 case TargetOpcode::G_FMAXIMUM:
1690 case TargetOpcode::G_FMAXNUM:
1691 case TargetOpcode::G_FMAXNUM_IEEE:
1692 case TargetOpcode::G_FMINIMUM:
1693 case TargetOpcode::G_FMINNUM:
1694 case TargetOpcode::G_FMINNUM_IEEE:
1695 case TargetOpcode::G_FMUL:
1696 case TargetOpcode::G_FNEARBYINT:
1697 case TargetOpcode::G_FNEG:
1698 case TargetOpcode::G_FPEXT:
1699 case TargetOpcode::G_FPOW:
1700 case TargetOpcode::G_FPTRUNC:
1701 case TargetOpcode::G_FREM:
1702 case TargetOpcode::G_FRINT:
1703 case TargetOpcode::G_FSIN:
1704 case TargetOpcode::G_FSQRT:
1705 case TargetOpcode::G_FSUB:
1706 case TargetOpcode::G_INTRINSIC_ROUND:
1707 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
1708 case TargetOpcode::G_INTRINSIC_TRUNC:
1709 return true;
1710 default:
1711 return false;
1712 }
1713}
1714
// Bitmask selecting which of undef/poison an analysis should consider;
// queried via includesPoison()/includesUndef() below.
// NOTE(review): the extraction dropped doxygen source line 1719 — the
// combined enumerator (presumably PoisonAndUndef = PoisonOnly | UndefOnly,).
// Restore it from upstream before compiling.
1715namespace {
1716enum class UndefPoisonKind {
1717 PoisonOnly = (1 << 0),
1718 UndefOnly = (1 << 1),
1720};
1721}
1722
1723[[maybe_unused]] static bool includesPoison(UndefPoisonKind Kind) {
1724 return (unsigned(Kind) & unsigned(UndefPoisonKind::PoisonOnly)) != 0;
1725}
1726
1727[[maybe_unused]] static bool includesUndef(UndefPoisonKind Kind) {
1728 return (unsigned(Kind) & unsigned(UndefPoisonKind::UndefOnly)) != 0;
1729}
1730
// Conservative query: can the instruction defining Reg introduce undef or
// poison? Only G_FREEZE is known not to; everything else answers true.
// NOTE(review): the extraction dropped doxygen source line 1731 — the
// declarator's first half (static bool canCreateUndefOrPoison(Register Reg,
// const MachineRegisterInfo &MRI, per the doxygen member index). Restore it
// from upstream.
1732 bool ConsiderFlagsAndMetadata,
1733 UndefPoisonKind Kind) {
1734 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1735
1736 switch (RegDef->getOpcode()) {
1737 case TargetOpcode::G_FREEZE:
1738 return false;
1739 default:
  // Conservatively assume any other opcode may create undef/poison.
1740 return true;
1741 }
1742}
1743
1745 const MachineRegisterInfo &MRI,
1746 unsigned Depth,
1747 UndefPoisonKind Kind) {
1749 return false;
1750
1751 MachineInstr *RegDef = MRI.getVRegDef(Reg);
1752
1753 switch (RegDef->getOpcode()) {
1754 case TargetOpcode::G_FREEZE:
1755 return true;
1756 case TargetOpcode::G_IMPLICIT_DEF:
1757 return !includesUndef(Kind);
1758 default:
1759 return false;
1760 }
1761}
1762
// Public wrapper forwarding to the static helper.
// NOTE(review): the extraction dropped doxygen source lines 1763 (the
// declarator's first half — llvm::canCreateUndefOrPoison(Register Reg,
// const MachineRegisterInfo &MRI,) and 1766 (the final argument — presumably
// UndefPoisonKind::PoisonAndUndef);). Restore both from upstream.
1764 bool ConsiderFlagsAndMetadata) {
1765 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
1767}
1768
// Poison-only variant of canCreateUndefOrPoison.
// NOTE(review): the extraction dropped doxygen source lines 1769 (the
// declarator's first half — canCreatePoison(Register Reg,
// const MachineRegisterInfo &MRI, per the doxygen member index) and 1772
// (the final argument — presumably UndefPoisonKind::PoisonOnly);). Restore
// both from upstream.
1770 bool ConsiderFlagsAndMetadata = true) {
1771 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
1773}
1774
// Public wrapper checking for both undef and poison.
// NOTE(review): the extraction dropped doxygen source lines 1775 (the
// declarator's first half — llvm::isGuaranteedNotToBeUndefOrPoison(
// Register Reg,) and 1779 (the final argument — presumably
// UndefPoisonKind::PoisonAndUndef);). Restore both from upstream.
1776 const MachineRegisterInfo &MRI,
1777 unsigned Depth) {
1778 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
1780}
1781
// Public wrapper checking for poison only.
// NOTE(review): the extraction dropped doxygen source lines 1782 (the
// declarator's first half — llvm::isGuaranteedNotToBePoison(Register Reg,)
// and 1786 (the final argument — presumably UndefPoisonKind::PoisonOnly);).
// Restore both from upstream.
1783 const MachineRegisterInfo &MRI,
1784 unsigned Depth) {
1785 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
1787}
1788
// Public wrapper checking for undef only.
// NOTE(review): the extraction dropped doxygen source lines 1789 (the
// declarator's first half — llvm::isGuaranteedNotToBeUndef(Register Reg,)
// and 1793 (the final argument — presumably UndefPoisonKind::UndefOnly);).
// Restore both from upstream.
1790 const MachineRegisterInfo &MRI,
1791 unsigned Depth) {
1792 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
1794}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
unsigned RegSize
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
basic Basic Alias true
static bool canCreateUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI, bool ConsiderFlagsAndMetadata, UndefPoisonKind Kind)
Definition: Utils.cpp:1731
static bool isGuaranteedNotToBeUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI, unsigned Depth, UndefPoisonKind Kind)
Definition: Utils.cpp:1744
static bool includesPoison(UndefPoisonKind Kind)
Definition: Utils.cpp:1723
static bool includesUndef(UndefPoisonKind Kind)
Definition: Utils.cpp:1727
static void reportGISelDiagnostic(DiagnosticSeverity Severity, MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Definition: Utils.cpp:251
bool canCreatePoison(Register Reg, const MachineRegisterInfo &MRI, bool ConsiderFlagsAndMetadata=true)
Definition: Utils.cpp:1769
static bool isBuildVectorOp(unsigned Opcode)
Definition: Utils.cpp:1309
static bool isConstantScalar(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Definition: Utils.cpp:1440
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Size
This contains common code to allow clients to notify changes to machine instr.
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Tracks DebugLocs between checkpoints and verifies that they are transferred.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
===- MachineOptimizationRemarkEmitter.h - Opt Diagnostics -*- C++ -*-—===//
unsigned const TargetRegisterInfo * TRI
const char LLVMTargetMachineRef TM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
UndefPoisonKind
static const char PassName[]
Value * RHS
Value * LHS
BinaryOperator * Mul
Class recording the (high level) value of a variable.
opStatus divide(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1069
void copySign(const APFloat &RHS)
Definition: APFloat.h:1163
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition: APFloat.cpp:5196
opStatus subtract(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1051
opStatus add(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1042
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
Definition: APFloat.h:1193
opStatus multiply(const APFloat &RHS, roundingMode RM)
Definition: APFloat.h:1060
APInt bitcastToAPInt() const
Definition: APFloat.h:1210
opStatus mod(const APFloat &RHS)
Definition: APFloat.h:1087
Class for arbitrary precision integers.
Definition: APInt.h:76
APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition: APInt.cpp:1543
APInt zext(unsigned width) const
Zero extend to a new width.
Definition: APInt.cpp:981
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:1002
APInt trunc(unsigned width) const
Truncate to new width.
Definition: APInt.cpp:906
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition: APInt.cpp:1636
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1439
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition: APInt.cpp:1614
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1010
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition: APInt.h:805
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition: APInt.cpp:1706
APInt sext(unsigned width) const
Sign extend to a new width.
Definition: APInt.cpp:954
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:418
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:829
Represent the analysis usage information of a pass.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:268
const APFloat & getValueAPF() const
Definition: Constants.h:311
bool isNegative() const
Return true if the sign bit is set.
Definition: Constants.h:318
bool isZero() const
Return true if the value is positive or negative zero.
Definition: Constants.h:315
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:145
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:296
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Definition: TypeSize.h:302
Represents a G_BUILD_VECTOR.
Abstract class that contains various methods for clients to notify about changes.
KnownBits getKnownBits(Register R)
void insert(MachineInstr *I)
Add the specified instruction to the worklist if it isn't already in it.
Definition: GISelWorkList.h:74
MachineInstr * pop_back_val()
bool empty() const
Definition: GISelWorkList.h:38
void remove(const MachineInstr *I)
Remove I from the worklist if it exists.
Definition: GISelWorkList.h:83
Register getSourceReg(unsigned I) const
Returns the I'th source register.
unsigned getNumSources() const
Returns the number of source registers.
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:182
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:267
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelType.h:64
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:170
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:290
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelType.h:100
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
Definition: LowLevelType.h:178
constexpr LLT getScalarType() const
Definition: LowLevelType.h:208
static constexpr LLT scalarOrVector(ElementCount EC, LLT ScalarTy)
Definition: LowLevelType.h:124
void checkpoint(bool CheckDebugLocs=true)
Call this to indicate that it's a good point to assess whether locations have been lost.
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:230
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
MachineFunctionProperties & set(Property P)
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
GISelChangeObserver * getObserver() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
Helper class to build MachineInstr.
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:558
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:341
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
Definition: MachineInstr.h:391
iterator_range< mop_iterator > uses()
Returns a range that includes all operands that are register uses.
Definition: MachineInstr.h:722
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:487
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:568
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setReg(Register Reg)
Change the register this operand corresponds to.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
Diagnostic information for missed-optimization remarks.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Analysis providing profile information.
Represents a value which can be a Register or a constant.
Definition: Utils.h:391
Holds all the information related to register banks.
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
This class implements the register bank concept.
Definition: RegisterBank.h:28
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
TargetInstrInfo - Interface to description of machine instruction set.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:76
Target-Independent Code Generator Pass Configuration Options.
bool isGlobalISelAbortEnabled() const
Check whether or not GlobalISel should abort on error.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
LLVM Value Representation.
Definition: Value.h:74
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:187
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
Definition: TypeSize.h:243
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
Definition: APInt.h:2178
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
Definition: APInt.h:2183
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition: APInt.h:2188
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
Definition: APInt.h:2193
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
SpecificConstantMatch m_SpecificICst(int64_t RequestedValue)
Matches a constant equal to RequestedValue.
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
DiagnosticInfoMIROptimization::MachineArgument MNV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII, MCRegister PhysReg, const TargetRegisterClass &RC, const DebugLoc &DL, LLT RegTy=LLT())
Return a virtual register corresponding to the incoming argument register PhysReg.
Definition: Utils.cpp:884
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:456
bool isBuildVectorAllZeros(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition: Utils.cpp:1414
Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
Definition: Utils.cpp:56
MachineInstr * getOpcodeDef(unsigned Opcode, Register Reg, const MachineRegisterInfo &MRI)
See if Reg is defined by an single def instruction that is Opcode.
Definition: Utils.cpp:627
const ConstantFP * getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:440
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
Definition: Utils.cpp:295
std::optional< APFloat > ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:958
std::optional< APInt > getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1374
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
Definition: Utils.cpp:1527
const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
std::optional< APFloat > ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:715
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1652
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition: Utils.cpp:155
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
std::optional< SmallVector< unsigned > > ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI, std::function< unsigned(APInt)> CB)
Tries to constant fold a counting-zero operation (G_CTLZ or G_CTTZ) on Src.
Definition: Utils.cpp:971
std::optional< APInt > ConstantFoldExtOp(unsigned Opcode, const Register Op1, uint64_t Imm, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:917
std::optional< RegOrConstant > getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1427
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
Definition: APFloat.h:1436
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
std::optional< APInt > isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a splat vector of constant integers.
Definition: Utils.cpp:1497
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
Definition: Utils.cpp:1509
MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
Definition: Utils.cpp:467
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg, std::function< bool(const Constant *ConstVal)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant G_B...
Definition: Utils.cpp:1542
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Returns true if given the TargetLowering's boolean contents information, the value Val contains a tru...
Definition: Utils.cpp:1574
LLVM_READNONE LLT getLCMType(LLT OrigTy, LLT TargetTy)
Return the least common multiple type of OrigTy and TargetTy, by changing the number of vector elemen...
Definition: Utils.cpp:1146
std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition: Utils.cpp:307
std::optional< APInt > ConstantFoldBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:646
bool shouldOptForSize(const MachineBasicBlock &MBB, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
Returns true if the given block should be optimized for size.
Definition: Utils.cpp:1611
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 maximumNumber semantics.
Definition: APFloat.h:1410
bool isConstantOrConstantVector(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Return true if the specified instruction is known to be a constant, or a vector of constants.
Definition: Utils.cpp:1477
constexpr unsigned MaxAnalysisRecursionDepth
Definition: ValueTracking.h:48
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI)
Check if DstReg can be replaced with SrcReg depending on the register constraints.
Definition: Utils.cpp:201
void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver, SmallInstListTy &DeadInstChain)
Definition: Utils.cpp:1618
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition: Utils.cpp:275
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
std::optional< SmallVector< APInt > > ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1003
std::optional< ValueAndVReg > getAnyConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true, bool LookThroughAnyExt=false)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT or G_FCONST...
Definition: Utils.cpp:421
bool isBuildVectorAllOnes(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Definition: Utils.cpp:1420
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
SmallVector< APInt > ConstantFoldVectorBinop(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Tries to constant fold a vector binop with sources Op1 and Op2.
Definition: Utils.cpp:769
std::optional< FPValueAndVReg > getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI, bool AllowUndef=true)
Returns a floating point scalar constant of a build vector splat if it exists.
Definition: Utils.cpp:1407
std::optional< APInt > ConstantFoldCastOp(unsigned Opcode, LLT DstTy, const Register Op0, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:934
void extractParts(Register Reg, LLT Ty, int NumParts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Helper function to split a wide generic register into bitwise blocks with the given Type (which impli...
Definition: Utils.cpp:481
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
Definition: Utils.cpp:1142
LLVM_READNONE LLT getCoverTy(LLT OrigTy, LLT TargetTy)
Return smallest type that covers both OrigTy and TargetTy and is multiple of TargetTy.
Definition: Utils.cpp:1213
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 minimumNumber semantics.
Definition: APFloat.h:1396
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
bool isTargetSpecificOpcode(unsigned Opcode)
Check whether the given Opcode is a target-specific opcode.
Definition: TargetOpcodes.h:36
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
std::optional< FPValueAndVReg > getFConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_FCONSTANT returns it...
Definition: Utils.cpp:429
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Definition: Utils.cpp:1587
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:633
bool isBuildVectorConstantSplat(const Register Reg, const MachineRegisterInfo &MRI, int64_t SplatValue, bool AllowUndef)
Return true if the specified register is defined by G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all ...
Definition: Utils.cpp:1358
void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition: Utils.cpp:1647
DiagnosticSeverity
Defines the different supported severity of a diagnostic.
@ DS_Warning
@ DS_Error
Register constrainRegToClass(MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, Register Reg, const TargetRegisterClass &RegClass)
Try to constrain Reg to the specified register class.
Definition: Utils.cpp:46
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP)
Returns an integer representing true, as defined by the TargetBooleanContents.
Definition: Utils.cpp:1599
std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
Definition: Utils.cpp:415
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
bool isPreISelGenericFloatingPointOpcode(unsigned Opc)
Returns whether opcode Opc is a pre-isel generic floating-point opcode, having only floating-point op...
Definition: Utils.cpp:1671
bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI)
Returns true if Val can be assumed to never be a signaling NaN.
Definition: Utils.h:334
std::optional< DefinitionAndSourceRegister > getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, and underlying value Register folding away any copies.
Definition: Utils.cpp:448
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:212
void eraseInstrs(ArrayRef< MachineInstr * > DeadInstrs, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
Definition: Utils.cpp:1632
void salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI, MachineInstr &MI, ArrayRef< MachineOperand * > DbgUsers)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
bool isKnownNeverNaN(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the source register for Reg, folding away any trivial copies.
Definition: Utils.cpp:474
LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
Definition: Utils.cpp:1234
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
Definition: APFloat.h:1423
std::optional< int64_t > getIConstantSplatSExtVal(const Register Reg, const MachineRegisterInfo &MRI)
Definition: Utils.cpp:1392
void extractVectorParts(Register Reg, unsigned NumElts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Version which handles irregular sub-vector splits.
Definition: Utils.cpp:585
int getSplatIndex(ArrayRef< int > Mask)
If all non-negative Mask elements are the same value, return that value.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...
Definition: Utils.cpp:222
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
Definition: Utils.cpp:867
void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel warning as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition: Utils.cpp:269
#define MORE()
Definition: regcomp.c:252
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Simple struct used to hold a Register value and the instruction which defines it.
Definition: Utils.h:224
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition: KnownBits.h:285
unsigned countMinPopulation() const
Returns the number of bits known to be one.
Definition: KnownBits.h:282
This class contains a discriminated union of information about pointers in memory operands,...
int64_t Offset
Offset - This is an offset from the base Value*.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
Simple struct used to hold a constant integer value and a virtual register.
Definition: Utils.h:183