21 for (
auto Instr : Res) {
23 bool Compressed =
false;
24 switch (Instr.getOpcode()) {
32 Compressed = isInt<6>(Instr.getImm());
51 bool IsRV64 = STI.
hasFeature(RISCV::Feature64Bit);
55 (!isInt<32>(Val) || Val == 0x800)) {
68 int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
69 int64_t Lo12 = SignExtend64<12>(Val);
74 if (Lo12 || Hi20 == 0) {
75 unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
81 assert(IsRV64 &&
"Can't emit >32-bit imm for non-RV64 target");
106 int64_t Lo12 = SignExtend64<12>(Val);
113 if (!isInt<32>(Val)) {
119 if (ShiftAmount > 12 && !isInt<12>(Val)) {
120 if (isInt<32>((
uint64_t)Val << 12)) {
124 }
else if (isUInt<32>((
uint64_t)Val << 12) &&
129 Val = ((
uint64_t)Val << 12) | (0xffffffffull << 32);
139 Val = ((
uint64_t)Val) | (0xffffffffull << 32);
148 unsigned Opc =
Unsigned ? RISCV::SLLI_UW : RISCV::SLLI;
160 if (TrailingOnes > 0 && TrailingOnes < 64 &&
161 (LeadingOnes + TrailingOnes) > (64 - 12))
162 return 64 - TrailingOnes;
167 if (UpperTrailingOnes < 32 &&
168 (UpperTrailingOnes + LowerLeadingOnes) > (64 - 12))
169 return 32 - UpperTrailingOnes;
176 assert(Val > 0 &&
"Expected postive val");
183 ShiftedVal |= maskTrailingOnes<uint64_t>(LeadingZeros);
189 if ((TmpSeq.
size() + 1) < Res.
size() ||
196 ShiftedVal &= maskTrailingZeros<uint64_t>(LeadingZeros);
201 if ((TmpSeq.
size() + 1) < Res.
size() ||
209 if (LeadingZeros == 32 && STI.
hasFeature(RISCV::FeatureStdExtZba)) {
211 uint64_t LeadingOnesVal = Val | maskLeadingOnes<uint64_t>(LeadingZeros);
216 if ((TmpSeq.
size() + 1) < Res.
size() ||
232 if ((Val & 0xfff) != 0 && (Val & 1) == 0 && Res.
size() >= 2) {
234 int64_t ShiftedVal = Val >> TrailingZeros;
239 bool IsShiftedCompressible =
240 isInt<6>(ShiftedVal) && !STI.
hasFeature(RISCV::TuneLUIADDIFusion);
245 if ((TmpSeq.
size() + 1) < Res.
size() || IsShiftedCompressible) {
257 "Expected RV32 to only need 2 instructions");
264 if ((Val & 0xfff) != 0 && (Val & 0x1800) == 0x1000) {
265 int64_t Imm12 = -(0x800 - (Val & 0xfff));
266 int64_t AdjustedVal = Val - Imm12;
271 if ((TmpSeq.
size() + 1) < Res.
size()) {
279 if (Val > 0 && Res.
size() > 2) {
285 if (Val < 0 && Res.
size() > 3) {
301 int64_t LoVal = SignExtend64<32>(Val);
302 int64_t HiVal = SignExtend64<32>(Val >> 32);
303 if (LoVal == HiVal) {
306 if ((TmpSeq.
size() + 1) < Res.
size()) {
325 NewVal = Val | 0x80000000ll;
328 NewVal = Val & ~0x80000000ll;
330 if (isInt<32>(NewVal)) {
333 if ((TmpSeq.
size() + 1) < Res.
size()) {
372 if ((Val % 3) == 0 && isInt<32>(Val / 3)) {
375 }
else if ((Val % 5) == 0 && isInt<32>(Val / 5)) {
378 }
else if ((Val % 9) == 0 && isInt<32>(Val / 9)) {
385 if ((TmpSeq.
size() + 1) < Res.
size()) {
391 int64_t Hi52 = ((
uint64_t)Val + 0x800ull) & ~0xfffull;
392 int64_t Lo12 = SignExtend64<12>(Val);
394 if (isInt<32>(Hi52 / 3) && (Hi52 % 3) == 0) {
397 }
else if (isInt<32>(Hi52 / 5) && (Hi52 % 5) == 0) {
400 }
else if (isInt<32>(Hi52 / 9) && (Hi52 % 9) == 0) {
409 "unexpected instruction sequence for immediate materialisation");
412 if ((TmpSeq.
size() + 2) < Res.
size()) {
424 STI.
hasFeature(RISCV::FeatureVendorXTHeadBb))) {
427 uint64_t NegImm12 = llvm::rotl<uint64_t>(Val, Rotate);
428 assert(isInt<12>(NegImm12));
478 unsigned &ShiftAmt,
unsigned &AddOpc) {
479 int64_t LoVal = SignExtend64<32>(Val);
493 assert(TzLo < 32 && TzHi >= 32);
494 ShiftAmt = TzHi - TzLo;
497 if (Tmp == ((
uint64_t)LoVal << ShiftAmt))
503 AddOpc = RISCV::ADD_UW;
511 bool CompressionCost) {
512 bool IsRV64 = STI.
hasFeature(RISCV::Feature64Bit);
515 int PlatRegSize = IsRV64 ? 64 : 32;
520 for (
unsigned ShiftVal = 0; ShiftVal <
Size; ShiftVal += PlatRegSize) {
525 return std::max(1,
Cost);
This file implements a class to represent arbitrary precision integral constant values and operations on them.
static void generateInstSeqLeadingZeros(int64_t Val, const MCSubtargetInfo &STI, RISCVMatInt::InstSeq &Res)
static void generateInstSeqImpl(int64_t Val, const MCSubtargetInfo &STI, RISCVMatInt::InstSeq &Res)
static unsigned extractRotateInfo(int64_t Val)
static int getInstSeqCost(RISCVMatInt::InstSeq &Res, bool HasRVC)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Class for arbitrary precision integers.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
int64_t getSExtValue() const
Get sign extended value.
MCInstBuilder & addReg(unsigned Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Wrapper class representing physical registers. Should be passed by value.
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
unsigned getOpcode() const
OpndKind getOpndKind() const
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI, bool CompressionCost)
SmallVector< Inst, 8 > InstSeq
InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI, unsigned &ShiftAmt, unsigned &AddOpc)
void generateMCInstSeq(int64_t Val, const MCSubtargetInfo &STI, MCRegister DestReg, SmallVectorImpl< MCInst > &Insts)
This is an optimization pass for GlobalISel generic memory operations.
int popcount(T Value) noexcept
Count the number of set bits in a value.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
int countl_one(T Value)
Count the number of ones from the most significant bit to the first zero bit.
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.