RISCVMatInt.cpp
//===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "RISCVMatInt.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

static int getInstSeqCost(RISCVMatInt::InstSeq &Res, bool HasRVC) {
  if (!HasRVC)
    return Res.size();

  int Cost = 0;
  for (auto Instr : Res) {
    // Assume instructions that aren't listed aren't compressible.
    bool Compressed = false;
    switch (Instr.getOpcode()) {
    case RISCV::SLLI:
    case RISCV::SRLI:
      Compressed = true;
      break;
    case RISCV::ADDI:
    case RISCV::ADDIW:
    case RISCV::LUI:
      Compressed = isInt<6>(Instr.getImm());
      break;
    }
    // Two RVC instructions take the same space as one RVI instruction, but
    // can take longer to execute than the single RVI instruction. Thus, we
    // consider that two RVC instructions are slightly more costly than one
    // RVI instruction. For longer sequences of RVC instructions the space
    // savings can be worth it, though. The costs below try to model that.
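    // For example, under this model two compressed instructions cost 140
    // while one uncompressed instruction costs 100, so a lone RVI
    // instruction wins; four compressed instructions cost 280 versus 300
    // for three uncompressed ones, so there the longer RVC sequence wins.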
    if (!Compressed)
      Cost += 100; // Baseline cost of one RVI instruction: 100%.
    else
      Cost += 70; // 70% cost of baseline.
  }
  return Cost;
}

// Recursively generate a sequence for materializing an integer.
static void generateInstSeqImpl(int64_t Val, const MCSubtargetInfo &STI,
                                RISCVMatInt::InstSeq &Res) {
  bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);

  // Use BSETI for a single bit that can't be expressed by a single LUI or ADDI.
  if (STI.hasFeature(RISCV::FeatureStdExtZbs) && isPowerOf2_64(Val) &&
      (!isInt<32>(Val) || Val == 0x800)) {
    Res.emplace_back(RISCV::BSETI, Log2_64(Val));
    return;
  }

  if (isInt<32>(Val)) {
    // Depending on the active bits in the immediate Value v, the following
    // instruction sequences are emitted:
    //
    // v == 0                        : ADDI
    // v[0,12) != 0 && v[12,32) == 0 : ADDI
    // v[0,12) == 0 && v[12,32) != 0 : LUI
    // v[0,32) != 0                  : LUI+ADDI(W)
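    //
    // e.g., v = 0x12345FFF gives Hi20 = (0x12345FFF + 0x800) >> 12 = 0x12346
    // and Lo12 = -1, so LUI 0x12346 followed by ADDI(W) -1 yields
    // 0x12346000 - 1 = 0x12345FFF; the +0x800 below rounds Hi20 up to
    // compensate whenever Lo12 is negative.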
    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
    int64_t Lo12 = SignExtend64<12>(Val);

    if (Hi20)
      Res.emplace_back(RISCV::LUI, Hi20);

    if (Lo12 || Hi20 == 0) {
      unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
      Res.emplace_back(AddiOpc, Lo12);
    }
    return;
  }

  assert(IsRV64 && "Can't emit >32-bit imm for non-RV64 target");

  // In the worst case, for a full 64-bit constant, a sequence of 8 instructions
  // (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
  // that the first two instructions (LUI+ADDIW) can contribute up to 32 bits
  // while the following ADDI instructions contribute up to 12 bits each.
  //
  // At first glance, implementing this seems to be possible by simply
  // emitting the most significant 32 bits (LUI+ADDIW) followed by as many left
  // shift (SLLI) and immediate additions (ADDI) as needed. However, due to the
  // fact that ADDI performs a sign extended addition, doing it like that would
  // only be possible when at most 11 bits of the ADDI instructions are used.
  // Using all 12 bits of the ADDI instructions, as done by GAS, actually
  // requires that the constant is processed starting with the least significant
  // bit.
  //
  // In the following, constants are processed from LSB to MSB but instruction
  // emission is performed from MSB to LSB by recursively calling
  // generateInstSeq. In each recursion, first the lowest 12 bits are removed
  // from the constant and the optimal shift amount, which can be greater than
  // 12 bits if the constant is sparse, is determined. Then, the shifted
  // remaining constant is processed recursively and gets emitted as soon as it
  // fits into 32 bits. The emission of the shifts and additions is subsequently
  // performed when the recursion returns.
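  //
  // e.g., Val = 0x7FFFFFFFF (2^35 - 1) first peels off Lo12 = -1, leaving
  // 0x800000000, which is shifted right by its 35 trailing zeros down to 1.
  // The recursion emits ADDI 1, and unwinding appends SLLI 35 and ADDI -1,
  // i.e. (1 << 35) - 1. (The caller generateInstSeq may still replace such a
  // sequence with a shorter one, here ADDI -1 followed by SRLI 29.)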

  int64_t Lo12 = SignExtend64<12>(Val);
  Val = (uint64_t)Val - (uint64_t)Lo12;

  int ShiftAmount = 0;
  bool Unsigned = false;

  // Val might now be valid for LUI without needing a shift.
  if (!isInt<32>(Val)) {
    ShiftAmount = llvm::countr_zero((uint64_t)Val);
    Val >>= ShiftAmount;

    // If the remaining bits don't fit in 12 bits, we might be able to reduce
    // the shift amount in order to use LUI which will zero the lower 12 bits.
    if (ShiftAmount > 12 && !isInt<12>(Val)) {
      if (isInt<32>((uint64_t)Val << 12)) {
        // Reduce the shift amount and add zeros to the LSBs so it will match
        // LUI.
        ShiftAmount -= 12;
        Val = (uint64_t)Val << 12;
      } else if (isUInt<32>((uint64_t)Val << 12) &&
                 STI.hasFeature(RISCV::FeatureStdExtZba)) {
        // Reduce the shift amount and add zeros to the LSBs so it will match
        // LUI, then shift left with SLLI.UW to clear the upper 32 set bits.
        ShiftAmount -= 12;
        Val = ((uint64_t)Val << 12) | (0xffffffffull << 32);
        Unsigned = true;
      }
    }

    // Try to use SLLI_UW for Val when it is uint32 but not int32.
    if (isUInt<32>((uint64_t)Val) && !isInt<32>((uint64_t)Val) &&
        STI.hasFeature(RISCV::FeatureStdExtZba)) {
      // Use LUI+ADDI or LUI to compose, then clear the upper 32 bits with
      // SLLI_UW.
      Val = ((uint64_t)Val) | (0xffffffffull << 32);
      Unsigned = true;
    }
  }

  generateInstSeqImpl(Val, STI, Res);

  // Skip shift if we were able to use LUI directly.
  if (ShiftAmount) {
    unsigned Opc = Unsigned ? RISCV::SLLI_UW : RISCV::SLLI;
    Res.emplace_back(Opc, ShiftAmount);
  }

  if (Lo12)
    Res.emplace_back(RISCV::ADDI, Lo12);
}

static unsigned extractRotateInfo(int64_t Val) {
  // for case: 0b111..1..xxxxxx1..1..
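  // e.g., Val = 0x8FFFFFFFFFFFFFFF has 1 leading one and 60 trailing ones;
  // rotating it left by 64 - 60 = 4 gives 0xFFFFFFFFFFFFFFF8 = -8, which fits
  // in a 12-bit immediate, so ADDI -8 plus a rotate right by 4 can rebuild
  // the constant.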
  unsigned LeadingOnes = llvm::countl_one((uint64_t)Val);
  unsigned TrailingOnes = llvm::countr_one((uint64_t)Val);
  if (TrailingOnes > 0 && TrailingOnes < 64 &&
      (LeadingOnes + TrailingOnes) > (64 - 12))
    return 64 - TrailingOnes;

  // for case: 0bxxx1..1..1...xxx
  unsigned UpperTrailingOnes = llvm::countr_one(Hi_32(Val));
  unsigned LowerLeadingOnes = llvm::countl_one(Lo_32(Val));
  if (UpperTrailingOnes < 32 &&
      (UpperTrailingOnes + LowerLeadingOnes) > (64 - 12))
    return 32 - UpperTrailingOnes;

  return 0;
}

static void generateInstSeqLeadingZeros(int64_t Val, const MCSubtargetInfo &STI,
                                        RISCVMatInt::InstSeq &Res) {
  assert(Val > 0 && "Expected positive val");

  unsigned LeadingZeros = llvm::countl_zero((uint64_t)Val);
  uint64_t ShiftedVal = (uint64_t)Val << LeadingZeros;
  // Fill in the bits that will be shifted out with 1s. An example where this
  // helps is trailing one masks with 32 or more ones. This will generate
  // ADDI -1 and an SRLI.
  ShiftedVal |= maskTrailingOnes<uint64_t>(LeadingZeros);
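  // e.g., for Val = 0x7FFFFFFFF (35 ones) LeadingZeros is 29 and shifting in
  // 29 ones turns ShiftedVal into all-ones (-1), so the whole constant can be
  // built as ADDI -1 followed by SRLI 29.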

  RISCVMatInt::InstSeq TmpSeq;
  generateInstSeqImpl(ShiftedVal, STI, TmpSeq);

  // Keep the new sequence if it is an improvement or the original is empty.
  if ((TmpSeq.size() + 1) < Res.size() ||
      (Res.empty() && TmpSeq.size() < 8)) {
    TmpSeq.emplace_back(RISCV::SRLI, LeadingZeros);
    Res = TmpSeq;
  }

  // Some cases can benefit from filling the lower bits with zeros instead.
  ShiftedVal &= maskTrailingZeros<uint64_t>(LeadingZeros);
  TmpSeq.clear();
  generateInstSeqImpl(ShiftedVal, STI, TmpSeq);

  // Keep the new sequence if it is an improvement or the original is empty.
  if ((TmpSeq.size() + 1) < Res.size() ||
      (Res.empty() && TmpSeq.size() < 8)) {
    TmpSeq.emplace_back(RISCV::SRLI, LeadingZeros);
    Res = TmpSeq;
  }

  // If we have exactly 32 leading zeros and Zba, we can try using zext.w at
  // the end of the sequence.
  if (LeadingZeros == 32 && STI.hasFeature(RISCV::FeatureStdExtZba)) {
    // Try replacing upper bits with 1.
    uint64_t LeadingOnesVal = Val | maskLeadingOnes<uint64_t>(LeadingZeros);
    TmpSeq.clear();
    generateInstSeqImpl(LeadingOnesVal, STI, TmpSeq);

    // Keep the new sequence if it is an improvement.
    if ((TmpSeq.size() + 1) < Res.size() ||
        (Res.empty() && TmpSeq.size() < 8)) {
      TmpSeq.emplace_back(RISCV::ADD_UW, 0);
      Res = TmpSeq;
    }
  }
}

namespace llvm::RISCVMatInt {
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI) {
  RISCVMatInt::InstSeq Res;
  generateInstSeqImpl(Val, STI, Res);

  // If the low 12 bits are non-zero, the first expansion may end with an ADDI
  // or ADDIW. If there are trailing zeros, try generating a sign extended
  // constant with no trailing zeros and use a final SLLI to restore them.
  if ((Val & 0xfff) != 0 && (Val & 1) == 0 && Res.size() >= 2) {
    unsigned TrailingZeros = llvm::countr_zero((uint64_t)Val);
    int64_t ShiftedVal = Val >> TrailingZeros;
    // If we can use C.LI+C.SLLI instead of LUI+ADDI(W) prefer that since
    // it's more compressible. But only if LUI+ADDI(W) isn't fusable.
    // NOTE: We don't check for C extension to minimize differences in generated
    // code.
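    // e.g., on RV64, Val = 0x5C00 expands to LUI 0x6 + ADDIW -1024, but
    // shifting out its 10 trailing zeros leaves 23, which fits in 6 bits,
    // so ADDI 23 + SLLI 10 (both compressible) is preferred.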
    bool IsShiftedCompressible =
        isInt<6>(ShiftedVal) && !STI.hasFeature(RISCV::TuneLUIADDIFusion);
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(ShiftedVal, STI, TmpSeq);

    // Keep the new sequence if it is an improvement.
    if ((TmpSeq.size() + 1) < Res.size() || IsShiftedCompressible) {
      TmpSeq.emplace_back(RISCV::SLLI, TrailingZeros);
      Res = TmpSeq;
    }
  }

  // If we have a 1 or 2 instruction sequence this is the best we can do. This
  // will always be true for RV32 and will often be true for RV64.
  if (Res.size() <= 2)
    return Res;

  assert(STI.hasFeature(RISCV::Feature64Bit) &&
         "Expected RV32 to only need 2 instructions");

  // If the lower 13 bits are something like 0x17ff, try to add 1 to change the
  // lower 13 bits to 0x1800. We can restore this with an ADDI of -1 at the end
  // of the sequence. Call generateInstSeqImpl on the new constant which may
  // subtract 0xfffffffffffff800 to create another ADDI. This will leave a
  // constant with more than 12 trailing zeros for the next recursive step.
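  // e.g., a Val ending in 0x17ff becomes AdjustedVal ending in 0x1800; the
  // recursion then peels off Lo12 = -0x800, leaving a remainder ending in
  // 0x2000, i.e. with 13 trailing zeros.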
  if ((Val & 0xfff) != 0 && (Val & 0x1800) == 0x1000) {
    int64_t Imm12 = -(0x800 - (Val & 0xfff));
    int64_t AdjustedVal = Val - Imm12;
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(AdjustedVal, STI, TmpSeq);

    // Keep the new sequence if it is an improvement.
    if ((TmpSeq.size() + 1) < Res.size()) {
      TmpSeq.emplace_back(RISCV::ADDI, Imm12);
      Res = TmpSeq;
    }
  }

  // If the constant is positive we might be able to generate a shifted constant
  // with no leading zeros and use a final SRLI to restore them.
  if (Val > 0 && Res.size() > 2) {
    generateInstSeqLeadingZeros(Val, STI, Res);
  }

  // If the constant is negative, try inverting it and using our trailing zero
  // optimizations. Use an xori to invert the final value.
  if (Val < 0 && Res.size() > 3) {
    uint64_t InvertedVal = ~(uint64_t)Val;
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqLeadingZeros(InvertedVal, STI, TmpSeq);

    // Keep it if we found a sequence that is smaller after inverting.
    if (!TmpSeq.empty() && (TmpSeq.size() + 1) < Res.size()) {
      TmpSeq.emplace_back(RISCV::XORI, -1);
      Res = TmpSeq;
    }
  }

  // If the Low and High halves are the same, use pack. The pack instruction
  // packs the XLEN/2-bit lower halves of rs1 and rs2 into rd, with rs1 in the
  // lower half and rs2 in the upper half.
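  // e.g., Val = 0x1234567812345678: materialize 0x12345678 once, then
  // PACK rd, rs, rs replicates its low 32 bits into both halves, when that
  // is shorter than the plain expansion.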
  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZbkb)) {
    int64_t LoVal = SignExtend64<32>(Val);
    int64_t HiVal = SignExtend64<32>(Val >> 32);
    if (LoVal == HiVal) {
      RISCVMatInt::InstSeq TmpSeq;
      generateInstSeqImpl(LoVal, STI, TmpSeq);
      if ((TmpSeq.size() + 1) < Res.size()) {
        TmpSeq.emplace_back(RISCV::PACK, 0);
        Res = TmpSeq;
      }
    }
  }

  // Perform optimization with BCLRI/BSETI in the Zbs extension.
  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZbs)) {
    // 1. For values in range 0xffffffff 7fffffff ~ 0xffffffff 00000000,
    //    call generateInstSeqImpl with Val|0x80000000 (which is expected to
    //    be an int32), then emit (BCLRI r, 31).
    // 2. For values in range 0x80000000 ~ 0xffffffff, call generateInstSeqImpl
    //    with Val&~0x80000000 (which is expected to be an int32), then
    //    emit (BSETI r, 31).
    int64_t NewVal;
    unsigned Opc;
    if (Val < 0) {
      Opc = RISCV::BCLRI;
      NewVal = Val | 0x80000000ll;
    } else {
      Opc = RISCV::BSETI;
      NewVal = Val & ~0x80000000ll;
    }
    if (isInt<32>(NewVal)) {
      RISCVMatInt::InstSeq TmpSeq;
      generateInstSeqImpl(NewVal, STI, TmpSeq);
      if ((TmpSeq.size() + 1) < Res.size()) {
        TmpSeq.emplace_back(Opc, 31);
        Res = TmpSeq;
      }
    }

    // Try to use BCLRI for upper 32 bits if the original lower 32 bits are
    // negative int32, or use BSETI for upper 32 bits if the original lower
    // 32 bits are positive int32.
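    // e.g., Val = 0x8000000100000001 (Lo = 1, Hi = 0x80000001) can become
    // ADDI 1 + BSETI 32 + BSETI 63 when that is shorter than the shift-based
    // expansion.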
    int32_t Lo = Lo_32(Val);
    uint32_t Hi = Hi_32(Val);
    Opc = 0;
    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(Lo, STI, TmpSeq);
    // Check if it is profitable to use BCLRI/BSETI.
    if (Lo > 0 && TmpSeq.size() + llvm::popcount(Hi) < Res.size()) {
      Opc = RISCV::BSETI;
    } else if (Lo < 0 && TmpSeq.size() + llvm::popcount(~Hi) < Res.size()) {
      Opc = RISCV::BCLRI;
      Hi = ~Hi;
    }
    // Search for each bit and build corresponding BCLRI/BSETI.
    if (Opc > 0) {
      while (Hi != 0) {
        unsigned Bit = llvm::countr_zero(Hi);
        TmpSeq.emplace_back(Opc, Bit + 32);
        Hi &= (Hi - 1); // Clear lowest set bit.
      }
      if (TmpSeq.size() < Res.size())
        Res = TmpSeq;
    }
  }

  // Perform optimization with SH*ADD in the Zba extension.
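  // (SH1ADD computes (rs1 << 1) + rs2, so with both source operands tied to
  // the same register it yields 3*rs; likewise SH2ADD gives 5*rs and SH3ADD
  // 9*rs, which is why the divisors tried below are 3, 5 and 9.)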
  if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZba)) {
    int64_t Div = 0;
    unsigned Opc = 0;
    RISCVMatInt::InstSeq TmpSeq;
    // Select the opcode and divisor.
    if ((Val % 3) == 0 && isInt<32>(Val / 3)) {
      Div = 3;
      Opc = RISCV::SH1ADD;
    } else if ((Val % 5) == 0 && isInt<32>(Val / 5)) {
      Div = 5;
      Opc = RISCV::SH2ADD;
    } else if ((Val % 9) == 0 && isInt<32>(Val / 9)) {
      Div = 9;
      Opc = RISCV::SH3ADD;
    }
    // Build the new instruction sequence.
    if (Div > 0) {
      generateInstSeqImpl(Val / Div, STI, TmpSeq);
      if ((TmpSeq.size() + 1) < Res.size()) {
        TmpSeq.emplace_back(Opc, 0);
        Res = TmpSeq;
      }
    } else {
      // Try to use LUI+SH*ADD+ADDI.
      int64_t Hi52 = ((uint64_t)Val + 0x800ull) & ~0xfffull;
      int64_t Lo12 = SignExtend64<12>(Val);
      Div = 0;
      if (isInt<32>(Hi52 / 3) && (Hi52 % 3) == 0) {
        Div = 3;
        Opc = RISCV::SH1ADD;
      } else if (isInt<32>(Hi52 / 5) && (Hi52 % 5) == 0) {
        Div = 5;
        Opc = RISCV::SH2ADD;
      } else if (isInt<32>(Hi52 / 9) && (Hi52 % 9) == 0) {
        Div = 9;
        Opc = RISCV::SH3ADD;
      }
      // Build the new instruction sequence.
      if (Div > 0) {
        // A Val with zero Lo12 (i.e., Val equal to Hi52) should already have
        // been handled as LUI+SH*ADD by the previous optimization.
        assert(Lo12 != 0 &&
               "unexpected instruction sequence for immediate materialisation");
        assert(TmpSeq.empty() && "Expected empty TmpSeq");
        generateInstSeqImpl(Hi52 / Div, STI, TmpSeq);
        if ((TmpSeq.size() + 2) < Res.size()) {
          TmpSeq.emplace_back(Opc, 0);
          TmpSeq.emplace_back(RISCV::ADDI, Lo12);
          Res = TmpSeq;
        }
      }
    }
  }

  // Perform optimization with rori in the Zbb and th.srri in the XTheadBb
  // extension.
  if (Res.size() > 2 && (STI.hasFeature(RISCV::FeatureStdExtZbb) ||
                         STI.hasFeature(RISCV::FeatureVendorXTHeadBb))) {
    if (unsigned Rotate = extractRotateInfo(Val)) {
      RISCVMatInt::InstSeq TmpSeq;
      uint64_t NegImm12 = llvm::rotl<uint64_t>(Val, Rotate);
      assert(isInt<12>(NegImm12));
      TmpSeq.emplace_back(RISCV::ADDI, NegImm12);
      TmpSeq.emplace_back(STI.hasFeature(RISCV::FeatureStdExtZbb)
                              ? RISCV::RORI
                              : RISCV::TH_SRRI,
                          Rotate);
      Res = TmpSeq;
    }
  }
  return Res;
}

InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI,
                              unsigned &ShiftAmt, unsigned &AddOpc) {
  int64_t LoVal = SignExtend64<32>(Val);
  if (LoVal == 0)
    return RISCVMatInt::InstSeq();

  // Subtract the LoVal to emulate the effect of the final ADD.
  uint64_t Tmp = (uint64_t)Val - (uint64_t)LoVal;
  assert(Tmp != 0);

  // Use trailing zero counts to figure how far we need to shift LoVal to line
  // up with the remaining constant.
  // TODO: This algorithm assumes all non-zero bits in the low 32 bits of the
  // final constant come from LoVal.
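  // e.g., Val = 0x1234567812345678: LoVal = 0x12345678, Tmp has 35 trailing
  // zeros versus LoVal's 3, so ShiftAmt = 32 and Tmp == LoVal << ShiftAmt;
  // materializing LoVal once and adding it shifted left by 32 rebuilds Val.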
  unsigned TzLo = llvm::countr_zero((uint64_t)LoVal);
  unsigned TzHi = llvm::countr_zero(Tmp);
  assert(TzLo < 32 && TzHi >= 32);
  ShiftAmt = TzHi - TzLo;
  AddOpc = RISCV::ADD;

  if (Tmp == ((uint64_t)LoVal << ShiftAmt))
    return RISCVMatInt::generateInstSeq(LoVal, STI);

  // If we have Zba, we can use (ADD_UW X, (SLLI X, 32)).
  if (STI.hasFeature(RISCV::FeatureStdExtZba) && Lo_32(Val) == Hi_32(Val)) {
    ShiftAmt = 32;
    AddOpc = RISCV::ADD_UW;
    return RISCVMatInt::generateInstSeq(LoVal, STI);
  }

  return RISCVMatInt::InstSeq();
}

int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI,
                  bool CompressionCost) {
  bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);
  bool HasRVC = CompressionCost && (STI.hasFeature(RISCV::FeatureStdExtC) ||
                                    STI.hasFeature(RISCV::FeatureStdExtZca));
  int PlatRegSize = IsRV64 ? 64 : 32;

  // Split the constant into platform register sized chunks, and calculate cost
  // of each chunk.
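  // e.g., a 128-bit constant is costed as two 64-bit chunks on RV64, or four
  // 32-bit chunks on RV32, each materialized independently.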
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {
    APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);
    InstSeq MatSeq = generateInstSeq(Chunk.getSExtValue(), STI);
    Cost += getInstSeqCost(MatSeq, HasRVC);
  }
  return std::max(1, Cost);
}

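// Describes the source operands of each opcode in a materialization sequence:
// LUI takes only an immediate; ADD_UW is used here with x0 as its second
// operand (acting as zext.w); the SH*ADD and PACK steps reuse the partially
// materialized register for both sources; the remaining opcodes take the
// previous result plus an immediate.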
OpndKind Inst::getOpndKind() const {
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case RISCV::LUI:
    return RISCVMatInt::Imm;
  case RISCV::ADD_UW:
    return RISCVMatInt::RegX0;
  case RISCV::SH1ADD:
  case RISCV::SH2ADD:
  case RISCV::SH3ADD:
  case RISCV::PACK:
    return RISCVMatInt::RegReg;
  case RISCV::ADDI:
  case RISCV::ADDIW:
  case RISCV::XORI:
  case RISCV::SLLI:
  case RISCV::SRLI:
  case RISCV::SLLI_UW:
  case RISCV::RORI:
  case RISCV::BSETI:
  case RISCV::BCLRI:
  case RISCV::TH_SRRI:
    return RISCVMatInt::RegImm;
  }
}

} // namespace llvm::RISCVMatInt