/// Helper function which extracts the specified 16-bit chunk from a
/// 64-bit value.
static uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx) {
  assert(ChunkIdx < 4 && "Out of range chunk index specified!");

  return (Imm >> (ChunkIdx * 16)) & 0xFFFF;
}
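// Worked example (illustrative, not part of the original file): chunk index
// Idx selects bits [16*Idx+15 : 16*Idx], so for 0x1234'5678'9ABC'DEF0 the
// chunks from index 0 to 3 are 0xDEF0, 0x9ABC, 0x5678, and 0x1234.
static_assert(((0x123456789ABCDEF0ULL >> (2 * 16)) & 0xFFFF) == 0x5678,
              "getChunk(Imm, 2) extracts bits [47:32]");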
/// Check whether the given 16-bit chunk replicated to full 64-bit width can
/// be materialized with an ORR instruction.
static bool canUseOrr(uint64_t Chunk, uint64_t &Encoding) {
  Chunk = (Chunk << 48) | (Chunk << 32) | (Chunk << 16) | Chunk;

  return AArch64_AM::processLogicalImmediate(Chunk, 64, Encoding);
}
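// Worked example (illustrative): replicating the chunk 0x00FF yields
// 0x00FF'00FF'00FF'00FF, a repeating pattern that is encodable as an ORR
// logical immediate (a run of 8 ones in every 16-bit element).
static_assert(((0x00FFULL << 48) | (0x00FFULL << 32) | (0x00FFULL << 16) |
               0x00FFULL) == 0x00FF00FF00FF00FFULL,
              "16-bit chunk replicated to 64 bits");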
/// Check for identical 16-bit chunks within the constant and if so
/// materialize them with a single ORR instruction. The remaining one or two
/// chunks are materialized with MOVK instructions.
static bool tryToreplicateChunks(uint64_t UImm,
                                 SmallVectorImpl<ImmInsnModel> &Insn) {
  // Scan the constant and count how often every chunk occurs.
  DenseMap<uint64_t, unsigned> Counts;
  for (unsigned Idx = 0; Idx < 4; ++Idx)
    ++Counts[getChunk(UImm, Idx)];

  // Traverse the chunks to find one which occurs more than once.
  for (const auto &Chunk : Counts) {
    const uint64_t ChunkVal = Chunk.first;
    const unsigned Count = Chunk.second;

    uint64_t Encoding = 0;
    // ... (skip chunks that occur only once or whose replication cannot be
    // encoded as an ORR logical immediate; canUseOrr() sets Encoding)
    const bool CountThree = Count == 3;

    // Create the ORR-immediate instruction.
    Insn.push_back({ AArch64::ORRXri, 0, Encoding });

    unsigned ShiftAmt = 0;
    uint64_t Imm16 = 0;
    // Find the first chunk not materialized with the ORR instruction.
    for (; ShiftAmt < 64; ShiftAmt += 16) {
      Imm16 = (UImm >> ShiftAmt) & 0xFFFF;

      if (Imm16 != ChunkVal)
        break;
    }
    // Create the first MOVK instruction.
    Insn.push_back({ AArch64::MOVKXi, Imm16,
                     AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt) });
    // In case we have three instances the whole constant is now materialized.
    if (CountThree)
      return true;

    // Find the remaining chunk which needs to be materialized.
    for (ShiftAmt += 16; ShiftAmt < 64; ShiftAmt += 16) {
      Imm16 = (UImm >> ShiftAmt) & 0xFFFF;

      if (Imm16 != ChunkVal)
        break;
    }
    // ... (create the second MOVK instruction)
  }
  return false;
}
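// Illustrative expansion (assumed input, matching the shape this function
// emits): UImm = 0xFF00'FF00'FF00'1234 contains the chunk 0xFF00 three times,
// and its replication 0xFF00FF00FF00FF00 is a valid logical immediate, so:
//   ORR  xD, xzr, #0xFF00FF00FF00FF00
//   MOVK xD, #0x1234, LSL #0
static_assert(((0xFF00FF00FF00FF00ULL & ~0xFFFFULL) | 0x1234ULL) ==
                  0xFF00FF00FF001234ULL,
              "ORR of the replicated chunk plus one MOVK");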
/// Check whether this chunk matches the pattern '1...0...', which starts a
/// contiguous sequence of ones when scanning from the LSB towards the MSB.
static bool isStartChunk(uint64_t Chunk) {
  if (Chunk == 0 || Chunk == std::numeric_limits<uint64_t>::max())
    return false;

  return isMask_64(~Chunk);
}

/// Check whether this chunk matches the pattern '0...1...', which ends a
/// contiguous sequence of ones.
static bool isEndChunk(uint64_t Chunk) {
  if (Chunk == 0 || Chunk == std::numeric_limits<uint64_t>::max())
    return false;

  return isMask_64(Chunk);
}
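// Worked example (illustrative): trySequenceOfOnes() sign-extends each chunk
// before these checks, so 0xFF00 becomes 0xFFFF'FFFF'FFFF'FF00, whose
// complement 0xFF is a mask ('1...0...' starts a run of ones), while 0x00FF
// stays 0x00FF, itself a mask ('0...1...' ends one).
static_assert(~0xFFFFFFFFFFFFFF00ULL == 0xFFULL,
              "sign-extended 0xFF00 is a start chunk");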
/// Clear or set all bits in the chunk at the given index.
static uint64_t updateImm(uint64_t Imm, unsigned Idx, bool Clear) {
  const uint64_t Mask = 0xFFFF;
  if (Clear)
    // Clear the chunk in the immediate.
    Imm &= ~(Mask << (Idx * 16));
  else
    // Set all bits of the chunk in the immediate.
    Imm |= Mask << (Idx * 16);

  return Imm;
}
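// Worked example (illustrative): clearing chunk 1 of an all-ones immediate
// zeroes bits [31:16].
static_assert((0xFFFFFFFFFFFFFFFFULL & ~(0xFFFFULL << 16)) ==
                  0xFFFFFFFF0000FFFFULL,
              "updateImm(~0ULL, 1, /*Clear=*/true)");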
/// Check whether the constant contains a sequence of contiguous ones, which
/// might be interrupted by one or two chunks. If so, materialize the sequence
/// of contiguous ones with an ORR instruction and the remaining chunks with
/// MOVK instructions.
static bool trySequenceOfOnes(uint64_t UImm,
                              SmallVectorImpl<ImmInsnModel> &Insn) {
  const int NotSet = -1;
  const uint64_t Mask = 0xFFFF;

  int StartIdx = NotSet;
  int EndIdx = NotSet;
  // Try to find the chunks which start/end a contiguous sequence of ones.
  for (int Idx = 0; Idx < 4; ++Idx) {
    int64_t Chunk = getChunk(UImm, Idx);
    // Sign extend the 16-bit chunk to 64 bits.
    Chunk = (Chunk << 48) >> 48;

    if (isStartChunk(Chunk))
      StartIdx = Idx;
    else if (isEndChunk(Chunk))
      EndIdx = Idx;
  }

  // Early exit in case we can't find a start/end chunk.
  if (StartIdx == NotSet || EndIdx == NotSet)
    return false;

  // Outside of the contiguous sequence of ones everything needs to be zero.
  uint64_t Outside = 0;
  // Chunks between the start and end chunk need to have all their bits set.
  uint64_t Inside = Mask;

  // If the sequence of ones wraps around from the MSB into the LSB, swap
  // indices and pretend we are materializing a contiguous sequence of zeros
  // surrounded by ones.
  if (StartIdx > EndIdx) {
    std::swap(StartIdx, EndIdx);
    std::swap(Outside, Inside);
  }

  uint64_t OrrImm = UImm;
  int FirstMovkIdx = NotSet;
  int SecondMovkIdx = NotSet;

  // Find out which chunks we need to patch up to obtain a contiguous
  // sequence of ones.
  for (int Idx = 0; Idx < 4; ++Idx) {
    const uint64_t Chunk = getChunk(UImm, Idx);

    // Chunks which are not part of the contiguous sequence of ones.
    if ((Idx < StartIdx || EndIdx < Idx) && Chunk != Outside) {
      OrrImm = updateImm(OrrImm, Idx, Outside == 0);

      // Remember the index we need to patch.
      if (FirstMovkIdx == NotSet)
        FirstMovkIdx = Idx;
      else
        SecondMovkIdx = Idx;

    // Chunks which are part of the contiguous sequence of ones.
    } else if (Idx > StartIdx && Idx < EndIdx && Chunk != Inside) {
      OrrImm = updateImm(OrrImm, Idx, Inside != Mask);

      if (FirstMovkIdx == NotSet)
        FirstMovkIdx = Idx;
      else
        SecondMovkIdx = Idx;
    }
  }
  assert(FirstMovkIdx != NotSet && "Constant materializable with single ORR!");

  // Create the ORR-immediate instruction.
  uint64_t Encoding = 0;
  AArch64_AM::processLogicalImmediate(OrrImm, 64, Encoding);
  Insn.push_back({ AArch64::ORRXri, 0, Encoding });

  // Create the first MOVK and, unless a single MOVK suffices, the second one.
  const bool SingleMovk = SecondMovkIdx == NotSet;
  Insn.push_back({ AArch64::MOVKXi, getChunk(UImm, FirstMovkIdx),
                   AArch64_AM::getShifterImm(AArch64_AM::LSL,
                                             FirstMovkIdx * 16) });
  if (SingleMovk)
    return true;

  Insn.push_back({ AArch64::MOVKXi, getChunk(UImm, SecondMovkIdx),
                   AArch64_AM::getShifterImm(AArch64_AM::LSL,
                                             SecondMovkIdx * 16) });
  return true;
}
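// Illustrative expansion (assumed input): UImm = 0x0003'FFFF'FFC0'5678.
// Chunk 1 (0xFFC0) starts the run of ones, chunk 3 (0x0003) ends it, and
// chunk 0 (0x5678) interrupts it. Clearing chunk 0 gives
// 0x0003'FFFF'FFC0'0000, a contiguous run of ones (bits 22..49) that ORR can
// encode, so one MOVK finishes the job:
//   ORR  xD, xzr, #0x0003FFFFFFC00000
//   MOVK xD, #0x5678, LSL #0
static_assert((0x0003FFFFFFC05678ULL & ~0xFFFFULL) == 0x0003FFFFFFC00000ULL,
              "clearing the interrupting chunk leaves a run of ones");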
/// Check whether the constant can be materialized as an EOR or EON of a
/// shifted copy of a simpler constant C, i.e. Imm == C ^ (C << N) or
/// Imm == C ^ ~(C << N), where C has all ones in its upper 32 bits and thus
/// fits in one or two MOVN/MOVK instructions.
static bool tryCopyWithNegation(uint64_t Imm, bool AllowThreeSequence,
                                SmallVectorImpl<ImmInsnModel> &Insn) {
  const unsigned Mask = 0xffff;

  auto tryExpansion = [&](unsigned Opc, uint64_t C, unsigned N) {
    assert((C >> 32) == 0xffffffffULL && "Invalid immediate");
    const unsigned Imm0 = C & Mask;
    const unsigned Imm16 = (C >> 16) & Mask;
    // Unless one of C's low half-chunks is all ones, C itself costs two
    // instructions and the whole sequence three.
    if (Imm0 != Mask && Imm16 != Mask && !AllowThreeSequence)
      return false;

    if (Imm0 != Mask) {
      Insn.push_back({AArch64::MOVNXi, Imm0 ^ Mask, 0});
      if (Imm16 != Mask)
        Insn.push_back({AArch64::MOVKXi, Imm16, 16});
    } else {
      Insn.push_back({AArch64::MOVNXi, Imm16 ^ Mask, 16});
    }
    // ... (append the final shifted-register Opc with shift amount N)
    return true;
  };

  for (unsigned N = 17; N < 48; ++N) {
    // Try Imm == C ^ (C << N), finished with an EOR.
    uint64_t C = 0xffffffff00000000ULL | (Imm ^ (Imm << N));
    if ((C ^ (C << N)) == Imm && tryExpansion(AArch64::EORXrs, C, N))
      return true;

    // Try Imm == C ^ ~(C << N), finished with an EON.
    C = 0xffffffff00000000ULL | (Imm ^ ~(~Imm << N));
    if ((C ^ ~(C << N)) == Imm && tryExpansion(AArch64::EONXrs, C, N))
      return true;
  }
  return false;
}
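// How the MOVN seeds C (illustrative): MOVN writes ~(imm16 << shift), so
// MOVNXi with Imm0 ^ Mask at shift 0 produces a value whose upper 48 bits
// are all ones and whose low 16 bits equal Imm0; MOVKXi then patches bits
// [31:16] to Imm16, giving C = 0xffffffff00000000 | (Imm16 << 16) | Imm0.
// The final shifted-register EOR (or EON) computes C ^ (C << N) == Imm in
// one more instruction.
static_assert(~(0x1234ULL ^ 0xFFFFULL) == 0xFFFFFFFFFFFF1234ULL,
              "MOVN of Imm0 ^ Mask seeds the low chunk of C");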
/// Return the run of ones in V which starts at StartPosition, as a mask.
static uint64_t GetRunOfOnesStartingAt(uint64_t V, uint64_t StartPosition) {
  unsigned NumOnes = countr_one(V >> StartPosition);

  uint64_t UnshiftedOnes;
  if (NumOnes == 64) {
    // Avoid the undefined behaviour of shifting by the full bit width.
    UnshiftedOnes = ~0ULL;
  } else {
    UnshiftedOnes = (1ULL << NumOnes) - 1;
  }
  return UnshiftedOnes << StartPosition;
}
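// Worked example (illustrative): a run of four ones starting at bit 8 gives
// UnshiftedOnes = (1ULL << 4) - 1 = 0xF, shifted up to 0xF00. The special
// case above avoids the undefined 64-bit shift when the run covers the
// whole value.
static_assert((((1ULL << 4) - 1) << 8) == 0xF00ULL, "run of ones at bit 8");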
static uint64_t MaximallyReplicateSubImmediate(uint64_t V, uint64_t Subset) {
  uint64_t Closure = Subset;
  // Grow the subset by OR-ing in rotated copies of itself, trying rotations
  // of 32, 16, 8, 4, 2, and finally 1 bit.
  for (unsigned i = 1; i <= 6; ++i) {
    uint64_t Rotation = 1ULL << (6 - i);
    // ...
    if (Closure != (Closure & V)) {
      // The rotated copy left V; undo this growth step.
      // ...
    }
  }
  return Closure;
}
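// Conceptual example (assumed semantics): growing Subset = 0x1 inside
// V = 0x0101'0101'0101'0101 by OR-ing in rotations of 32, 16, and 8 bits
// stays inside V at every step and reaches V itself, the maximally
// replicated form of the subset.
static_assert(((0x1ULL | (0x1ULL << 32)) & 0x0101010101010101ULL) ==
                  (0x1ULL | (0x1ULL << 32)),
              "the 32-bit rotation of the subset is still within V");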
/// Attempt to decompose the constant into the bitwise-or of two logical
/// immediates; returns std::nullopt if no such decomposition is found.
static std::optional<std::pair<uint64_t, uint64_t>>
decomposeIntoOrrOfLogicalImmediates(uint64_t UImm) {
  if (UImm == 0 || ~UImm == 0)
    return std::nullopt;

  // Rotate the trailing run of ones out of the way so that no run of ones is
  // split across the rotation boundary.
  unsigned InitialTrailingOnes = countr_one(UImm);
  uint64_t RotatedBits = rotr(UImm, InitialTrailingOnes);

  // Find the largest logical immediate within the constant, remove the bits
  // it covers, and try to cover the remainder with a second one.
  uint64_t MaximalImm1 = maximalLogicalImmWithin(RotatedBits, RotatedBits);
  uint64_t RemainingBits = RotatedBits & ~MaximalImm1;
  uint64_t MaximalImm2 = maximalLogicalImmWithin(RemainingBits, RotatedBits);

  // If some bits are still uncovered, the decomposition fails.
  if (RemainingBits & ~MaximalImm2)
    return std::nullopt;

  // Un-rotate the immediates before returning them.
  return std::make_pair(rotl(MaximalImm1, InitialTrailingOnes),
                        rotl(MaximalImm2, InitialTrailingOnes));
}
/// Attempt to materialize the constant as the bitwise-or of two logical
/// immediates.
static bool tryOrrOfLogicalImmediates(uint64_t UImm,
                                      SmallVectorImpl<ImmInsnModel> &Insn) {
  auto MaybeDecomposition = decomposeIntoOrrOfLogicalImmediates(UImm);
  if (MaybeDecomposition == std::nullopt)
    return false;
  uint64_t Imm1 = MaybeDecomposition->first;
  uint64_t Imm2 = MaybeDecomposition->second;

  uint64_t Encoding1, Encoding2;
  bool Imm1Success = AArch64_AM::processLogicalImmediate(Imm1, 64, Encoding1);
  bool Imm2Success = AArch64_AM::processLogicalImmediate(Imm2, 64, Encoding2);

  if (Imm1Success && Imm2Success) {
    // Create the ORR-immediate instructions.
    Insn.push_back({AArch64::ORRXri, 0, Encoding1});
    Insn.push_back({AArch64::ORRXri, 1, Encoding2});
    return true;
  }

  return false;
}
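// Illustrative expansion (assumed input): 0x0F0F'0F0F'0F0F'1F0F is not a
// logical immediate itself, but it is the OR of two that are, so a sequence
// of the shape this function emits is:
//   ORR xD, xzr, #0x0F0F0F0F0F0F0F0F
//   ORR xD, xD,  #0x0000000000001000
static_assert((0x0F0F0F0F0F0F0F0FULL | 0x1000ULL) == 0x0F0F0F0F0F0F1F0FULL,
              "OR of two logical immediates");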
/// Attempt to materialize the constant as an ORR of one logical immediate
/// followed by an AND with another; by de Morgan's laws this corresponds to
/// an OR-decomposition of the complemented constant.
static bool tryAndOfLogicalImmediates(uint64_t UImm,
                                      SmallVectorImpl<ImmInsnModel> &Insn) {
  // ... (decompose the complement of UImm into two logical immediates and
  // derive Imm1 and Imm2, the two operands of the AND)
  if (MaybeDecomposition == std::nullopt)
    return false;
  uint64_t Imm1 = MaybeDecomposition->first;
  uint64_t Imm2 = MaybeDecomposition->second;
  // ...
  if (Imm1Success && Imm2Success) {
    // Materialize Imm1 with an ORR, then mask it down with an AND of Imm2.
    Insn.push_back({AArch64::ORRXri, 0, Encoding1});
    Insn.push_back({AArch64::ANDXri, 1, Encoding2});
    return true;
  }
  return false;
}
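// Illustrative expansion (assumed input): 0x00FF'00FF'00FF'00F0 is the AND
// of two logical immediates, so materialize one and mask with the other:
//   ORR xD, xzr, #0x00FF00FF00FF00FF
//   AND xD, xD,  #0xFFFFFFFFFFFFFFF0
static_assert((0x00FF00FF00FF00FFULL & 0xFFFFFFFFFFFFFFF0ULL) ==
                  0x00FF00FF00FF00F0ULL,
              "AND of two logical immediates");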
/// Attempt to materialize the constant as the EOR of two logical immediates
/// with different repetition sizes.
static bool tryEorOfLogicalImmediates(uint64_t Imm,
                                      SmallVectorImpl<ImmInsnModel> &Insn) {
  // Determine the repetition size of Imm, i.e. the element size of the
  // larger of the two logical immediates.
  unsigned BigSize = 64;
  do {
    BigSize /= 2;
    uint64_t Mask = (1ULL << BigSize) - 1;

    if ((Imm & Mask) != ((Imm >> BigSize) & Mask)) {
      BigSize *= 2;
      break;
    }
  } while (BigSize > 2);
  // ... (find the starts of the runs of ones within one big-sized element)
  int RunsPerBigChunk = popcount(RunStarts & BigMask);

  // Lookup table indexed by the number of runs per big element; -1 marks
  // counts for which no decomposition exists.
  static const int8_t BigToSmallSizeTable[32] = {
      -1, -1, 0,  1,  2,  2,  -1, 3,  3,  3,  -1, -1, -1, -1, -1, 4,
      4,  4,  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5,
  };

  int BigToSmallShift = BigToSmallSizeTable[RunsPerBigChunk];
  if (BigToSmallShift == -1)
    return false;

  unsigned SmallSize = BigSize >> BigToSmallShift;

  // Values with one bit set every 1, 2, 4, 8, 16, and 32 bits.
  static const uint64_t RepeatedOnesTable[] = {
      0xffffffffffffffff, 0x5555555555555555, 0x1111111111111111,
      0x0101010101010101, 0x0001000100010001, 0x0000000100000001,
  };
  // ...
  for (int Attempt = 0; Attempt < 3; ++Attempt) {
    // ... (when both halves of the EOR are encodable logical immediates,
    // emit the pair)
    Insn.push_back({AArch64::ORRXri, 0, SmallEncoding});
    Insn.push_back({AArch64::EORXri, 1, BigEncoding});
    return true;
  }
  return false;
}
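// Illustrative expansion (assumed input): 0x5555'5555'5555'5554 is the EOR
// of a small-repetition-size immediate (0x5555..., element size 2) and a
// big-repetition-size one (0x1, element size 64):
//   ORR xD, xzr, #0x5555555555555555
//   EOR xD, xD,  #0x0000000000000001
static_assert((0x5555555555555555ULL ^ 0x1ULL) == 0x5555555555555554ULL,
              "EOR of two logical immediates");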
/// Expand a MOVi32imm or MOVi64imm pseudo instruction to a MOVZ or MOVN of
/// width BitSize followed by up to three MOVK instructions.
static void expandMOVImmSimple(uint64_t Imm, unsigned BitSize,
                               unsigned OneChunks, unsigned ZeroChunks,
                               SmallVectorImpl<ImmInsnModel> &Insn) {
  const unsigned Mask = 0xFFFF;

  // Use a MOVN instruction when there are more all-one chunks than all-zero
  // chunks: building the complement takes fewer MOVKs.
  bool isNeg = false;
  if (OneChunks > ZeroChunks) {
    isNeg = true;
    Imm = ~Imm;
  }

  unsigned FirstOpc;
  if (BitSize == 32) {
    Imm &= (1LL << 32) - 1;
    FirstOpc = (isNeg ? AArch64::MOVNWi : AArch64::MOVZWi);
  } else {
    FirstOpc = (isNeg ? AArch64::MOVNXi : AArch64::MOVZXi);
  }
  unsigned Shift = 0;     // LSL amount for the first MOVZ/MOVN
  unsigned LastShift = 0; // LSL amount for the last MOVK
  if (Imm != 0) {
    unsigned LZ = countl_zero(Imm);
    unsigned TZ = countr_zero(Imm);
    Shift = (TZ / 16) * 16;
    LastShift = ((63 - LZ) / 16) * 16;
  }
  unsigned Imm16 = (Imm >> Shift) & Mask;

  Insn.push_back({ FirstOpc, Imm16,
                   AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });

  if (Shift == LastShift)
    return;

  // If a MOVN was used for the high bits of a negative value, flip the rest
  // of the bits back for use with MOVK.
  if (isNeg)
    Imm = ~Imm;

  unsigned Opc = (BitSize == 32 ? AArch64::MOVKWi : AArch64::MOVKXi);
  while (Shift < LastShift) {
    Shift += 16;
    Imm16 = (Imm >> Shift) & Mask;
    if (Imm16 == (isNeg ? Mask : 0))
      continue; // This 16-bit chunk is already set correctly.
    Insn.push_back({ Opc, Imm16,
                     AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });
  }

  // If the high and low 32 bits of the result are identical, the sequence
  // can be shortened: build the low half and copy it up with a shifted ORR.
  if (Insn.size() > 2 && (Imm >> 32) == (Imm & 0xffffffffULL)) {
    // ... (drop the MOVKs that targeted the high half)
    Insn.push_back({AArch64::ORRXrs, 0, 32});
  }
}
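// Illustrative expansions (assumed inputs): 0x0000'1234'0000'5678 has two
// all-zero chunks, so it becomes
//   MOVZ xD, #0x5678
//   MOVK xD, #0x1234, LSL #32
// and for 0x1234'5678'1234'5678 the repeated-halves check above shortens the
// four-instruction sequence to
//   MOVZ xD, #0x5678
//   MOVK xD, #0x1234, LSL #16
//   ORR  xD, xD, xD, LSL #32
static_assert((0x12345678ULL | (0x12345678ULL << 32)) ==
                  0x1234567812345678ULL,
              "OR-ing the low half into the high half");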
/// Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more real
/// move-immediate instructions to synthesize the immediate.
void expandMOVImm(uint64_t Imm, unsigned BitSize,
                  SmallVectorImpl<ImmInsnModel> &Insn) {
  const unsigned Mask = 0xFFFF;

  // Scan the immediate and count the number of 16-bit chunks which are
  // either all ones or all zeros.
  unsigned OneChunks = 0;
  unsigned ZeroChunks = 0;
  for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
    const unsigned Chunk = (Imm >> Shift) & Mask;
    if (Chunk == Mask)
      OneChunks++;
    else if (Chunk == 0)
      ZeroChunks++;
  }

  // Prefer MOVZ/MOVN when at most one chunk differs from all-zeros or
  // all-ones; a single instruction then suffices.
  if ((BitSize / 16) - OneChunks <= 1 || (BitSize / 16) - ZeroChunks <= 1) {
    expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
    assert(Insn.size() == 1 &&
           "Move of immediate should have expanded to a single MOVZ/MOVN");
    return;
  }

  // Try a single ORR of a logical immediate.
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
    unsigned Opc = (BitSize == 32 ? AArch64::ORRWri : AArch64::ORRXri);
    Insn.push_back({ Opc, 0, Encoding });
    return;
  }

  // Two-instruction MOVZ/MOVN + MOVK sequences.
  if (OneChunks >= (BitSize / 16) - 2 || ZeroChunks >= (BitSize / 16) - 2) {
    expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
    return;
  }

  assert(BitSize == 64 &&
         "All 32-bit immediates can be expanded with a MOVZ/MOVK pair");

  // Try a 64-bit ORR followed by a MOVK. The ORR immediate is constructed in
  // three ways: zeroing out the chunk which will be replaced, filling it
  // with ones, or taking the bit pattern from the other half of the constant.
  for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
    uint64_t ShiftedMask = (0xFFFFULL << Shift);
    uint64_t ZeroChunk = UImm & ~ShiftedMask;
    uint64_t OneChunk = UImm | ShiftedMask;
    uint64_t RotatedImm = (UImm << 32) | (UImm >> 32);
    uint64_t ReplicateChunk = ZeroChunk | (RotatedImm & ShiftedMask);
    if (AArch64_AM::processLogicalImmediate(ZeroChunk, BitSize, Encoding) ||
        AArch64_AM::processLogicalImmediate(OneChunk, BitSize, Encoding) ||
        AArch64_AM::processLogicalImmediate(ReplicateChunk, BitSize,
                                            Encoding)) {
      // Create the ORR-immediate instruction.
      Insn.push_back({ AArch64::ORRXri, 0, Encoding });

      // Create the MOVK instruction.
      const unsigned Imm16 = getChunk(UImm, Shift / 16);
      Insn.push_back({ AArch64::MOVKXi, Imm16,
                       AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });
      return;
    }
  }
  // ... (further two-instruction attempts via the helpers above, such as
  // tryToreplicateChunks, trySequenceOfOnes, tryOrrOfLogicalImmediates,
  // tryAndOfLogicalImmediates, tryEorOfLogicalImmediates, and
  // tryCopyWithNegation)

  // Three-instruction sequences: prefer MOVZ/MOVN followed by two MOVKs when
  // any chunk is already all-zeros or all-ones.
  if (OneChunks || ZeroChunks) {
    expandMOVImmSimple(Imm, BitSize, OneChunks, ZeroChunks, Insn);
    return;
  }
  // ... (four-instruction MOVZ/MOVN + three-MOVK fallback)
}
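// End-to-end examples (illustrative, following the strategy order above):
//   0x0000'0000'0000'1234 -> MOVZ xD, #0x1234            (3 zero chunks)
//   0xFFFF'FFFF'FFFF'1234 -> MOVN xD, #0xEDCB            (3 one chunks)
//   0x5555'5555'5555'5555 -> ORR  xD, xzr, #0x5555555555555555
//   0x5555'5555'5555'1234 -> ORR  xD, xzr, #0x5555555555555555
//                            MOVK xD, #0x1234, LSL #0
// In the last case the ORR immediate comes from the ReplicateChunk
// candidate: replacing chunk 0 with the chunk from the other half restores
// the repeating pattern.
static_assert(~0xEDCBULL == 0xFFFFFFFFFFFF1234ULL, "MOVN example");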