AMDGPULowerVGPREncoding.cpp
//===- AMDGPULowerVGPREncoding.cpp - lower VGPRs above v255 ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Lower VGPRs above the first 256 on gfx1250.
///
/// The pass scans the VGPRs used and inserts S_SET_VGPR_MSB instructions to
/// switch the VGPR addressing mode. The mode change is effective until the
/// next change. This instruction provides the high bits of a VGPR address for
/// four of the operands: vdst, src0, src1, and src2, or another four operands
/// depending on the instruction encoding. If bits are set, they are added as
/// the MSBs of the corresponding operand's VGPR number.
///
/// There is no need to replace the actual register operands because the
/// encoding of high and low VGPRs is the same. I.e. v0 has the encoding 0x100,
/// and so does v256; v1 has the encoding 0x101, and v257 has the same
/// encoding. So high VGPRs survive until actual encoding and result in the
/// same bit encoding.
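///
/// For example (illustrative, not from the original source): v260 uses the
/// same register field encoding as v4, so the pass emits
///   s_set_vgpr_msb 0x40  ; vdst MSB = 1, src0/src1/src2 MSBs = 0
/// before an instruction that writes v260 and leaves the register operand
/// itself unchanged.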
///
/// As a result, the pass only inserts S_SET_VGPR_MSB to provide an actual
/// offset to the VGPR addresses of subsequent instructions. The InstPrinter
/// takes care of printing a low VGPR instead of a high one. In principle it
/// would be viable to print the actual high VGPR numbers, but that would
/// disagree with the disassembler's output and create a situation where the
/// asm text is not deterministic.
///
/// This pass establishes a convention that non-fallthrough basic blocks start
/// with all four MSBs zero; otherwise a disassembly would not be readable.
/// An optimization here is possible but deemed undesirable because of the
/// readability concerns.
///
/// Consequently, the ABI expects all four MSBs to be zero on entry. The pass
/// must run very late in the pipeline to make sure no changes to VGPR
/// operands are made after it.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/PackedVector.h"
#include "llvm/ADT/bit.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-lower-vgpr-encoding"

namespace {

class AMDGPULowerVGPREncoding {
  static constexpr unsigned OpNum = 4;
  static constexpr unsigned BitsPerField = 2;
  static constexpr unsigned NumFields = 4;
  static constexpr unsigned FieldMask = (1 << BitsPerField) - 1;
  static constexpr unsigned ModeWidth = NumFields * BitsPerField;
  static constexpr unsigned ModeMask = (1 << ModeWidth) - 1;
  using ModeType = PackedVector<unsigned, BitsPerField,
                                std::bitset<BitsPerField * NumFields>>;
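  // Illustrative note (not in the original source): ModeType packs NumFields
  // (4) fields of BitsPerField (2) bits each, one per tracked operand slot, so
  // ModeWidth is 8 and ModeMask is 0xFF. Setting the field that occupies bits
  // [6:7] (the vdst field in the S_SET_VGPR_MSB layout described below) to 1
  // gives raw bits 0b0100'0000, i.e. an immediate contribution of 0x40.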

  static constexpr unsigned VGPRMSBShift =
      countr_zero_constexpr(AMDGPU::Hwreg::VGPR_MSB_MASK);

  class ModeTy : public ModeType {
  public:
    // The bitset constructor sets all bits to zero.
    ModeTy() : ModeType(0) {}

    operator int64_t() const { return raw_bits().to_ulong(); }

    static ModeTy fullMask() {
      ModeTy M;
      M.raw_bits().flip();
      return M;
    }
  };

public:
  bool run(MachineFunction &MF);

private:
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;

  // Current basic block.
  MachineBasicBlock *MBB;

  /// Most recent s_set_* instruction.
  MachineInstr *MostRecentModeSet;

  /// Current mode bits.
  ModeTy CurrentMode;

  /// Current mask of mode bits that instructions since MostRecentModeSet care
  /// about.
  ModeTy CurrentMask;

  /// Number of current hard clause instructions.
  unsigned ClauseLen;

  /// Number of hard clause instructions remaining.
  unsigned ClauseRemaining;

  /// Clause group breaks.
  unsigned ClauseBreaks;

  /// Last hard clause instruction.
  MachineInstr *Clause;

  /// Insert mode change before \p I. \returns true if mode was changed.
  bool setMode(ModeTy NewMode, ModeTy Mask,
               MachineBasicBlock::instr_iterator I);

  /// Reset mode to default.
  void resetMode(MachineBasicBlock::instr_iterator I) {
    setMode(ModeTy(), ModeTy::fullMask(), I);
  }

  /// If \p MO references VGPRs, return the MSBs. Otherwise, return nullopt.
  std::optional<unsigned> getMSBs(const MachineOperand &MO) const;

  /// Handle a single \p MI. \return true if changed.
  bool runOnMachineInstr(MachineInstr &MI);

  /// Compute the mode and mode mask for a single \p MI given the \p Ops
  /// operand bit mapping. Optionally takes a second array \p Ops2 for VOPD.
  /// If provided and an operand from \p Ops is not a VGPR, then \p Ops2
  /// is checked.
  void computeMode(ModeTy &NewMode, ModeTy &Mask, MachineInstr &MI,
                   const AMDGPU::OpName Ops[OpNum],
                   const AMDGPU::OpName *Ops2 = nullptr);

  /// Check if an instruction \p I is within a clause and return a suitable
  /// iterator at which to insert the mode change. May also modify the
  /// S_CLAUSE instruction to extend it, or drop the clause if it cannot be
  /// adjusted.
  MachineBasicBlock::instr_iterator
  handleClause(MachineBasicBlock::instr_iterator I);

  /// Check if an instruction \p I is immediately after another program state
  /// instruction which it cannot coissue with. If so, insert before that
  /// instruction to encourage more coissuing.
  MachineBasicBlock::instr_iterator
  handleCoissue(MachineBasicBlock::instr_iterator I);

  /// Handle S_SETREG_IMM32_B32 targeting the MODE register. On certain
  /// hardware this instruction clobbers the VGPR MSB bits [12:19], so we need
  /// to restore the current mode. \returns true if the instruction was
  /// modified or a new one was inserted.
  bool handleSetregMode(MachineInstr &MI);

  /// Update bits [12:19] of the imm operand in S_SETREG_IMM32_B32 to contain
  /// the VGPR MSB mode value. \returns true if the immediate was changed.
  bool updateSetregModeImm(MachineInstr &MI, int64_t ModeValue);
};

bool AMDGPULowerVGPREncoding::setMode(ModeTy NewMode, ModeTy Mask,
                                      MachineBasicBlock::instr_iterator I) {
  assert((NewMode.raw_bits() & ~Mask.raw_bits()).none());

  auto Delta = NewMode.raw_bits() ^ CurrentMode.raw_bits();

  if ((Delta & Mask.raw_bits()).none()) {
    CurrentMask |= Mask;
    return false;
  }

  if (MostRecentModeSet && (Delta & CurrentMask.raw_bits()).none()) {
    CurrentMode |= NewMode;
    CurrentMask |= Mask;

    // Update MostRecentModeSet with the new mode. It can be either
    // S_SET_VGPR_MSB or S_SETREG_IMM32_B32 (with Size <= 12).
    if (MostRecentModeSet->getOpcode() == AMDGPU::S_SET_VGPR_MSB) {
      MachineOperand &Op = MostRecentModeSet->getOperand(0);
      // Carry old mode bits from the existing instruction.
      int64_t OldModeBits = Op.getImm() & (ModeMask << ModeWidth);
      Op.setImm(CurrentMode | OldModeBits);
    } else {
      assert(MostRecentModeSet->getOpcode() == AMDGPU::S_SETREG_IMM32_B32 &&
             "unexpected MostRecentModeSet opcode");
      updateSetregModeImm(*MostRecentModeSet, CurrentMode);
    }

    return true;
  }

  // Record the previous mode in the high 8 bits of the immediate.
  int64_t OldModeBits = CurrentMode << ModeWidth;

  I = handleClause(I);
  I = handleCoissue(I);
  MostRecentModeSet = BuildMI(*MBB, I, {}, TII->get(AMDGPU::S_SET_VGPR_MSB))
                          .addImm(NewMode | OldModeBits);

  CurrentMode = NewMode;
  CurrentMask = Mask;
  return true;
}
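
// Illustrative scenario (not from the original source): if the most recent
// s_set_vgpr_msb was emitted for an instruction that only read a high src0
// (CurrentMask covers just that field) and the next instruction additionally
// needs the vdst MSB set, setMode() above ORs the new bits into the existing
// instruction's immediate ("piggybacking") instead of emitting a second
// s_set_vgpr_msb.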

std::optional<unsigned>
AMDGPULowerVGPREncoding::getMSBs(const MachineOperand &MO) const {
  if (!MO.isReg())
    return std::nullopt;

  MCRegister Reg = MO.getReg();
  const TargetRegisterClass *RC = TRI->getPhysRegBaseClass(Reg);
  if (!RC || !TRI->isVGPRClass(RC))
    return std::nullopt;

  unsigned Idx = TRI->getHWRegIndex(Reg);
  return Idx >> 8;
}
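
// Worked example (illustrative): for v260 the hardware register index is 260,
// so getMSBs() returns 260 >> 8 == 1; for v4 it returns 0.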

void AMDGPULowerVGPREncoding::computeMode(ModeTy &NewMode, ModeTy &Mask,
                                          MachineInstr &MI,
                                          const AMDGPU::OpName Ops[OpNum],
                                          const AMDGPU::OpName *Ops2) {
  NewMode = {};
  Mask = {};

  for (unsigned I = 0; I < OpNum; ++I) {
    MachineOperand *Op = TII->getNamedOperand(MI, Ops[I]);

    std::optional<unsigned> MSBits;
    if (Op)
      MSBits = getMSBs(*Op);

#if !defined(NDEBUG)
    if (MSBits.has_value() && Ops2) {
      auto Op2 = TII->getNamedOperand(MI, Ops2[I]);
      if (Op2) {
        std::optional<unsigned> MSBits2 = getMSBs(*Op2);
        if (MSBits2.has_value() && MSBits != MSBits2)
          llvm_unreachable("Invalid VOPD pair was created");
      }
    }
#endif

    if (!MSBits.has_value() && Ops2) {
      Op = TII->getNamedOperand(MI, Ops2[I]);
      if (Op)
        MSBits = getMSBs(*Op);
    }

    if (!MSBits.has_value())
      continue;

    // Skip tied uses of src2 of VOP2; these will be handled along with defs,
    // and only the vdst bit affects these operands. We cannot skip tied uses
    // of VOP3: these uses are real even if they must match the vdst.
    if (Ops[I] == AMDGPU::OpName::src2 && !Op->isDef() && Op->isTied() &&
        (SIInstrInfo::isVOP2(MI) ||
         (SIInstrInfo::isVOP3(MI) &&
          TII->hasVALU32BitEncoding(MI.getOpcode()))))
      continue;

    NewMode[I] = MSBits.value();
    Mask[I] = FieldMask;
  }
}

bool AMDGPULowerVGPREncoding::runOnMachineInstr(MachineInstr &MI) {
  auto Ops = AMDGPU::getVGPRLoweringOperandTables(MI.getDesc());
  if (Ops.first) {
    ModeTy NewMode, Mask;
    computeMode(NewMode, Mask, MI, Ops.first, Ops.second);
    return setMode(NewMode, Mask, MI.getIterator());
  }
  assert(!TII->hasVGPRUses(MI) || MI.isMetaInstruction() || MI.isPseudo());

  return false;
}

MachineBasicBlock::instr_iterator
AMDGPULowerVGPREncoding::handleClause(MachineBasicBlock::instr_iterator I) {
  if (!ClauseRemaining)
    return I;

  // A clause cannot start with a special instruction, so place it right
  // before the clause.
  if (ClauseRemaining == ClauseLen) {
    I = Clause->getPrevNode()->getIterator();
    assert(I->isBundle());
    return I;
  }

  // If a clause defines breaks, each group cannot start with a mode change.
  // Just drop the clause.
  if (ClauseBreaks) {
    Clause->eraseFromBundle();
    ClauseRemaining = 0;
    return I;
  }

  // Otherwise adjust the number of instructions in the clause if it fits.
  // If it does not, the clause will just become shorter. Since the length
  // recorded in the clause is one less, increment the length after the
  // update. Note that SIMM16[5:0] must be 1-62, not 0 or 63.
  if (ClauseLen < 63)
    Clause->getOperand(0).setImm(ClauseLen | (ClauseBreaks << 8));

  ++ClauseLen;

  return I;
}
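
// Worked example (illustrative): for a clause that originally covered four
// instructions, SIMM16[5:0] is 3. When a mode change is inserted mid-clause,
// handleClause() rewrites the immediate to 4 so the clause now covers five
// instructions, including the inserted s_set_vgpr_msb.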

MachineBasicBlock::instr_iterator
AMDGPULowerVGPREncoding::handleCoissue(MachineBasicBlock::instr_iterator I) {
  if (I.isEnd())
    return I;

  if (I == I->getParent()->begin())
    return I;

  MachineBasicBlock::instr_iterator Prev = std::prev(I);
  auto isProgramStateSALU = [this](MachineInstr *MI) {
    return TII->isBarrier(MI->getOpcode()) ||
           TII->isWaitcnt(MI->getOpcode()) ||
           (SIInstrInfo::isProgramStateSALU(*MI) &&
            MI->getOpcode() != AMDGPU::S_SET_VGPR_MSB);
  };

  if (!isProgramStateSALU(&*Prev))
    return I;

  while (!Prev.isEnd() && (Prev != Prev->getParent()->begin()) &&
         isProgramStateSALU(&*Prev)) {
    --Prev;
  }
  return Prev;
}

/// Convert a mode value from S_SET_VGPR_MSB format to MODE register format.
/// S_SET_VGPR_MSB uses: (src0[0-1], src1[2-3], src2[4-5], dst[6-7])
/// The MODE register uses: (dst[0-1], src0[2-3], src1[4-5], src2[6-7])
/// This is a left rotation by 2 bits on an 8-bit value.
static int64_t convertModeToSetregFormat(int64_t Mode) {
  assert(isUInt<8>(Mode) && "Mode expected to be 8-bit");
  return llvm::rotl<uint8_t>(static_cast<uint8_t>(Mode), /*R=*/2);
}
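
// Worked example (illustrative): a mode of 0x40 (dst MSB = 1 in bits [6:7] of
// the S_SET_VGPR_MSB format) rotates left by 2 to 0x01, i.e. dst MSB = 1 in
// bits [0:1] of the MODE register format.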

bool AMDGPULowerVGPREncoding::updateSetregModeImm(MachineInstr &MI,
                                                  int64_t ModeValue) {
  assert(MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32);

  // Convert from S_SET_VGPR_MSB format to MODE register format.
  int64_t SetregMode = convertModeToSetregFormat(ModeValue);

  MachineOperand *ImmOp = TII->getNamedOperand(MI, AMDGPU::OpName::imm);
  int64_t OldImm = ImmOp->getImm();
  int64_t NewImm =
      (OldImm & ~AMDGPU::Hwreg::VGPR_MSB_MASK) | (SetregMode << VGPRMSBShift);
  ImmOp->setImm(NewImm);
  return NewImm != OldImm;
}
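
// Worked example (illustrative, assuming VGPR_MSB_MASK covers imm32[12:19] as
// the surrounding comments state, so VGPRMSBShift == 12): with ModeValue 0x40
// the converted MODE-format value is 0x01, and updateSetregModeImm() rewrites
// the immediate to (OldImm & ~0xFF000) | (0x01 << 12).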

bool AMDGPULowerVGPREncoding::handleSetregMode(MachineInstr &MI) {
  using namespace AMDGPU::Hwreg;

  assert(MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 &&
         "only S_SETREG_IMM32_B32 needs to be handled");

  MachineOperand *SIMM16Op = TII->getNamedOperand(MI, AMDGPU::OpName::simm16);
  assert(SIMM16Op && "SIMM16Op must be present");

  auto [HwRegId, Offset, Size] = HwregEncoding::decode(SIMM16Op->getImm());
  (void)Offset;
  if (HwRegId != ID_MODE)
    return false;

  int64_t ModeValue = static_cast<int64_t>(CurrentMode);

  // Case 1: Size <= 12 - the original instruction uses imm32[0:Size-1], so
  // imm32[12:19] is unused. It is safe to set imm32[12:19] to the correct
  // VGPR MSBs.
  if (Size <= VGPRMSBShift) {
    // This instruction now acts as MostRecentModeSet, so it can be updated if
    // CurrentMode changes via piggybacking.
    MostRecentModeSet = &MI;
    return updateSetregModeImm(MI, ModeValue);
  }

  // Case 2: Size > 12 - the original instruction uses bits beyond 11, so we
  // cannot arbitrarily modify imm32[12:19]. Check if it already matches the
  // VGPR MSBs. Note: imm32[12:19] is in MODE register format, while ModeValue
  // is in S_SET_VGPR_MSB format, so we need to convert before comparing.
  MachineOperand *ImmOp = TII->getNamedOperand(MI, AMDGPU::OpName::imm);
  assert(ImmOp && "ImmOp must be present");
  int64_t ImmBits12To19 = (ImmOp->getImm() & VGPR_MSB_MASK) >> VGPRMSBShift;
  int64_t SetregModeValue = convertModeToSetregFormat(ModeValue);
  if (ImmBits12To19 == SetregModeValue) {
    // Already correct, but we must invalidate MostRecentModeSet because this
    // instruction will overwrite mode[12:19]. We can't update this
    // instruction via piggybacking (bits [12:19] are meaningful), so if
    // CurrentMode changes, a new s_set_vgpr_msb will be inserted after this
    // instruction.
    MostRecentModeSet = nullptr;
    return false;
  }

  // imm32[12:19] doesn't match the VGPR MSBs - insert s_set_vgpr_msb after
  // the original instruction to restore the correct value.
  MachineBasicBlock::iterator InsertPt = std::next(MI.getIterator());
  MostRecentModeSet = BuildMI(*MBB, InsertPt, MI.getDebugLoc(),
                              TII->get(AMDGPU::S_SET_VGPR_MSB))
                          .addImm(ModeValue);
  return true;
}
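
// Illustrative example (not from the original source): for
//   s_setreg_imm32_b32 hwreg(HW_REG_MODE, 0, 4), 0x3
// Size is 4 <= 12, so case 1 applies and imm32[12:19] is patched in place to
// carry the current VGPR MSBs; a write with Size > 12 instead falls into
// case 2 and may require a trailing s_set_vgpr_msb.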

bool AMDGPULowerVGPREncoding::run(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (!ST.has1024AddressableVGPRs())
    return false;

  TII = ST.getInstrInfo();
  TRI = ST.getRegisterInfo();

  bool Changed = false;
  ClauseLen = ClauseRemaining = 0;
  CurrentMode.reset();
  CurrentMask.reset();
  for (auto &MBB : MF) {
    MostRecentModeSet = nullptr;
    this->MBB = &MBB;

    for (auto &MI : llvm::make_early_inc_range(MBB.instrs())) {
      if (MI.isMetaInstruction())
        continue;

      if (MI.isTerminator() || MI.isCall()) {
        if (MI.getOpcode() == AMDGPU::S_ENDPGM ||
            MI.getOpcode() == AMDGPU::S_ENDPGM_SAVED)
          CurrentMode.reset();
        else
          resetMode(MI.getIterator());
        continue;
      }

      if (MI.isInlineAsm()) {
        if (TII->hasVGPRUses(MI))
          resetMode(MI.getIterator());
        continue;
      }

      if (MI.getOpcode() == AMDGPU::S_CLAUSE) {
        assert(!ClauseRemaining && "Nested clauses are not supported");
        ClauseLen = MI.getOperand(0).getImm();
        ClauseBreaks = (ClauseLen >> 8) & 15;
        ClauseLen = ClauseRemaining = (ClauseLen & 63) + 1;
        Clause = &MI;
        continue;
      }
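      // Worked example (illustrative): an S_CLAUSE immediate of 0x103 decodes
      // above to ClauseBreaks = 1 and ClauseLen = ClauseRemaining = 4, i.e. a
      // clause of four instructions with group breaks enabled.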

      if (MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 &&
          ST.hasSetregVGPRMSBFixup()) {
        Changed |= handleSetregMode(MI);
        continue;
      }

      Changed |= runOnMachineInstr(MI);

      if (ClauseRemaining)
        --ClauseRemaining;
    }

    // Reset the mode if we are falling through.
    resetMode(MBB.instr_end());
  }

  return Changed;
}

class AMDGPULowerVGPREncodingLegacy : public MachineFunctionPass {
public:
  static char ID;

  AMDGPULowerVGPREncodingLegacy() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    return AMDGPULowerVGPREncoding().run(MF);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // namespace

char AMDGPULowerVGPREncodingLegacy::ID = 0;

char &llvm::AMDGPULowerVGPREncodingLegacyID = AMDGPULowerVGPREncodingLegacy::ID;

INITIALIZE_PASS(AMDGPULowerVGPREncodingLegacy, DEBUG_TYPE,
                "AMDGPU Lower VGPR Encoding", false, false)

PreservedAnalyses
AMDGPULowerVGPREncodingPass::run(MachineFunction &MF,
                                 MachineFunctionAnalysisManager &MFAM) {
  if (!AMDGPULowerVGPREncoding().run(MF))
    return PreservedAnalyses::all();

  return getMachineFunctionPassPreservedAnalyses().preserveSet<CFGAnalyses>();
}