#include "llvm/IR/IntrinsicsAArch64.h"

#define GET_TARGET_REGBANK_IMPL
#include "AArch64GenRegisterBank.inc"

#include "AArch64GenRegisterBankInfo.def"
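// AArch64GenRegisterBank.inc carries the TableGen'ed register bank
// definitions; AArch64GenRegisterBankInfo.def carries the PartMappings and
// ValMappings tables that the constructor below cross-checks.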
AArch64RegisterBankInfo::AArch64RegisterBankInfo(
    const TargetRegisterInfo &TRI)
    : AArch64GenRegisterBankInfo() {
  static llvm::once_flag InitializeRegisterBankFlag;

  static auto InitializeRegisterBankOnce = [&]() {
    const RegisterBank &RBGPR = getRegBank(AArch64::GPRRegBankID);
    assert(&AArch64::GPRRegBank == &RBGPR &&
           "The order in RegBanks is messed up");

    const RegisterBank &RBFPR = getRegBank(AArch64::FPRRegBankID);
    assert(&AArch64::FPRRegBank == &RBFPR &&
           "The order in RegBanks is messed up");

    const RegisterBank &RBCCR = getRegBank(AArch64::CCRegBankID);
    assert(&AArch64::CCRegBank == &RBCCR &&
           "The order in RegBanks is messed up");

    // The GPR register bank is fully defined by GPR64all and its subclasses.
    assert(RBGPR.covers(*TRI.getRegClass(AArch64::GPR32RegClassID)) &&
           "Subclass not added?");
    assert(RBGPR.getSize() == 128 && "GPRs should hold up to 128-bit");

    // The FPR register bank is fully defined by the vector/FP subclasses.
    assert(RBFPR.covers(*TRI.getRegClass(AArch64::QQRegClassID)) &&
           "Subclass not added?");
    assert(RBFPR.covers(*TRI.getRegClass(AArch64::FPR64RegClassID)) &&
           "Subclass not added?");
    assert(RBFPR.getSize() == 512 &&
           "FPRs should hold up to 512-bit via QQQQ sequence");

    assert(RBCCR.covers(*TRI.getRegClass(AArch64::CCRRegClassID)) &&
           "Subclass not added?");
    assert(RBCCR.getSize() == 32 && "CCR should hold up to 32-bit");

    // Check that the TableGen'ed tables are in sync with our expectations.
    assert(checkPartialMappingIdx(PMI_FirstGPR, PMI_LastGPR,
                                  {PMI_GPR32, PMI_GPR64, PMI_GPR128}) &&
           "PartialMappingIdx's are incorrectly ordered");
    assert(checkPartialMappingIdx(PMI_FirstFPR, PMI_LastFPR,
                                  {PMI_FPR16, PMI_FPR32, PMI_FPR64, PMI_FPR128,
                                   PMI_FPR256, PMI_FPR512}) &&
           "PartialMappingIdx's are incorrectly ordered");
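// The CHECK_* macros below expand to asserts only, so they cost nothing in
// release builds; they verify that the PartMappings/ValMappings tables from
// AArch64GenRegisterBankInfo.def line up with the PartialMappingIdx values
// used throughout this file.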
#define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB)                      \
  do {                                                                         \
    assert(                                                                    \
        checkPartialMap(PartialMappingIdx::Idx, ValStartIdx, ValLength, RB) && \
        #Idx " is incorrectly initialized");                                   \
  } while (false)
#define CHECK_VALUEMAP_IMPL(RBName, Size, Offset)                              \
  do {                                                                         \
    assert(checkValueMapImpl(PartialMappingIdx::PMI_##RBName##Size,            \
                             PartialMappingIdx::PMI_First##RBName, Size,       \
                             Offset) &&                                        \
           #RBName #Size " " #Offset " is incorrectly initialized");           \
  } while (false)

#define CHECK_VALUEMAP(RBName, Size) CHECK_VALUEMAP_IMPL(RBName, Size, 0)
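// Instructions with three register operands (one def, two uses) keep all
// operands in the same bank, so the same ValueMapping row must be valid at
// operand offsets 0, 1 and 2.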
#define CHECK_VALUEMAP_3OPS(RBName, Size)                                      \
  do {                                                                         \
    CHECK_VALUEMAP_IMPL(RBName, Size, 0);                                      \
    CHECK_VALUEMAP_IMPL(RBName, Size, 1);                                      \
    CHECK_VALUEMAP_IMPL(RBName, Size, 2);                                      \
  } while (false)
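// A cross-bank copy uses two different partial mappings (one per bank), so
// the destination and source halves of getCopyMapping's result are checked
// against the PartMappings table separately.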
#define CHECK_VALUEMAP_CROSSREGCPY(RBNameDst, RBNameSrc, Size)                 \
  do {                                                                         \
    unsigned PartialMapDstIdx = PMI_##RBNameDst##Size - PMI_Min;               \
    unsigned PartialMapSrcIdx = PMI_##RBNameSrc##Size - PMI_Min;               \
    (void)PartialMapDstIdx;                                                    \
    (void)PartialMapSrcIdx;                                                    \
    const ValueMapping *Map = getCopyMapping(                                  \
        AArch64::RBNameDst##RegBankID, AArch64::RBNameSrc##RegBankID, Size);   \
    (void)Map;                                                                 \
    assert(Map[0].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] &&  \
           Map[0].NumBreakDowns == 1 && #RBNameDst #Size                       \
           " Dst is incorrectly initialized");                                 \
    assert(Map[1].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] &&  \
           Map[1].NumBreakDowns == 1 && #RBNameSrc #Size                       \
           " Src is incorrectly initialized");                                 \
  } while (false)
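// G_FPEXT is the one mapping whose two operands intentionally differ in size,
// so it is checked with an explicit (DstSize, SrcSize) pair.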
#define CHECK_VALUEMAP_FPEXT(DstSize, SrcSize)                                 \
  do {                                                                         \
    unsigned PartialMapDstIdx = PMI_FPR##DstSize - PMI_Min;                    \
    unsigned PartialMapSrcIdx = PMI_FPR##SrcSize - PMI_Min;                    \
    (void)PartialMapDstIdx;                                                    \
    (void)PartialMapSrcIdx;                                                    \
    const ValueMapping *Map = getFPExtMapping(DstSize, SrcSize);               \
    (void)Map;                                                                 \
    assert(Map[0].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] &&  \
           Map[0].NumBreakDowns == 1 && "FPR" #DstSize                         \
           " Dst is incorrectly initialized");                                 \
    assert(Map[1].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] &&  \
           Map[1].NumBreakDowns == 1 && "FPR" #SrcSize                         \
           " Src is incorrectly initialized");                                 \
  } while (false)
  llvm::call_once(InitializeRegisterBankFlag, InitializeRegisterBankOnce);
}
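// copyCost: moving a value between GPR and FPR requires an FMOV-style
// instruction, so cross-bank copies are priced higher than same-bank copies.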
unsigned AArch64RegisterBankInfo::copyCost(const RegisterBank &A,
                                           const RegisterBank &B,
                                           unsigned Size) const {
  // Copies between GPR and FPR involve an FMOV.
  // FIXME: This should be deduced from the scheduling model.
  if (&A == &AArch64::GPRRegBank && &B == &AArch64::FPRRegBank)
    return 5;
  if (&A == &AArch64::FPRRegBank && &B == &AArch64::GPRRegBank)
    return 4;

  return RegisterBankInfo::copyCost(A, B, Size);
}
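// getRegBankFromRegClass: map a register class from the .td files onto the
// bank that covers it: FP/vector classes onto FPR, scalar integer classes
// onto GPR, and the flags class onto CC.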
const RegisterBank &
AArch64RegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
                                                LLT) const {
  switch (RC.getID()) {
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR16_loRegClassID:
  case AArch64::FPR32_with_hsub_in_FPR16_loRegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR64_loRegClassID:
  case AArch64::FPR128RegClassID:
  case AArch64::FPR128_loRegClassID:
  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return getRegBank(AArch64::FPRRegBankID);
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32sponlyRegClassID:
  case AArch64::GPR32argRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64commonRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64sponlyRegClassID:
  case AArch64::GPR64argRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64noipRegClassID:
  case AArch64::GPR64common_and_GPR64noipRegClassID:
  case AArch64::GPR64noip_and_tcGPR64RegClassID:
  case AArch64::tcGPR64RegClassID:
  case AArch64::rtcGPR64RegClassID:
  case AArch64::WSeqPairsClassRegClassID:
  case AArch64::XSeqPairsClassRegClassID:
  case AArch64::MatrixIndexGPR32_8_11RegClassID:
  case AArch64::MatrixIndexGPR32_12_15RegClassID:
  case AArch64::GPR64_with_sub_32_in_MatrixIndexGPR32_8_11RegClassID:
  case AArch64::GPR64_with_sub_32_in_MatrixIndexGPR32_12_15RegClassID:
    return getRegBank(AArch64::GPRRegBankID);
  case AArch64::CCRRegClassID:
    return getRegBank(AArch64::CCRegBankID);
  default:
    llvm_unreachable("Register class not supported");
  }
}
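// getInstrAlternativeMappings: for a few opcodes that are legal on both sides
// (G_OR, G_BITCAST, G_LOAD), advertise both an all-GPR and an all-FPR mapping
// so that RegBankSelect can pick the cheaper alternative.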
RegisterBankInfo::InstructionMappings
AArch64RegisterBankInfo::getInstrAlternativeMappings(
    const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_OR: {
    // 32- and 64-bit G_OR can be mapped to either FPR or GPR for the same
    // cost. If the instruction has any implicit defs or uses, leave it alone.
    if (MI.getNumOperands() != 3)
      break;
    // ... build and return the all-GPR and all-FPR alternatives ...
  }
  case TargetOpcode::G_BITCAST: {
    if (MI.getNumOperands() != 2)
      break;
    // ... same-bank alternatives plus two cross-bank ones, each priced with
    //     copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
    //     as the mapping cost ...
  }
  case TargetOpcode::G_LOAD: {
    if (MI.getNumOperands() != 2)
      break;
    // ... GPR-load and FPR-load alternatives ...
  }
  default:
    break;
  }
  return RegisterBankInfo::getInstrAlternativeMappings(MI);
}
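// applyMappingImpl: the alternative mappings above only move operands between
// banks, so rewriting the operands with the default helper is sufficient.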
void AArch64RegisterBankInfo::applyMappingImpl(
    const OperandsMapper &OpdMapper) const {
  switch (OpdMapper.getMI().getOpcode()) {
  case TargetOpcode::G_OR:
  case TargetOpcode::G_BITCAST:
  case TargetOpcode::G_LOAD:
    // These IDs must match getInstrAlternativeMappings.
    assert((OpdMapper.getInstrMapping().getID() >= 1 &&
            OpdMapper.getInstrMapping().getID() <= 4) &&
           "Don't know how to handle that ID");
    return applyDefaultMapping(OpdMapper);
  default:
    llvm_unreachable("Don't know how to handle that operation");
  }
}
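// isPreISelGenericFloatingPointOpcode: generic opcodes that always produce
// and consume floating-point values; their operands can safely be assumed to
// live on FPR.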
static bool isPreISelGenericFloatingPointOpcode(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FMA:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FCEIL:
  case TargetOpcode::G_FFLOOR:
  case TargetOpcode::G_FNEARBYINT:
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_FCOS:
  case TargetOpcode::G_FSIN:
  case TargetOpcode::G_FLOG10:
  case TargetOpcode::G_FLOG:
  case TargetOpcode::G_FLOG2:
  case TargetOpcode::G_FSQRT:
  case TargetOpcode::G_FABS:
  case TargetOpcode::G_FEXP:
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_INTRINSIC_TRUNC:
  case TargetOpcode::G_INTRINSIC_ROUND:
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXIMUM:
  case TargetOpcode::G_FMINIMUM:
    return true;
  }
  return false;
}
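// getSameKindOfOperandsMapping: used for instructions whose operands must all
// be in the same bank and have the same size, e.g. the arithmetic and shift
// opcodes dispatched from getInstrMapping below.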
const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getSameKindOfOperandsMapping(
    const MachineInstr &MI) const {
  const unsigned Opc = MI.getOpcode();
  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();

  unsigned NumOperands = MI.getNumOperands();
  assert(NumOperands <= 3 &&
         "This code is for instructions with 3 or less operands");

  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  unsigned Size = Ty.getSizeInBits();
  bool IsFPR = Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
  PartialMappingIdx RBIdx = IsFPR ? PMI_FirstFPR : PMI_FirstGPR;

  // Make sure all the operands have the same size and kind.
  for (unsigned Idx = 1; Idx != NumOperands; ++Idx) {
    LLT OpTy = MRI.getType(MI.getOperand(Idx).getReg());
    assert(
        AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(
            RBIdx, OpTy.getSizeInBits()) ==
            AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(RBIdx, Size) &&
        "Operand has incompatible size");
    bool OpIsFPR = OpTy.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
    (void)OpIsFPR;
    assert(IsFPR == OpIsFPR && "Operand has incompatible type");
  }

  return getInstructionMapping(DefaultMappingID, 1,
                               getValueMapping(RBIdx, Size), NumOperands);
}
// isFPIntrinsic: intrinsics whose result is produced on the FP/SIMD side even
// though the generic opcode alone does not say so.
static bool isFPIntrinsic(unsigned ID) {
  // TODO: Add more intrinsics.
  switch (ID) {
  default:
    return false;
  case Intrinsic::aarch64_neon_uaddlv:
    return true;
  }
}
bool AArch64RegisterBankInfo::hasFPConstraints(const MachineInstr &MI,
                                               const MachineRegisterInfo &MRI,
                                               const TargetRegisterInfo &TRI,
                                               unsigned Depth) const {
  unsigned Op = MI.getOpcode();
  if (Op == TargetOpcode::G_INTRINSIC && isFPIntrinsic(MI.getIntrinsicID()))
    return true;

  // Do we have an explicit floating point instruction?
  if (isPreISelGenericFloatingPointOpcode(Op))
    return true;

  // No. Check if we have a copy-like instruction. If we do, then we could
  // still be fed by floating point instructions.
  if (Op != TargetOpcode::COPY && !MI.isPHI() &&
      !isPreISelGenericOptimizationHint(Op))
    return false;

  // Check if we already know the register bank.
  auto *RB = getRegBank(MI.getOperand(0).getReg(), MRI, TRI);
  if (RB == &AArch64::FPRRegBank)
    return true;
  if (RB == &AArch64::GPRRegBank)
    return false;

  // We don't know anything. If we have a phi, we may be able to infer that it
  // will be assigned a FPR based off of its inputs.
  if (!MI.isPHI() || Depth > MaxFPRSearchDepth)
    return false;

  return any_of(MI.explicit_uses(), [&](const MachineOperand &Op) {
    return Op.isReg() &&
           onlyDefinesFP(*MRI.getVRegDef(Op.getReg()), MRI, TRI, Depth + 1);
  });
}
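// onlyUsesFP: conservatively true when the instruction consumes its inputs on
// the FP side (FP-to-integer conversions, FP compares, lround/llround), or
// when hasFPConstraints already holds for it.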
bool AArch64RegisterBankInfo::onlyUsesFP(const MachineInstr &MI,
                                         const MachineRegisterInfo &MRI,
                                         const TargetRegisterInfo &TRI,
                                         unsigned Depth) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI:
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_LROUND:
  case TargetOpcode::G_LLROUND:
    return true;
  default:
    break;
  }
  return hasFPConstraints(MI, MRI, TRI, Depth);
}
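// onlyDefinesFP: true when the instruction's result is naturally produced on
// the FP/SIMD side (integer-to-FP conversions and vector-building operations).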
bool AArch64RegisterBankInfo::onlyDefinesFP(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI,
                                            const TargetRegisterInfo &TRI,
                                            unsigned Depth) const {
  switch (MI.getOpcode()) {
  case AArch64::G_DUP:
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
  case TargetOpcode::G_INSERT_VECTOR_ELT:
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_BUILD_VECTOR_TRUNC:
    return true;
  default:
    break;
  }
  return hasFPConstraints(MI, MRI, TRI, Depth);
}
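// getInstrMapping: the main entry point for RegBankSelect. Try the default
// mapping first; otherwise guess a bank per operand, refine the guess per
// opcode, and finally turn the guesses into ValueMappings.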
const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  const unsigned Opc = MI.getOpcode();

  // Try the default mapping for copies, other non-generic instructions and
  // PHIs; it is correct whenever the operand banks are already known.
  if ((Opc != TargetOpcode::COPY && !isPreISelGenericOpcode(Opc)) ||
      Opc == TargetOpcode::G_PHI) {
    const RegisterBankInfo::InstructionMapping &Mapping =
        getInstrMappingImpl(MI);
    if (Mapping.isValid())
      return Mapping;
  }

  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();

  switch (Opc) {
    // Arithmetic ops.
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
    // Bitwise ops.
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
    // Floating point ops.
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FMAXIMUM:
  case TargetOpcode::G_FMINIMUM:
    return getSameKindOfOperandsMapping(MI);
  case TargetOpcode::G_FPEXT: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    return getInstructionMapping(
        DefaultMappingID, /*Cost*/ 1,
        getFPExtMapping(DstTy.getSizeInBits(), SrcTy.getSizeInBits()),
        /*NumOperands*/ 2);
  }
    // Shifts.
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR: {
    LLT ShiftAmtTy = MRI.getType(MI.getOperand(2).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    if (ShiftAmtTy.getSizeInBits() == 64 && SrcTy.getSizeInBits() == 32)
      return getInstructionMapping(DefaultMappingID, 1,
                                   getValueMapping(PMI_FirstGPR, 32),
                                   /*NumOperands*/ 3);
    return getSameKindOfOperandsMapping(MI);
  }
  case TargetOpcode::COPY: {
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    // If either register is physical or has no LLT, one side already has a
    // fixed bank: inherit it for the other side.
    if ((DstReg.isPhysical() || !MRI.getType(DstReg).isValid()) ||
        (SrcReg.isPhysical() || !MRI.getType(SrcReg).isValid())) {
      const RegisterBank *DstRB = getRegBank(DstReg, MRI, TRI);
      const RegisterBank *SrcRB = getRegBank(SrcReg, MRI, TRI);
      if (!DstRB)
        DstRB = SrcRB;
      else if (!SrcRB)
        SrcRB = DstRB;
      // If both banks were null, both registers are generic and we should not
      // have taken this path.
      assert(DstRB && SrcRB && "Both RegBank were nullptr");
      unsigned Size = getSizeInBits(DstReg, MRI, TRI);
      return getInstructionMapping(
          DefaultMappingID, copyCost(*DstRB, *SrcRB, Size),
          getCopyMapping(DstRB->getID(), SrcRB->getID(), Size),
          /*NumOperands*/ 2);
    }
    // Both registers are generic, handle like a G_BITCAST.
    [[fallthrough]];
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    unsigned Size = DstTy.getSizeInBits();
    bool DstIsGPR = !DstTy.isVector() && DstTy.getSizeInBits() <= 64;
    bool SrcIsGPR = !SrcTy.isVector() && SrcTy.getSizeInBits() <= 64;
    const RegisterBank &DstRB =
        DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
    const RegisterBank &SrcRB =
        SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
    return getInstructionMapping(
        DefaultMappingID, copyCost(DstRB, SrcRB, Size),
        getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
        // We only care about the mapping of the destination for a copy.
        /*NumOperands*/ Opc == TargetOpcode::G_BITCAST ? 2 : 1);
  }
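  // For all remaining opcodes the mapping is computed operand by operand:
  // first a top-level guess per operand, then opcode-specific refinements in
  // the switch below.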
  unsigned NumOperands = MI.getNumOperands();

  // Track the size and bank of each register operand; partial mappings are
  // not used on AArch64.
  SmallVector<unsigned, 4> OpSize(NumOperands);
  SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    auto &MO = MI.getOperand(Idx);
    if (!MO.isReg() || !MO.getReg())
      continue;

    LLT Ty = MRI.getType(MO.getReg());
    if (!Ty.isValid())
      continue;
    OpSize[Idx] = Ty.getSizeInBits();

    // As a top-level guess, vectors and floating-point operands go in FPR,
    // scalars and pointers in GPR.
    if (Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc) ||
        Ty.getSizeInBits() > 64)
      OpRegBankIdx[Idx] = PMI_FirstFPR;
    else
      OpRegBankIdx[Idx] = PMI_FirstGPR;
  }
  unsigned Cost = 1;
  // Some instructions have mixed GPR and FPR operands: fine-tune the guess.
  switch (Opc) {
  case AArch64::G_DUP: {
    Register ScalarReg = MI.getOperand(1).getReg();
    LLT ScalarTy = MRI.getType(ScalarReg);
    auto ScalarDef = MRI.getVRegDef(ScalarReg);
    // s8 is an exception for G_DUP, which we always want on GPR.
    if (ScalarTy.getSizeInBits() != 8 &&
        (getRegBank(ScalarReg, MRI, TRI) == &AArch64::FPRRegBank ||
         onlyDefinesFP(*ScalarDef, MRI, TRI)))
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
    else
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
    break;
  }
  case TargetOpcode::G_TRUNC: {
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    if (!SrcTy.isVector() && SrcTy.getSizeInBits() == 128)
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
    break;
  }
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP: {
    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
      break;
    // An integer source that already lives in FPR can be converted in place.
    Register SrcReg = MI.getOperand(1).getReg();
    if (getRegBank(SrcReg, MRI, TRI) == &AArch64::FPRRegBank)
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
    else
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
    break;
  }
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI:
    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
      break;
    OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_FCMP: {
    // A vector compare result stays on FPR; a scalar result goes to GPR.
    PartialMappingIdx Idx0 =
        MRI.getType(MI.getOperand(0).getReg()).isVector() ? PMI_FirstFPR
                                                          : PMI_FirstGPR;
    OpRegBankIdx = {Idx0,
                    /* Predicate */ PMI_None, PMI_FirstFPR, PMI_FirstFPR};
    break;
  }
  case TargetOpcode::G_BITCAST:
    // A bitcast that crosses banks is as expensive as a cross-bank copy.
    // (Simplified here: upstream derives both banks from OpRegBankIdx.)
    if (OpRegBankIdx[0] != OpRegBankIdx[1])
      Cost = copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, OpSize[0]);
    break;
  case TargetOpcode::G_LOAD:
    // Atomic loads always stay on GPR; do not refine any further.
    if (cast<GLoad>(MI).isAtomic()) {
      OpRegBankIdx[0] = PMI_FirstGPR;
      break;
    }
    // If the load feeds FP instructions, map it to FPR so we avoid a
    // GPR -> FPR copy later on.
    if (any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
               [&](const MachineInstr &UseMI) {
                 return onlyUsesFP(UseMI, MRI, TRI) ||
                        onlyDefinesFP(UseMI, MRI, TRI);
               }))
      OpRegBankIdx[0] = PMI_FirstFPR;
    break;
  case TargetOpcode::G_STORE:
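    // Symmetric to the G_LOAD case above: if the stored value is defined by
    // an FP instruction, keep operand 0 on FPR (body elided in this excerpt).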
  case TargetOpcode::G_SELECT: {
    // If the destination is already FPR, keep it there.
    if (OpRegBankIdx[0] != PMI_FirstGPR)
      break;
    // Vector selects have to live entirely on FPR.
    LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
    if (SrcTy.isVector()) {
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR, PMI_FirstFPR, PMI_FirstFPR};
      break;
    }
    // Otherwise, count FP-constrained inputs and users and put everything on
    // FPR when that minimizes cross-bank copies.
    unsigned NumFP = 0;
    if (any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
               [&](const MachineInstr &UseMI) {
                 return onlyUsesFP(UseMI, MRI, TRI);
               }))
      ++NumFP;
    // ... also count the two value operands, then pick FPR if enough are FP ...
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    // If the first operand already belongs to FPR, preserve that.
    if (OpRegBankIdx[0] != PMI_FirstGPR)
      break;

    LLT SrcTy = MRI.getType(MI.getOperand(MI.getNumOperands() - 1).getReg());
    // Unmerging scalars from a vector (or a s128) should always use FPR;
    // likewise if any of the uses are FP instructions.
    if (SrcTy.isVector() || SrcTy == LLT::scalar(128) ||
        any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
               [&](const MachineInstr &UseMI) {
                 return onlyUsesFP(UseMI, MRI, TRI);
               })) {
      // Set the register bank of every operand to FPR.
      for (unsigned Idx = 0, NumOperands = MI.getNumOperands();
           Idx < NumOperands; ++Idx)
        OpRegBankIdx[Idx] = PMI_FirstFPR;
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
  case TargetOpcode::G_INSERT_VECTOR_ELT:
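    // Both cases keep the vector operands on FPR and the lane index on GPR;
    // for G_INSERT_VECTOR_ELT the inserted element keeps whichever bank it is
    // already constrained to (bodies elided in this excerpt).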
  case TargetOpcode::G_EXTRACT: {
    // For s128 sources we have to use FPR unless we know otherwise.
    auto Src = MI.getOperand(1).getReg();
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    if (SrcTy.getSizeInBits() != 128)
      break;
    auto Idx = MRI.getRegClassOrNull(Src) == &AArch64::XSeqPairsClassRegClass
                   ? PMI_FirstGPR
                   : PMI_FirstFPR;
    OpRegBankIdx[0] = Idx;
    OpRegBankIdx[1] = Idx;
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // If the first source operand already lives on FPR, keep the guess.
    if (OpRegBankIdx[1] != PMI_FirstGPR)
      break;
    Register VReg = MI.getOperand(1).getReg();
    if (!VReg)
      break;
    MachineInstr *DefMI = MRI.getVRegDef(VReg);
    const LLT SrcTy = MRI.getType(VReg);
    if (all_of(MI.operands(), [&](const MachineOperand &Op) {
          return Op.isDef() || MRI.getVRegDef(Op.getReg())->getOpcode() ==
                                   TargetOpcode::G_CONSTANT;
        })) {
      // Every source is an integer G_CONSTANT: keep them on GPR (details
      // elided in this excerpt).
      break;
    }
    // If the sources look like floating-point values, map every operand to
    // FPR instead.
    if (isPreISelGenericFloatingPointOpcode(DefMI->getOpcode()) ||
        SrcTy.getSizeInBits() < 32 ||
        getRegBank(VReg, MRI, TRI) == &AArch64::FPRRegBank) {
      unsigned NumOperands = MI.getNumOperands();
      for (unsigned Idx = 0; Idx < NumOperands; ++Idx)
        OpRegBankIdx[Idx] = PMI_FirstFPR;
    }
    break;
  }
  case TargetOpcode::G_VECREDUCE_FADD:
  case TargetOpcode::G_VECREDUCE_FMUL:
  case TargetOpcode::G_VECREDUCE_FMAX:
  case TargetOpcode::G_VECREDUCE_FMIN:
  case TargetOpcode::G_VECREDUCE_ADD:
  case TargetOpcode::G_VECREDUCE_MUL:
  case TargetOpcode::G_VECREDUCE_AND:
  case TargetOpcode::G_VECREDUCE_OR:
  case TargetOpcode::G_VECREDUCE_XOR:
  case TargetOpcode::G_VECREDUCE_SMAX:
  case TargetOpcode::G_VECREDUCE_SMIN:
  case TargetOpcode::G_VECREDUCE_UMAX:
  case TargetOpcode::G_VECREDUCE_UMIN:
    // Reductions produce a scalar value from a vector; the scalar should be
    // on FPR.
    OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_VECREDUCE_SEQ_FADD:
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL:
    // These reductions also take a scalar accumulator input; keep it on FPR.
    OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_INTRINSIC: {
    // If the intrinsic constrains its register banks, update the mapping.
    unsigned ID = MI.getIntrinsicID();
    unsigned Idx = 0;
    if (!isFPIntrinsic(ID))
      break;
    for (const auto &Op : MI.explicit_operands()) {
      if (Op.isReg())
        OpRegBankIdx[Idx] = PMI_FirstFPR;
      ++Idx;
    }
    break;
  }
  case TargetOpcode::G_LROUND:
  case TargetOpcode::G_LLROUND: {
    // The source is always floating point and the destination is an integer.
    OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
    break;
  }
  }

  // Finally construct the computed mapping.
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) {
      LLT Ty = MRI.getType(MI.getOperand(Idx).getReg());
      if (!Ty.isValid())
        continue;
      auto Mapping = getValueMapping(OpRegBankIdx[Idx], OpSize[Idx]);
      if (!Mapping->isValid())
        // If we can't map this operand, we can't map the whole instruction.
        return getInvalidInstructionMapping();

      OpdsMapping[Idx] = Mapping;
    }
  }

  return getInstructionMapping(DefaultMappingID, Cost,
                               getOperandsMapping(OpdsMapping), NumOperands);
}