#ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
#define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
  bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                          unsigned ShuffleKind, SelectionDAG &DAG);
  bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                          unsigned ShuffleKind, SelectionDAG &DAG);
  bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                           unsigned ShuffleKind, SelectionDAG &DAG);
  bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                            bool &Swap, bool IsLE);
  bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                             bool &Swap, bool IsLE);
  bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                       unsigned &InsertAtByte, bool &Swap, bool IsLE);
  SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
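  // Illustrative sketch, not part of the original header: the PPC::is*ShuffleMask
  // predicates above are queried while lowering a VECTOR_SHUFFLE to decide whether
  // a single Altivec/VSX permute instruction can implement the mask. A caller
  // might look roughly like this (the local names are hypothetical):
  //
  // \code
  //   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
  //   unsigned ShiftElts;
  //   bool Swap;
  //   if (PPC::isXXSLDWIShuffleMask(SVN, ShiftElts, Swap,
  //                                 Subtarget.isLittleEndian()))
  //     ; // emit XXSLDWI with shift ShiftElts, swapping inputs when Swap is set
  // \endcode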
    SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
                                 bool OptForSize, NegatibleCost &Cost,
                                 unsigned Depth = 0) const override;
    EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                           EVT VT) const override;
    bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
                             SelectionDAG &DAG,
                             MaybeAlign EncodingAlignment = std::nullopt) const;

    void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                       const APInt &DemandedElts,
                                       const SelectionDAG &DAG,
                                       unsigned Depth = 0) const override;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *MBB,
                                        unsigned AtomicSize, unsigned BinOpcode,
                                        unsigned CmpOpcode = 0,
                                        unsigned CmpPred = 0) const;
    MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
                                                MachineBasicBlock *MBB,
                                                bool is8bit, unsigned Opcode,
                                                unsigned CmpOpcode = 0,
                                                unsigned CmpPred = 0) const;
    ConstraintWeight getSingleConstraintMatchWeight(
        AsmOperandInfo &info, const char *constraint) const override;

    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 StringRef Constraint, MVT VT) const override;

    void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                      std::vector<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;
    unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
      if (ConstraintCode == "es")
        return InlineAsm::Constraint_es;
      else if (ConstraintCode == "Q")
        return InlineAsm::Constraint_Q;
      else if (ConstraintCode == "Z")
        return InlineAsm::Constraint_Z;
      else if (ConstraintCode == "Zy")
        return InlineAsm::Constraint_Zy;
      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
    }
    bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                               Type *Ty, unsigned AS,
                               Instruction *I = nullptr) const override;
    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                           Type *Ty) const override;

    bool isDesirableToTransformToIntegerOp(unsigned Opc,
                                           EVT VT) const override {
      // Only handle float load/store pair because float(fpr) load/store
      // instruction has more cycles than integer(gpr) load/store in PPC.
      if (Opc != ISD::LOAD && Opc != ISD::STORE)
        return false;
      if (VT != MVT::f32 && VT != MVT::f64)
        return false;

      return true;
    }
    bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                            MachineFunction &MF,
                            unsigned Intrinsic) const override;

    bool allowsMisalignedMemoryAccesses(
        EVT VT, unsigned AddrSpace, Align Alignment = Align(1),
        MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
        unsigned *Fast = nullptr) const override;
    bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                    EVT VT) const override;
    bool shouldExpandBuildVectorWithShuffles(
        EVT VT, unsigned DefinedValues) const override;

    bool isFPImmLegal(const APFloat &Imm, EVT VT,
                      bool ForCodeSize) const override;

    bool splitValueIntoRegisterParts(
        SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
        unsigned NumParts, MVT PartVT,
        std::optional<CallingConv::ID> CC) const override;

    CCAssignFn *ccAssignFnForCall(CallingConv::ID CC, bool Return,
                                  bool IsVarArg) const;
    struct ReuseLoadInfo {
      SDValue Ptr;
      SDValue Chain;
      SDValue ResChain;
      MachinePointerInfo MPI;

      bool IsDereferenceable = false;
      bool IsInvariant = false;

      Align Alignment;
      AAMDNodes AAInfo;
      const MDNode *Ranges = nullptr;

      ReuseLoadInfo() = default;

      MachineMemOperand::Flags MMOFlags() const {
        MachineMemOperand::Flags F = MachineMemOperand::MONone;
        if (IsDereferenceable)
          F |= MachineMemOperand::MODereferenceable;
        if (IsInvariant)
          F |= MachineMemOperand::MOInvariant;
        return F;
      }
    };
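    // Illustrative sketch, not part of the original header: when a lowering
    // routine can reuse an existing load's address, the recorded ReuseLoadInfo is
    // typically forwarded into the replacement memory node along these lines
    // (the local names are hypothetical):
    //
    // \code
    //   ReuseLoadInfo RLI;
    //   if (canReuseLoadAddress(Src, MVT::i32, RLI, DAG)) {
    //     SDValue Ld = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
    //                              RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo,
    //                              RLI.Ranges);
    //     // ...
    //   }
    // \endcode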
    std::map<PPC::AddrMode, SmallVector<unsigned, 16>> AddrModesMap;
    void initializeAddrModeMap();
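    // Illustrative note, not part of the original header: AddrModesMap is filled
    // once by initializeAddrModeMap() and associates each PPC::AddrMode (D-Form,
    // DS-Form, DQ-Form, X-Form, ...) with the memory-operand flag combinations
    // that may select it. SelectOptimalAddrMode() later derives a node's flags
    // via computeMOFlags() and picks the addressing mode whose flag set matches.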
    bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
                             SelectionDAG &DAG,
                             ISD::LoadExtType ET = ISD::NON_EXTLOAD) const;
    void spliceIntoChain(SDValue ResChain, SDValue NewResChain,
                         SelectionDAG &DAG) const;

    void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                                SelectionDAG &DAG, const SDLoc &dl) const;
    SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
                                     const SDLoc &dl) const;

    bool directMoveIsProfitable(const SDValue &Op) const;
    SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
                                     const SDLoc &dl) const;

    SDValue LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
                                 const SDLoc &dl) const;

    SDValue LowerTRUNCATEVector(SDValue Op, SelectionDAG &DAG) const;

    SDValue getFramePointerFrameIndex(SelectionDAG &DAG) const;
    SDValue getReturnAddrFrameIndex(SelectionDAG &DAG) const;
    bool IsEligibleForTailCallOptimization(
        const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
        CallingConv::ID CallerCC, bool isVarArg,
        const SmallVectorImpl<ISD::InputArg> &Ins) const;

    bool IsEligibleForTailCallOptimization_64SVR4(
        const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
        CallingConv::ID CallerCC, const CallBase *CB, bool isVarArg,
        const SmallVectorImpl<ISD::OutputArg> &Outs,
        const SmallVectorImpl<ISD::InputArg> &Ins, const Function *CallerFunc,
        bool isCalleeExternalSymbol) const;
    bool isEligibleForTCO(const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
                          CallingConv::ID CallerCC, const CallBase *CB,
                          bool isVarArg,
                          const SmallVectorImpl<ISD::OutputArg> &Outs,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const Function *CallerFunc,
                          bool isCalleeExternalSymbol) const;
    SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
                                         SDValue Chain, SDValue &LROpOut,
                                         SDValue &FPOpOut,
                                         const SDLoc &dl) const;
    SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, SDValue GA) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddressAIX(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddressLinux(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                           const SDLoc &dl) const;
    SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFunnelShift(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVPERM(SDValue Op, SelectionDAG &DAG, ArrayRef<int> PermMask,
                       EVT VT, SDValue V1, SDValue V2) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBSWAP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerToLibCall(const char *LibCallName, SDValue Op,
                           SelectionDAG &DAG) const;
    SDValue lowerLibCallBasedOnType(const char *LibCallFloatName,
                                    const char *LibCallDoubleName, SDValue Op,
                                    SelectionDAG &DAG) const;
    bool isLowringToMASSFiniteSafe(SDValue Op) const;
    bool isLowringToMASSSafe(SDValue Op) const;
    bool isScalarMASSConversionEnabled() const;
    SDValue lowerLibCallBase(const char *LibCallDoubleName,
                             const char *LibCallFloatName,
                             const char *LibCallDoubleNameFinite,
                             const char *LibCallFloatNameFinite, SDValue Op,
                             SelectionDAG &DAG) const;
    SDValue lowerPow(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerSin(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerCos(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerLog(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerLog10(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerExp(SDValue Op, SelectionDAG &DAG) const;
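    // Illustrative note, not part of the original header: lowerPow, lowerSin,
    // lowerCos, lowerLog, lowerLog10 and lowerExp are thin wrappers that hand the
    // node to lowerLibCallBase together with the regular and "finite" MASS routine
    // names for the double and float variants. The exact name strings live in
    // PPCISelLowering.cpp; the __xl_* spellings below should be treated as
    // illustrative:
    //
    // \code
    //   SDValue PPCTargetLowering::lowerPow(SDValue Op, SelectionDAG &DAG) const {
    //     return lowerLibCallBase("__xl_pow", "__xl_powf", "__xl_pow_finite",
    //                             "__xl_powf_finite", Op, DAG);
    //   }
    // \endcode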
    SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            const SDLoc &dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue FinishCall(CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
                       SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
                       SDValue InFlag, SDValue Chain, SDValue CallSeqStart,
                       SDValue &Callee, int SPDiff, unsigned NumBytes,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       SmallVectorImpl<SDValue> &InVals,
                       const CallBase *CB) const;
    SDValue
    LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         const SDLoc &dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const override;
    SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                      SmallVectorImpl<SDValue> &InVals) const override;
    bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                        bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        LLVMContext &Context) const override;
    SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl,
                        SelectionDAG &DAG) const override;

    SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
                              SelectionDAG &DAG, SDValue ArgVal,
                              const SDLoc &dl) const;
    SDValue LowerFormalArguments_AIX(
        SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
        const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
        SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerFormalArguments_64SVR4(
        SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
        const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
        SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerFormalArguments_32SVR4(
        SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
        const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
        SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
    SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
                                       SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
                                       SelectionDAG &DAG, const SDLoc &dl) const;

    SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee, CallFlags CFlags,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             const SDLoc &dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals,
                             const CallBase *CB) const;
    SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallFlags CFlags,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             const SDLoc &dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals,
                             const CallBase *CB) const;
    SDValue LowerCall_AIX(SDValue Chain, SDValue Callee, CallFlags CFlags,
                          const SmallVectorImpl<ISD::OutputArg> &Outs,
                          const SmallVectorImpl<SDValue> &OutVals,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals,
                          const CallBase *CB) const;
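    // Illustrative note, not part of the original header: LowerCall() selects one
    // of the three ABI-specific helpers above, roughly as sketched here (the
    // predicate calls assume PPCSubtarget's isAIXABI()/isPPC64()):
    //
    // \code
    //   if (Subtarget.isAIXABI())
    //     return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl,
    //                          DAG, InVals, CB);
    //   return Subtarget.isPPC64()
    //              ? LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins,
    //                                 dl, DAG, InVals, CB)
    //              : LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins,
    //                                 dl, DAG, InVals, CB);
    // \endcode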
    SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;

    SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineStoreFPToInt(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineSHL(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineSRA(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineSRL(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineMUL(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineADD(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineFMALike(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineSetCC(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineVectorShuffle(ShuffleVectorSDNode *SVN,
                                 SelectionDAG &DAG) const;
    SDValue combineVReverseMemOP(ShuffleVectorSDNode *SVN, LSBaseSDNode *LSBase,
                                 DAGCombinerInfo &DCI) const;
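    // Illustrative note, not part of the original header: these combine* helpers
    // are reached from PerformDAGCombine(), which switches on N->getOpcode() and
    // forwards the node plus DAGCombinerInfo to the matching routine, e.g.
    // ISD::SHL -> combineSHL(N, DCI). Returning an empty SDValue leaves the node
    // untouched.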
    SDValue ConvertSETCCToSubtract(SDNode *N, DAGCombinerInfo &DCI) const;

    SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                            int &RefinementSteps, bool &UseOneConstNR,
                            bool Reciprocal) const override;
    SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                             int &RefinementSteps) const override;
    SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                             const DenormalMode &Mode) const override;
    SDValue getSqrtResultForDenormInput(SDValue Operand,
                                        SelectionDAG &DAG) const override;
    unsigned combineRepeatedFPDivisors() const override;
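    // Illustrative note, not part of the original header: the estimate hooks above
    // return a hardware estimate node (e.g. PPCISD::FRE or PPCISD::FRSQRTE) that
    // the generic combiner refines with RefinementSteps Newton-Raphson iterations;
    // for the reciprocal square root each step is roughly
    //
    //   X_{n+1} = X_n * (1.5 - 0.5 * A * X_n * X_n)
    //
    // so additional steps trade latency for accuracy.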
    SDValue
    combineElementTruncationToVectorTruncation(SDNode *N,
                                               DAGCombinerInfo &DCI) const;
    SDValue lowerToVINSERTH(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;
    SDValue lowerToVINSERTB(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;
    SDValue lowerToXXSPLTI32DX(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;

    bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
    bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
    unsigned computeMOFlags(const SDNode *Parent, SDValue N,
                            SelectionDAG &DAG) const;
  FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                           const TargetLibraryInfo *LibInfo);