//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

class AArch64TargetMachine;

namespace AArch64 {
/// Possible values of current rounding mode, which is specified in bits
/// 23:22 of FPCR.
enum Rounding {
  RN = 0,    // Round to Nearest
  RP = 1,    // Round towards Plus infinity
  RM = 2,    // Round towards Minus infinity
  RZ = 3,    // Round towards Zero
  rmMask = 3 // Bit mask selecting rounding mode
};

// Bit position of rounding mode bits in FPCR.
const unsigned RoundingBitsPos = 22;

// Reserved bits should be preserved when modifying FPCR.
const uint64_t ReservedFPControlBits = 0xfffffffff80040f8;
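
// Illustrative sketch (not part of the upstream header): the constants above
// describe where the rounding mode lives in FPCR. Given a raw FPCR value, a
// hypothetical helper could recover the current mode as
//
//   Rounding getRoundingMode(uint64_t FPCR) {
//     return static_cast<Rounding>((FPCR >> RoundingBitsPos) & rmMask);
//   }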

// Registers used to pass function arguments.
ArrayRef<MCPhysReg> getGPRArgRegs();
ArrayRef<MCPhysReg> getFPRArgRegs();

/// Maximum allowed number of unprobed bytes above SP at an ABI
/// boundary.
const unsigned StackProbeMaxUnprobedStack = 1024;

/// Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxLoopUnroll = 4;

} // namespace AArch64

namespace ARM64AS {
enum : unsigned { PTR32_SPTR = 270, PTR32_UPTR = 271, PTR64 = 272 };
}
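
// Illustrative note (an assumption of this edit, based on the getPointerTy()
// comment below): pointers created with the MSVC-style extensions land in the
// 32-bit address spaces above, e.g.
//
//   int *__ptr32 __uptr P;   // addrspace(271) == ARM64AS::PTR32_UPTR
//   int *__ptr32 __sptr Q;   // addrspace(270) == ARM64AS::PTR32_SPTR
//
// while ordinary 64-bit pointers use PTR64 (272).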

class AArch64Subtarget;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  const AArch64TargetMachine &getTM() const;

  /// Control the following reassociation of operands: (op (op x, c1), y) -> (op
  /// (op x, y), c1) where N0 is (op x, c1) and N1 is y.
  bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
                           SDValue N1) const override;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
    if ((AS == ARM64AS::PTR32_SPTR) || (AS == ARM64AS::PTR32_UPTR)) {
      // These are 32-bit pointers created using the `__ptr32` extension or
      // similar. They are handled by marking them as being in a different
      // address space, and will be extended to 64-bits when used as the target
      // of a load or store operation, or cast to a 64-bit pointer type.
      return MVT::i32;
    } else {
      // Returning i64 unconditionally here (i.e. even for ILP32) means that the
      // *DAG* representation of pointers will always be 64-bits. They will be
      // truncated and extended when transferred to memory, but the 64-bit DAG
      // allows us to use AArch64's addressing modes much more easily.
      return MVT::i64;
    }
  }

  unsigned getVectorIdxWidth(const DataLayout &DL) const override {
    // The VectorIdx type is i64, with both normal and ilp32.
    return 64;
  }

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *Fast = nullptr) const override;
  /// LLT variant.
  bool allowsMisalignedMemoryAccesses(LLT Ty, unsigned AddrSpace,
                                      Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      unsigned *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *
  createFastISel(FunctionLoweringInfo &funcInfo,
                 const TargetLibraryInfo *libInfo,
                 const LibcallLoweringInfo *libcallLowering) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegalAsFMov(const APFloat &Imm, EVT VT) const;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Similar to isShuffleMaskLegal. Return true if the given 'select with zero'
  /// shuffle mask can be codegen'd directly.
  bool isVectorClearMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitDynamicProbedAlloc(MachineInstr &MI,
                                            MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitCheckMatchingVL(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitTileLoad(unsigned Opc, unsigned BaseReg,
                                  MachineInstr &MI,
                                  MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitZAInstr(unsigned Opc, unsigned BaseReg,
                                 MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitZTInstr(MachineInstr &MI, MachineBasicBlock *BB,
                                 unsigned Opcode, bool Op0IsDef) const;
  MachineBasicBlock *EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const;

  // Note: The following group of functions are only used as part of the old SME
  // ABI lowering. They will be removed once -aarch64-new-sme-abi=true is the
  // default.
  MachineBasicBlock *EmitInitTPIDR2Object(MachineInstr &MI,
                                          MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitAllocateZABuffer(MachineInstr &MI,
                                          MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitAllocateSMESaveBuffer(MachineInstr &MI,
                                               MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitGetSMESaveSize(MachineInstr &MI,
                                        MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitEntryPStateSM(MachineInstr &MI,
                                       MachineBasicBlock *BB) const;

  /// Replace (0, vreg) discriminator components with the operands of blend
  /// or with (immediate, NoRegister) when possible.
  void fixupPtrauthDiscriminator(MachineInstr &MI, MachineBasicBlock *BB,
                                 MachineOperand &IntDiscOp,
                                 MachineOperand &AddrDiscOp,
                                 const TargetRegisterClass *AddrDiscRC) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  void getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
                          const CallBase &I, MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT,
                             std::optional<unsigned> ByteOffset) const override;

  bool shouldRemoveRedundantExtend(SDValue Op) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool optimizeExtendOrTruncateConversion(
      Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override;

  bool hasPairedLoad(EVT LoadedType, Align &RequiredAlignment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(Instruction *Load, Value *Mask,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices, unsigned Factor,
                            const APInt &GapMask) const override;
  bool lowerInterleavedStore(Instruction *Store, Value *Mask,
                             ShuffleVectorInst *SVI, unsigned Factor,
                             const APInt &GapMask) const override;

  bool lowerDeinterleaveIntrinsicToLoad(Instruction *Load, Value *Mask,
                                        IntrinsicInst *DI) const override;

  bool lowerInterleaveIntrinsicToStore(
      Instruction *Store, Value *Mask,
      ArrayRef<Value *> InterleaveValues) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalAddScalableImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool isMulAddWithConstProfitable(SDValue AddNode,
                                   SDValue ConstNode) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  LLT getOptimalMemOpLLT(const MemOp &Op,
                         const AttributeList &FuncAttributes) const override;

  bool findOptimalMemOpLowering(LLVMContext &Context, std::vector<EVT> &MemOps,
                                unsigned Limit, const MemOp &Op, unsigned DstAS,
                                unsigned SrcAS,
                                const AttributeList &FuncAttributes,
                                EVT *LargestVT = nullptr) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset,
                                         int64_t MaxOffset) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;

  bool generateFMAsInMachineCombiner(EVT VT,
                                     CodeGenOptLevel OptLevel) const override;

  /// Return true if the target has native support for
  /// the specified value type and it is 'desirable' to use the type for the
  /// given node type.
  bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
  ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  bool isDesirableToPullExtFromShl(const MachineInstr &MI) const override {
    return false;
  }

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override;

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (VT.isVector())
      return false;

    return VT.getScalarSizeInBits() <= 64;
  }

  bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
                                            unsigned SelectOpcode, SDValue X,
                                            SDValue Y) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Using overflow ops for overflow checks only should be beneficial on
    // AArch64.
    return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
  }

  // Return true if the target wants to optimize the mul overflow intrinsic
  // for the given \p VT.
  bool shouldOptimizeMulOverflowWithZeroHighBits(LLVMContext &Context,
                                                 EVT VT) const override;

  Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                              AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;

  bool isOpSuitableForLDPSTP(const Instruction *I) const;
  bool isOpSuitableForLSE128(const Instruction *I) const;
  bool isOpSuitableForRCPC3(const Instruction *I) const;
  bool shouldInsertFencesForAtomic(const Instruction *I) const override;
  bool shouldInsertTrailingSeqCstFenceForAtomicStore(
      const Instruction *I) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(const AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode(const Module &M) const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilderBase &IRB,
                         const LibcallLoweringInfo &Libcalls) const override;

  void
  insertSSPDeclarations(Module &M,
                        const LibcallLoweringInfo &Libcalls) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(
      IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const MachineFunction &MF) const override;

  bool isCheapToSpeculateCttz(Type *) const override {
    return true;
  }

  bool isCheapToSpeculateCtlz(Type *) const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    if (VT.isScalableVector())
      return true;

    return VT.getFixedSizeInBits() >= 64; // vector 'bic'
  }

  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;

  ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const override;

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
    // XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

  bool preferSelectsOverBooleanArithmetic(EVT VT) const override;

  bool isComplexDeinterleavingSupported() const override;
  bool isComplexDeinterleavingOperationSupported(
      ComplexDeinterleavingOperation Operation, Type *Ty) const override;

  Value *createComplexDeinterleavingIR(
      IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
      ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
      Value *Accumulator = nullptr) const override;

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  bool supportPtrAuthBundles() const override { return true; }

  bool supportKCFIBundles() const override { return true; }

  MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
                              MachineBasicBlock::instr_iterator &MBBI,
                              const TargetInstrInfo *TII) const override;

  bool shallExtractConstSplatVectorElementToStore(
      Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const override;

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override {
    return true;
  }

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL,
                                    bool &UseScalable) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL,
                                     bool UseScalable) const;

  MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg,
      const DataLayout &DL) const override;

  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;

  bool fallBackToDAGISel(const Instruction &Inst) const override;

  /// SVE code generation for fixed length vectors does not custom lower
  /// BUILD_VECTOR. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR is legal or can be custom lowered for all legal
  /// vector types this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override;

  // If the platform/function should have a redzone, return the size in bytes.
  unsigned getRedZoneSize(const Function &F) const {
    if (F.hasFnAttribute(Attribute::NoRedZone))
      return 0;
    return 128;
  }

  bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const;

  EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
                             bool AllowUnknown = false) const override;

  bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override;

  bool shouldExpandCttzElements(EVT VT) const override;

  bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const override;

  /// If a change in streaming mode is required on entry to/return from a
  /// function call it emits and returns the corresponding SMSTART or SMSTOP
  /// node. \p Condition should be one of the enum values from
  /// AArch64SME::ToggleCondition.
  SDValue changeStreamingMode(SelectionDAG &DAG, SDLoc DL, bool Enable,
                              SDValue Chain, SDValue InGlue, unsigned Condition,
                              bool InsertVectorLengthCheck = false) const;

  /// Returns true if \p RdxOp should be lowered to a SVE reduction. If a SVE2
  /// pairwise operation can be used for the reduction \p PairwiseOpIID is set
  /// to its intrinsic ID.
  bool
  shouldLowerReductionToSVE(SDValue RdxOp,
                            std::optional<Intrinsic::ID> &PairwiseOpIID) const;

  // Normally SVE is only used for byte size vectors that do not fit within a
  // NEON vector. This changes when OverrideNEON is true, allowing SVE to be
  // used for 64bit and 128bit vectors as well.
  bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON = false) const;

  // Follow NEON ABI rules even when using SVE for fixed length vectors.
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;
  unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context,
                                                CallingConv::ID CC, EVT VT,
                                                EVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT &RegisterVT) const override;

  /// True if stack clash protection is enabled for this function.
  bool hasInlineStackProbe(const MachineFunction &MF) const override;

  /// In AArch64, true if FEAT_CPA is present. Allows pointer arithmetic
  /// semantics to be preserved for instruction selection.
  bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const override;

private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT);
  void addTypeForFixedLengthSVE(MVT VT);
  void addDRType(MVT VT);
  void addQRType(MVT VT);

  bool shouldExpandBuildVectorWithShuffles(EVT, unsigned) const override;

  SDValue lowerEHPadEntry(SDValue Chain, SDLoc const &DL,
                          SelectionDAG &DAG) const override;

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<CCValAssign> &RVLocs,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal, bool RequiresSMChange) const;

  SDValue LowerStore128(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFMA(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerVECTOR_COMPRESS(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;

  bool
  isEligibleForTailCallOptimization(const CallLoweringInfo &CLI) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context, const Type *RetTy) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ExternalSymbolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSLocalExec(const GlobalValue *GV, SDValue ThreadBase,
                               const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerPtrAuthGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal,
                         iterator_range<SDNode::user_iterator> Users,
                         SDNodeFlags Flags, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRIND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRESET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDUPQLane(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG,
                              unsigned NewOp) const;
  SDValue LowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SPLICE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_DEINTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_INTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_HISTOGRAM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerPARTIAL_REDUCE_MLA(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_ACTIVE_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP_PARITY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBitreverse(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMinMax(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorXRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerLOOP_DEPENDENCE_MASK(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE_MUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerInlineDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCANONICALIZE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAVG(SDValue Op, SelectionDAG &DAG, unsigned NewOp) const;

  SDValue LowerFixedLengthVectorIntDivideToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorIntExtendToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorMLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerPredReductionToSVE(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerReductionToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSelectToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSetccToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorStoreToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorMStoreToSVE(SDValue Op,
                                            SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorTruncateToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthExtractVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthInsertVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthBitcastToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthConcatVectorsToSVE(SDValue Op,
                                             SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPExtendToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPRoundToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthIntToFPToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPToIntToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVECTOR_SHUFFLEToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthBuildVectorToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorCompressToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                           const DenormalMode &Mode,
                           SDNodeFlags Flags = {}) const override;
  SDValue getSqrtResultForDenormInput(SDValue Operand,
                                      SelectionDAG &DAG) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  Register getRegisterByName(const char* RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  InlineAsm::ConstraintCode
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::ConstraintCode::Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    // followed by llvm_unreachable so we'll leave them unimplemented in
    // the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  /// Handle Lowering flag assembly outputs.
  SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
                                      const SDLoc &DL,
                                      const AsmOperandInfo &Constraint,
                                      SelectionDAG &DAG) const override;

  bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const override;
  bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const override;
  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                              SDValue &Offset, SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;
  bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
                       bool IsPre, MachineRegisterInfo &MRI) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const;
  void ReplaceExtractSubVectorResults(SDNode *N,
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) const;
  void ReplaceGetActiveLaneMaskResults(SDNode *N,
                                       SmallVectorImpl<SDValue> &Results,
                                       SelectionDAG &DAG) const;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;

  bool shouldLocalize(const MachineInstr &MI,
                      const TargetTransformInfo *TTI) const override;

  bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                         const APInt &OriginalDemandedBits,
                                         const APInt &OriginalDemandedElts,
                                         KnownBits &Known,
                                         TargetLoweringOpt &TLO,
                                         unsigned Depth) const override;

  bool canCreateUndefOrPoisonForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           bool PoisonOnly, bool ConsiderFlags,
                                           unsigned Depth) const override;

  bool isTargetCanonicalConstantNode(SDValue Op) const override;

  // With the exception of data-predicate transitions, no instructions are
  // required to cast between legal scalable vector types. However:
  //  1. Packed and unpacked types have different bit lengths, meaning BITCAST
  //     is not universally useable.
  //  2. Most unpacked integer types are not legal and thus integer extends
  //     cannot be used to convert between unpacked and packed types.
  // These can make "bitcasting" a multiphase process. REINTERPRET_CAST is used
  // to transition between unpacked and packed types of the same element type,
  // with BITCAST used otherwise.
  // This function does not handle predicate bitcasts.
  SDValue getSVESafeBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) const;
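
  // Illustrative example (an assumption of this edit, not upstream text),
  // following the rules described above: casting an unpacked nxv2f32 to
  // nxv2i64 cannot be a single BITCAST because the unpacked and packed f32
  // forms have different bit lengths, so it proceeds in phases, e.g.
  //
  //   nxv2f32 --REINTERPRET_CAST--> nxv4f32 --BITCAST--> nxv2i64
  //
  // where REINTERPRET_CAST moves between the unpacked and packed forms of the
  // same f32 element type, and the final BITCAST is between equally sized
  // types.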

  // Returns the runtime value for PSTATE.SM by generating a call to
  // __arm_sme_state.
  SDValue getRuntimePStateSM(SelectionDAG &DAG, SDValue Chain, SDLoc DL,
                             EVT VT) const;

  bool preferScalarizeSplat(SDNode *N) const override;

  unsigned getMinimumJumpTableEntries() const override;

  bool shouldScalarizeBinop(SDValue VecOp) const override {
    return VecOp.getOpcode() == ISD::SETCC;
  }

  bool hasMultipleConditionRegisters(EVT VT) const override {
    return VT.isScalableVector();
  }
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo,
                         const LibcallLoweringInfo *libcallLowering);
} // end namespace AArch64

} // end namespace llvm

#endif
return SDValue()
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG)
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG)
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG)
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
iv Induction Variable Users
Definition IVUsers.cpp:48
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define RegName(no)
lazy value info
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
PowerPC Reduce CR logical Operation
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This file describes how to lower LLVM code to machine code.
static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG)
static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG)
Lower SRA_PARTS and friends, which return two i32 values and take a 2 x i32 value to shift plus a shi...
static SDValue LowerAVG(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG)
static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
Value * RHS
Value * LHS
static SDValue LowerCallResult(SDValue Chain, SDValue InGlue, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
SDValue changeStreamingMode(SelectionDAG &DAG, SDLoc DL, bool Enable, SDValue Chain, SDValue InGlue, unsigned Condition, bool InsertVectorLengthCheck=false) const
If a change in streaming mode is required on entry to/return from a function call it emits and return...
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override
Return true if it is profitable to fold a pair of shifts into a mask.
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset) const override
Return true if it is profitable to reduce a load to a smaller type.
Value * getIRStackGuard(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const override
If the target has a standard location for the stack protector cookie, returns the address of that loc...
Value * getSafeStackPointerLocation(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const override
If the target has a standard location for the unsafe stack pointer, returns the address of that locat...
void initializeSplitCSR(MachineBasicBlock *Entry) const override
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool hasAndNotCompare(SDValue V) const override
Return true if the target should transform: (X & Y) == Y ---> (~X & Y) == 0 (X & Y) !...
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
void insertSSPDeclarations(Module &M, const LibcallLoweringInfo &Libcalls) const override
Inserts necessary declarations for SSP (stack protection) purpose.
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
Return true if the given shuffle mask can be codegen'd directly, or if it should be stack expanded.
unsigned getVaListSizeInBits(const DataLayout &DL) const override
Returns the size of the platform's va_list object.
MachineBasicBlock * EmitZAInstr(unsigned Opc, unsigned BaseReg, MachineInstr &MI, MachineBasicBlock *BB) const
void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock * > &Exits) const override
Insert explicit copies in entry and exit blocks.
int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) const override
Return the prefered common base offset.
bool shouldLowerReductionToSVE(SDValue RdxOp, std::optional< Intrinsic::ID > &PairwiseOpIID) const
Returns true if RdxOp should be lowered to a SVE reduction.
bool shouldExpandCttzElements(EVT VT) const override
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
MachineBasicBlock * EmitInitTPIDR2Object(MachineInstr &MI, MachineBasicBlock *BB) const
bool lowerInterleavedStore(Instruction *Store, Value *Mask, ShuffleVectorInst *SVI, unsigned Factor, const APInt &GapMask) const override
Lower an interleaved store into a stN intrinsic.
MachineBasicBlock * EmitTileLoad(unsigned Opc, unsigned BaseReg, MachineInstr &MI, MachineBasicBlock *BB) const
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL, bool UseScalable) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given...
bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override
Return true if it is profitable to fold a pair of shifts into a mask.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
bool preferSelectsOverBooleanArithmetic(EVT VT) const override
Should we prefer selects to doing arithmetic on boolean types.
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Provide custom lowering hooks for some operations.
bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT, const MachineFunction &MF) const override
Returns if it's reasonable to merge stores to MemVT size.
bool shouldOptimizeMulOverflowWithZeroHighBits(LLVMContext &Context, EVT VT) const override
bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override
bool shouldInsertTrailingSeqCstFenceForAtomicStore(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert a seq_cst trailing fence without reducing the or...
bool isIntDivCheap(EVT VT, AttributeList Attr) const override
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool shouldRemoveRedundantExtend(SDValue Op) const override
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const override
Return true if the target shall perform extract vector element and store given that the vector is kno...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC) const
Selects the correct CCAssignFn for a given CallingConvention value.
bool supportPtrAuthBundles() const override
Return true if the target supports ptrauth operand bundles.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ISD::SETCC ValueType.
bool optimizeExtendOrTruncateConversion(Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override
Try to optimize extending or truncating conversion instructions (like zext, trunc,...
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
bool lowerDeinterleaveIntrinsicToLoad(Instruction *Load, Value *Mask, IntrinsicInst *DI) const override
Lower a deinterleave intrinsic to a target specific load intrinsic.
bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes, EVT *LargestVT=nullptr) const override
Determines the optimal series of memory ops to replace the memset / memcpy.
MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const override
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand fl...
unsigned getVectorIdxWidth(const DataLayout &DL) const override
Returns the type to be used for the index operand vector operations.
bool hasInlineStackProbe(const MachineFunction &MF) const override
True if stack clash protection is enabled for this functions.
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, SDValue Y) const override
Return true if pulling a binary operation into a select with an identity constant is profitable.
bool isLegalICmpImmediate(int64_t) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
ShiftLegalizationStrategy preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const override
bool isOpSuitableForLSE128(const Instruction *I) const
void fixupPtrauthDiscriminator(MachineInstr &MI, MachineBasicBlock *BB, MachineOperand &IntDiscOp, MachineOperand &AddrDiscOp, const TargetRegisterClass *AddrDiscRC) const
Replace (0, vreg) discriminator components with the operands of blend or with (immediate,...
bool lowerInterleavedLoad(Instruction *Load, Value *Mask, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor, const APInt &GapMask) const override
Lower an interleaved load into a ldN intrinsic.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool fallBackToDAGISel(const Instruction &Inst) const override
bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override
Return true if the target has native support for the specified value type and it is 'desirable' to us...
bool isLegalAddScalableImmediate(int64_t) const override
Return true if adding the specified scalable immediate is legal, that is the target has add instructi...
bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const override
Try to convert math with an overflow comparison into the corresponding DAG node operation.
Value * createComplexDeinterleavingIR(IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, Value *Accumulator=nullptr) const override
Create the IR node for the given complex deinterleaving operation.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Returns true if the target allows unaligned memory accesses of the specified type.
MachineBasicBlock * EmitCheckMatchingVL(MachineInstr &MI, MachineBasicBlock *MBB) const
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL, bool &UseScalable) const
Returns true if VecTy is a legal interleaved access type.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const override
For some targets, an LLVM struct type must be broken down into multiple simple types,...
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
MachineBasicBlock * EmitLoweredCatchRet(MachineInstr &MI, MachineBasicBlock *BB) const
bool isComplexDeinterleavingSupported() const override
Does this target support complex deinterleaving.
bool isZExtFree(Type *Ty1, Type *Ty2) const override
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const override
SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const
MachineBasicBlock * EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override
Return if the target supports combining a chain like:
bool isProfitableToHoist(Instruction *I) const override
Check if it is profitable to hoist instruction in then/else to if.
bool isOpSuitableForRCPC3(const Instruction *I) const
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isCheapToSpeculateCttz(Type *) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
unsigned getRedZoneSize(const Function &F) const
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
MachineBasicBlock * EmitZTInstr(MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode, bool Op0IsDef) const
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
MachineBasicBlock * EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const
bool isCheapToSpeculateCtlz(Type *) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const override
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
MachineBasicBlock * EmitEntryPStateSM(MachineInstr &MI, MachineBasicBlock *BB) const
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
bool isReassocProfitable(SelectionDAG &DAG, SDValue N0, SDValue N1) const override
Control the following reassociation of operands: (op (op x, c1), y) -> (op (op x, y),...
bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const override
In AArch64, true if FEAT_CPA is present.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
MachineBasicBlock * EmitF128CSEL(MachineInstr &MI, MachineBasicBlock *BB) const
LLT getOptimalMemOpLLT(const MemOp &Op, const AttributeList &FuncAttributes) const override
LLT returning variant.
bool isDesirableToPullExtFromShl(const MachineInstr &MI) const override
GlobalISel - return true if it's profitable to perform the combine: shl ([sza]ext x),...
bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const override
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
MachineBasicBlock * EmitAllocateSMESaveBuffer(MachineInstr &MI, MachineBasicBlock *BB) const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool needsFixedCatchObjects() const override
Used for exception handling on Win64.
MachineBasicBlock * EmitAllocateZABuffer(MachineInstr &MI, MachineBasicBlock *BB) const
const AArch64TargetMachine & getTM() const
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const override
EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool generateFMAsInMachineCombiner(EVT VT, CodeGenOptLevel OptLevel) const override
bool isComplexDeinterleavingOperationSupported(ComplexDeinterleavingOperation Operation, Type *Ty) const override
Does this target support complex deinterleaving with the given operation and type.
bool isOpSuitableForLDPSTP(const Instruction *I) const
AArch64TargetLowering(const TargetMachine &TM, const AArch64Subtarget &STI)
MachineBasicBlock * EmitGetSMESaveSize(MachineInstr &MI, MachineBasicBlock *BB) const
bool hasPairedLoad(EVT LoadedType, Align &RequiredAlignment) const override
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
bool isLegalAddImmediate(int64_t) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool shouldConsiderGEPOffsetSplit() const override
bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const override
Should we transform the IR-optimal check for whether the given truncation down into KeptBits would be trun...
bool isVectorClearMaskLegal(ArrayRef< int > M, EVT VT) const override
Similar to isShuffleMaskLegal.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
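A small sketch of consuming such a zero-terminated array; the register ids and the printing are purely illustrative:
#include <cstdint>
#include <cstdio>
using MCPhysReg = uint16_t; // mirrors the MCPhysReg typedef documented below
static void dumpScratchRegs(const MCPhysReg *Regs) {
  for (unsigned I = 0; Regs && Regs[I] != 0; ++I)
    std::printf("scratch register id: %u\n", static_cast<unsigned>(Regs[I]));
}
int main() {
  const MCPhysReg Example[] = {17, 18, 0}; // hypothetical ids, 0-terminated
  dumpScratchRegs(Example);
  return 0;
}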
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo, const LibcallLoweringInfo *libcallLowering) const override
This method returns a target specific FastISel object, or null if the target does not support "fast" ...
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
bool useLoadStackGuardNode(const Module &M) const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
void getTgtMemIntrinsic(SmallVectorImpl< IntrinsicInfo > &Infos, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
ArrayRef< MCPhysReg > getRoundingControlRegisters() const override
Returns a 0 terminated array of rounding control registers that can be attached to a strict FP call.
bool isFPImmLegalAsFMov(const APFloat &Imm, EVT VT) const
bool lowerInterleaveIntrinsicToStore(Instruction *Store, Value *Mask, ArrayRef< Value * > InterleaveValues) const override
Lower an interleave intrinsic to a target specific store intrinsic.
MachineInstr * EmitKCFICheck(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const override
bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
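A hedged standalone sketch of the kind of [base + immediate] rule such a hook encodes for AArch64 scalar loads and stores; the scaled unsigned 12-bit and unscaled signed 9-bit ranges are assumptions about the common LDR/LDUR forms:
#include <cstdint>
// Returns true when Offset is representable for an AccessBytes-wide access.
static bool looksLikeLegalLdStOffset(int64_t Offset, unsigned AccessBytes) {
  if (Offset >= 0 && Offset % AccessBytes == 0 && Offset / AccessBytes <= 0xfff)
    return true;                           // LDR/STR, scaled unsigned imm12
  return Offset >= -256 && Offset <= 255;  // LDUR/STUR, signed 9-bit unscaled
}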
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
This method can be implemented by targets that want to expose additional information about sign bits ...
bool isDesirableToCommuteXorWithShift(const SDNode *N) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
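The bit-extraction shape both hooks protect, shown as a plain C++ illustration (the shift amount and mask are arbitrary):
#include <cstdint>
// (X >> C) & Mask extracts a contiguous bit field; on AArch64 this commonly
// lowers to a single UBFX, so commuting the shift outward would obscure it.
static uint32_t extractField(uint32_t X) { return (X >> 4) & 0xffu; }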
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
bool enableAggressiveFMAFusion(EVT VT) const override
Enable aggressive FMA fusion on targets that want it.
MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override
Return the type to use for a scalar shift opcode, given the shifted amount type.
MachineBasicBlock * EmitDynamicProbedAlloc(MachineInstr &MI, MachineBasicBlock *MBB) const
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(const AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
bool supportKCFIBundles() const override
Return true if the target supports kcfi operand bundles.
bool isMulAddWithConstProfitable(SDValue AddNode, SDValue ConstNode) const override
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x,...
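The algebra behind the transform, as a self-contained check with arbitrary constants:
#include <cassert>
#include <cstdint>
int main() {
  // (x + c1) * c2 == x*c2 + c1*c2; profitability hinges on whether c1*c2 is
  // still cheap to materialize as an immediate.
  int64_t x = 7, c1 = 3, c2 = 5;
  assert((x + c1) * c2 == x * c2 + c1 * c2);
  return 0;
}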
bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON=false) const
bool mergeStoresAfterLegalization(EVT VT) const override
SVE code generation for fixed length vectors does not custom lower BUILD_VECTOR.
Class for arbitrary precision integers.
Definition APInt.h:78
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
An instruction that atomically checks whether a specified value is in a memory location,...
An instruction that atomically reads a memory location, combines it with another value,...
CCState - This class holds information needed while lowering arguments and return values.
Base class for all callable instructions (InvokeInst and CallInst). Holds everything related to callin...
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string and methods for querying it.
Definition DataLayout.h:64
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this functio...
Definition Function.h:272
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:729
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Tracks which library functions to use for a particular subtarget.
An instruction for reading from memory.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Machine Value Type.
static MVT getIntegerVT(unsigned BitWidth)
Instructions::iterator instr_iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Function & getFunction()
Return the LLVM function that this machine code represents.
Representation of each machine instruction.
Flags
Flags values. These may be or'd together.
MachineOperand class - Representation of each machine instruction operand.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
An instruction for storing to memory.
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const
Try to convert math with an overflow comparison into the corresponding DAG node operation.
ShiftLegalizationStrategy
Return the preferred strategy to legalize this SHIFT instruction, with ExpansionFactor being the recu...
virtual unsigned getMinimumJumpTableEntries() const
Return lower limit for number of blocks in a jump table.
LegalizeTypeAction
This enum indicates whether a type is legal for a target, and if not, what action should be used to...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
TargetLowering(const TargetLowering &)=delete
virtual unsigned combineRepeatedFPDivisors() const
Indicate whether this target prefers to combine FDIVs with the same divisor.
Primary interface to the complete machine description for the target machine.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM Value Representation.
Definition Value.h:75
Base class of all SIMD vector types.
A range adaptor for a pair of iterators.
ArrayRef< MCPhysReg > getFPRArgRegs()
Rounding
Possible values of current rounding mode, which is specified in bits 23:22 of FPCR.
const unsigned StackProbeMaxLoopUnroll
Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxUnprobedStack
Maximum allowed number of unprobed bytes above SP at an ABI boundary.
const unsigned RoundingBitsPos
const uint64_t ReservedFPControlBits
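A sketch of decoding the rounding field, assuming only the layout documented here (a two-bit field at RoundingBitsPos, i.e. FPCR bits 23:22):
#include <cstdint>
static unsigned decodeRoundingField(uint64_t Fpcr) {
  const unsigned RoundingBitsPos = 22;                         // bits 23:22 of FPCR
  return static_cast<unsigned>((Fpcr >> RoundingBitsPos) & 3); // one of four modes
}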
ArrayRef< MCPhysReg > getGPRArgRegs()
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo, const LibcallLoweringInfo *libcallLowering)
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ CXX_FAST_TLS
Used for access functions.
Definition CallingConv.h:72
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:819
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
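A heavily simplified standalone analogue (not the LLVM API) of how an assignment callback of this shape is driven once per argument while mutating shared state; the eight-registers-then-stack policy and the failure-returns-true convention are assumptions made for illustration:
#include <vector>
struct MiniCCState {
  unsigned NextGPR = 0, StackOffset = 0;
  std::vector<int> Locs; // >= 0: register index, < 0: negated stack offset
};
// Returns true on failure to assign (assumed convention).
static bool miniAssign(unsigned /*ValNo*/, MiniCCState &State) {
  if (State.NextGPR < 8) {
    State.Locs.push_back(static_cast<int>(State.NextGPR++));
  } else {
    State.StackOffset += 8;
    State.Locs.push_back(-static_cast<int>(State.StackOffset));
  }
  return false;
}
int main() {
  MiniCCState State;
  for (unsigned ValNo = 0; ValNo < 10; ++ValNo)
    (void)miniAssign(ValNo, State);
  return 0;
}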
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
AtomicOrdering
Atomic ordering for LLVM's memory model.
TargetTransformInfo TTI
CombineLevel
Definition DAGCombine.h:15
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Represent subnormal handling kind for floating point instruction inputs and outputs.
Extended Value Type.
Definition ValueTypes.h:35
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:393
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition ValueTypes.h:389
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:176
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
Definition ValueTypes.h:182
These are IR-level optimization flags that may be propagated to SDNodes.
This contains information for each constraint that we are lowering.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...