NVPTXISelLowering.cpp
1//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that NVPTX uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "NVPTXISelLowering.h"
16#include "NVPTX.h"
17#include "NVPTXISelDAGToDAG.h"
19#include "NVPTXSubtarget.h"
20#include "NVPTXTargetMachine.h"
22#include "NVPTXUtilities.h"
23#include "llvm/ADT/APFloat.h"
24#include "llvm/ADT/APInt.h"
25#include "llvm/ADT/STLExtras.h"
27#include "llvm/ADT/StringRef.h"
39#include "llvm/IR/Argument.h"
40#include "llvm/IR/Attributes.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DataLayout.h"
45#include "llvm/IR/FPEnv.h"
46#include "llvm/IR/Function.h"
47#include "llvm/IR/GlobalValue.h"
48#include "llvm/IR/IRBuilder.h"
49#include "llvm/IR/Instruction.h"
51#include "llvm/IR/IntrinsicsNVPTX.h"
52#include "llvm/IR/Module.h"
53#include "llvm/IR/Type.h"
54#include "llvm/IR/Value.h"
66#include <algorithm>
67#include <cassert>
68#include <cmath>
69#include <cstdint>
70#include <iterator>
71#include <optional>
72#include <string>
73#include <tuple>
74#include <utility>
75#include <vector>
76
77#define DEBUG_TYPE "nvptx-lower"
78
79using namespace llvm;
80
81static cl::opt<bool> sched4reg(
82    "nvptx-sched4reg",
83    cl::desc("NVPTX Specific: schedule for register pressure"), cl::init(false));
84
85static cl::opt<int> FMAContractLevelOpt(
86    "nvptx-fma-level", cl::Hidden,
87    cl::desc("NVPTX Specific: FMA contraction (0: don't do it,"
88             " 1: do it, 2: do it aggressively)"),
89    cl::init(2));
90
91static cl::opt<NVPTX::DivPrecisionLevel> UsePrecDivF32(
92    "nvptx-prec-divf32", cl::Hidden,
93    cl::desc(
94        "NVPTX Specific: Override the precision of the lowering for f32 fdiv"),
95    cl::values(
96        clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"),
97        clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"),
98        clEnumValN(NVPTX::DivPrecisionLevel::IEEE754, "2",
99                   "Use IEEE Compliant F32 div.rnd if available (default)"),
100        clEnumValN(NVPTX::DivPrecisionLevel::IEEE754_NoFTZ, "3",
101                   "Use IEEE Compliant F32 div.rnd if available, no FTZ")),
102    cl::init(NVPTX::DivPrecisionLevel::IEEE754));
103
104static cl::opt<bool> UsePrecSqrtF32(
105    "nvptx-prec-sqrtf32", cl::Hidden,
106 cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
107 cl::init(true));
108
109/// Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it
110/// does NOT use lg2.approx for log2, so this is disabled by default.
111static cl::opt<bool> UseApproxLog2F32(
112    "nvptx-approx-log2f32",
113 cl::desc("NVPTX Specific: whether to use lg2.approx for log2"),
114 cl::init(false));
115
116static cl::opt<bool> ForceMinByValParamAlign(
117    "nvptx-force-min-byval-param-align", cl::Hidden,
118 cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval"
119 " params of device functions."),
120 cl::init(false));
121
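// Illustrative note (not in the original source): passing -nvptx-prec-divf32=0
// on the command line makes this hook return DivPrecisionLevel::Approx for
// every f32 fdiv, overriding per-node afn fast-math flags; with the option
// unset, only nodes carrying the afn flag get the approximate lowering.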
122NVPTX::DivPrecisionLevel
123NVPTXTargetLowering::getDivF32Level(const MachineFunction &MF,
124                                    const SDNode &N) const {
125  // If nvptx-prec-divf32 is used on the command-line, always honor it
126 if (UsePrecDivF32.getNumOccurrences() > 0)
127 return UsePrecDivF32;
128
129 const SDNodeFlags Flags = N.getFlags();
130  if (Flags.hasApproximateFuncs())
131    return NVPTX::DivPrecisionLevel::Approx;
132
133  return UsePrecDivF32;
134}
135
137 // If nvptx-prec-sqrtf32 is used on the command-line, always honor it
138 if (UsePrecSqrtF32.getNumOccurrences() > 0)
139 return UsePrecSqrtF32;
140
141 if (N) {
142 const SDNodeFlags Flags = N->getFlags();
143 if (Flags.hasApproximateFuncs())
144 return false;
145 }
146
147 return true;
148}
149
154
155static bool IsPTXVectorType(MVT VT) {
156 switch (VT.SimpleTy) {
157 default:
158 return false;
159 case MVT::v2i1:
160 case MVT::v4i1:
161 case MVT::v2i8:
162 case MVT::v4i8:
163 case MVT::v8i8: // <2 x i8x4>
164 case MVT::v16i8: // <4 x i8x4>
165 case MVT::v2i16:
166 case MVT::v4i16:
167 case MVT::v8i16: // <4 x i16x2>
168 case MVT::v2i32:
169 case MVT::v4i32:
170 case MVT::v2i64:
171 case MVT::v2f16:
172 case MVT::v4f16:
173 case MVT::v8f16: // <4 x f16x2>
174 case MVT::v2bf16:
175 case MVT::v4bf16:
176 case MVT::v8bf16: // <4 x bf16x2>
177 case MVT::v2f32:
178 case MVT::v4f32:
179 case MVT::v2f64:
180 case MVT::v4i64:
181 case MVT::v4f64:
182 case MVT::v8i32:
183 case MVT::v8f32:
184 case MVT::v16f16: // <8 x f16x2>
185 case MVT::v16bf16: // <8 x bf16x2>
186 case MVT::v16i16: // <8 x i16x2>
187 case MVT::v32i8: // <8 x i8x4>
188 return true;
189 }
190}
191
192// When legalizing vector loads/stores, this function is called, which does two
193// things:
194// 1. Determines whether the vector is something we want to custom lower;
195//    std::nullopt is returned if we do not want to custom lower it.
196// 2. If we do want to handle it, returns two parameters:
197// - unsigned int NumElts - The number of elements in the final vector
198// - EVT EltVT - The type of the elements in the final vector
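// Illustrative example (not part of the original comment): on a target with
// 256-bit vector load/store for the given address space and f32x2
// instructions, a v8f32 access is reported as {4, MVT::v2f32}, i.e. four
// packed f32x2 registers; without f32x2 support it stays {8, MVT::f32}.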
199static std::optional<std::pair<unsigned int, MVT>>
201 unsigned AddressSpace) {
202 const bool CanLowerTo256Bit = STI.has256BitVectorLoadStore(AddressSpace);
203
204 if (CanLowerTo256Bit && VectorEVT.isScalarInteger() &&
205 VectorEVT.getSizeInBits() == 256)
206 return {{4, MVT::i64}};
207
208 if (!VectorEVT.isSimple())
209 return std::nullopt;
210 const MVT VectorVT = VectorEVT.getSimpleVT();
211
212 if (!VectorVT.isVector()) {
213 if (VectorVT == MVT::i128 || VectorVT == MVT::f128)
214 return {{2, MVT::i64}};
215 return std::nullopt;
216 }
217
218 const MVT EltVT = VectorVT.getVectorElementType();
219 const unsigned NumElts = VectorVT.getVectorNumElements();
220
221 // The size of the PTX virtual register that holds a packed type.
222 unsigned PackRegSize;
223
224 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
225 // legal. We can (and should) split that into 2 stores of <2 x double> here
226 // but I'm leaving that as a TODO for now.
227 switch (VectorVT.SimpleTy) {
228 default:
229 return std::nullopt;
230
231 case MVT::v4i64:
232 case MVT::v4f64:
233 // This is a "native" vector type iff the address space is global and the
234 // target supports 256-bit loads/stores
235 if (!CanLowerTo256Bit)
236 return std::nullopt;
237 [[fallthrough]];
238 case MVT::v2i8:
239 case MVT::v2i64:
240 case MVT::v2f64:
241 // This is a "native" vector type
242 return std::pair(NumElts, EltVT);
243
244 case MVT::v16f16: // <8 x f16x2>
245 case MVT::v16bf16: // <8 x bf16x2>
246 case MVT::v16i16: // <8 x i16x2>
247 case MVT::v32i8: // <8 x i8x4>
248 // This can be upsized into a "native" vector type iff the address space is
249 // global and the target supports 256-bit loads/stores.
250 if (!CanLowerTo256Bit)
251 return std::nullopt;
252 [[fallthrough]];
253 case MVT::v2i16: // <1 x i16x2>
254 case MVT::v2f16: // <1 x f16x2>
255 case MVT::v2bf16: // <1 x bf16x2>
256 case MVT::v4i8: // <1 x i8x4>
257 case MVT::v4i16: // <2 x i16x2>
258 case MVT::v4f16: // <2 x f16x2>
259 case MVT::v4bf16: // <2 x bf16x2>
260 case MVT::v8i8: // <2 x i8x4>
261 case MVT::v8f16: // <4 x f16x2>
262 case MVT::v8bf16: // <4 x bf16x2>
263 case MVT::v8i16: // <4 x i16x2>
264 case MVT::v16i8: // <4 x i8x4>
265 PackRegSize = 32;
266 break;
267
268 case MVT::v8f32: // <4 x f32x2>
269 case MVT::v8i32: // <4 x i32x2>
270 // This is a "native" vector type iff the address space is global and the
271 // target supports 256-bit loads/stores
272 if (!CanLowerTo256Bit)
273 return std::nullopt;
274 [[fallthrough]];
275 case MVT::v2f32: // <1 x f32x2>
276 case MVT::v4f32: // <2 x f32x2>
277 case MVT::v2i32: // <1 x i32x2>
278 case MVT::v4i32: // <2 x i32x2>
279 if (!STI.hasF32x2Instructions())
280 return std::pair(NumElts, EltVT);
281 PackRegSize = 64;
282 break;
283 }
284
285 // If we reach here, then we can pack 2 or more elements into a single 32-bit
286 // or 64-bit PTX register and treat the vector as a new vector containing
287 // packed elements.
288
289 // Number of elements to pack in one word.
290 const unsigned NPerReg = PackRegSize / EltVT.getSizeInBits();
291
292 return std::pair(NumElts / NPerReg, MVT::getVectorVT(EltVT, NPerReg));
293}
294
295/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
296/// legal-ish MVTs that compose it. Unlike ComputeValueVTs, this will legalize
297/// the types as required by the calling convention (with special handling for
298/// i8s).
299/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
300/// same number of types as the Ins/Outs arrays in LowerFormalArguments,
301/// LowerCall, and LowerReturn.
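/// Illustrative example (not in the original comment): for a struct type
/// { i32, i8, i8 } this produces ValueVTs = {i32, i8, i8} with Offsets =
/// {0, 4, 5}; the i8 pieces keep their byte-sized register type (rather than
/// the promoted i16) so offsets line up with the in-memory parameter layout.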
302static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
303 LLVMContext &Ctx, CallingConv::ID CallConv,
304 Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
306 uint64_t StartingOffset = 0) {
307 SmallVector<EVT, 16> TempVTs;
308 SmallVector<uint64_t, 16> TempOffsets;
309 ComputeValueVTs(TLI, DL, Ty, TempVTs, /*MemVTs=*/nullptr, &TempOffsets,
310 StartingOffset);
311
312 for (const auto [VT, Off] : zip(TempVTs, TempOffsets)) {
313 MVT RegisterVT = TLI.getRegisterTypeForCallingConv(Ctx, CallConv, VT);
314 unsigned NumRegs = TLI.getNumRegistersForCallingConv(Ctx, CallConv, VT);
315
316 // Since we actually can load/store b8, we need to ensure that we'll use
317 // the original sized type for any i8s or i8 vectors.
318 if (VT.getScalarType() == MVT::i8) {
319 if (RegisterVT == MVT::i16)
320 RegisterVT = MVT::i8;
321 else if (RegisterVT == MVT::v2i16)
322 RegisterVT = MVT::v2i8;
323 else
324 assert(RegisterVT == MVT::v4i8 &&
325 "Expected v4i8, v2i16, or i16 for i8 RegisterVT");
326 }
327
328 // TODO: This is horribly incorrect for cases where the vector elements are
329 // not a multiple of bytes (ex i1) and legal or i8. However, this problem
330 // has existed for as long as NVPTX has and no one has complained, so we'll
331 // leave it for now.
332 for (unsigned I : seq(NumRegs)) {
333 ValueVTs.push_back(RegisterVT);
334 Offsets.push_back(Off + I * RegisterVT.getStoreSize());
335 }
336 }
337}
338
339// We return an EVT that can hold N VTs
340// If the VT is a vector, the resulting EVT is a flat vector with the same
341// element type as VT's element type.
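// For example (illustrative): getVectorizedVT(MVT::f32, 4, Ctx) yields v4f32,
// and getVectorizedVT(MVT::v2f16, 2, Ctx) yields the flattened v4f16; N == 1
// returns VT unchanged.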
342static EVT getVectorizedVT(EVT VT, unsigned N, LLVMContext &C) {
343 if (N == 1)
344 return VT;
345
346 return VT.isVector() ? EVT::getVectorVT(C, VT.getScalarType(),
347 VT.getVectorNumElements() * N)
348 : EVT::getVectorVT(C, VT, N);
349}
350
352 const SDLoc &dl, SelectionDAG &DAG) {
353 if (V.getValueType() == VT) {
354 assert(I == 0 && "Index must be 0 for scalar value");
355 return V;
356 }
357
358 if (!VT.isVector())
359 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, V,
360 DAG.getVectorIdxConstant(I, dl));
361
362 return DAG.getNode(
363 ISD::EXTRACT_SUBVECTOR, dl, VT, V,
365}
366
367template <typename T>
368static inline SDValue getBuildVectorizedValue(unsigned N, const SDLoc &dl,
369 SelectionDAG &DAG, T GetElement) {
370 if (N == 1)
371 return GetElement(0);
372
374 for (const unsigned I : llvm::seq(N)) {
375 SDValue Val = GetElement(I);
376 if (Val.getValueType().isVector())
377 DAG.ExtractVectorElements(Val, Values);
378 else
379 Values.push_back(Val);
380 }
381
382 EVT VT = EVT::getVectorVT(*DAG.getContext(), Values[0].getValueType(),
383 Values.size());
384 return DAG.getBuildVector(VT, dl, Values);
385}
386
387/// PromoteScalarIntegerPTX
388/// Used to make sure the arguments/returns are suitable for passing
389/// and promote them to a larger size if they're not.
390///
391/// Returns the promoted type, or \p VT unchanged if no promotion is needed.
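/// Illustrative example (not in the original comment): an i3 or i6 value is
/// promoted to i8, i12 to i16, and i27 to i32; i1 and already register-sized
/// integers are returned unchanged.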
392static EVT promoteScalarIntegerPTX(const EVT VT) {
393  if (VT.isScalarInteger()) {
394 switch (PowerOf2Ceil(VT.getFixedSizeInBits())) {
395 default:
396      llvm_unreachable(
397          "Promotion is not suitable for scalars of size larger than 64-bits");
398 case 1:
399 return MVT::i1;
400 case 2:
401 case 4:
402 case 8:
403 return MVT::i8;
404 case 16:
405 return MVT::i16;
406 case 32:
407 return MVT::i32;
408 case 64:
409 return MVT::i64;
410 }
411 }
412 return VT;
413}
414
415// Check whether we can merge loads/stores of some of the pieces of a
416// flattened function parameter or return value into a single vector
417// load/store.
418//
419// The flattened parameter is represented as a list of EVTs and
420// offsets, and the whole structure is aligned to ParamAlignment. This
421// function determines whether we can load/store pieces of the
422// parameter starting at index Idx using a single vectorized op of
423// size AccessSize. If so, it returns the number of param pieces
424// covered by the vector op. Otherwise, it returns 1.
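// Illustrative example: with ValueVTs = {f32, f32, f32, f32}, Offsets =
// {0, 4, 8, 12}, and a 16-byte ParamAlignment, a query at Idx = 0 with
// AccessSize = 16 returns 4 (one v4 access covers all pieces). If the
// parameter were only 8-byte aligned, the 16-byte access is rejected and the
// caller retries with AccessSize = 8, merging the pieces in pairs instead.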
425template <typename T>
427 unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs,
428 const SmallVectorImpl<T> &Offsets, Align ParamAlignment) {
429
430 // Can't vectorize if param alignment is not sufficient.
431 if (ParamAlignment < AccessSize)
432 return 1;
433 // Can't vectorize if offset is not aligned.
434 if (Offsets[Idx] & (AccessSize - 1))
435 return 1;
436
437 EVT EltVT = ValueVTs[Idx];
438 unsigned EltSize = EltVT.getStoreSize();
439
440 // Element is too large to vectorize.
441 if (EltSize >= AccessSize)
442 return 1;
443
444 unsigned NumElts = AccessSize / EltSize;
445  // Can't vectorize if AccessSize is not a multiple of EltSize.
446 if (AccessSize != EltSize * NumElts)
447 return 1;
448
449 // We don't have enough elements to vectorize.
450 if (Idx + NumElts > ValueVTs.size())
451 return 1;
452
453 // PTX ISA can only deal with 2- and 4-element vector ops.
454 if (NumElts != 4 && NumElts != 2)
455 return 1;
456
457 for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
458 // Types do not match.
459 if (ValueVTs[j] != EltVT)
460 return 1;
461
462 // Elements are not contiguous.
463 if (Offsets[j] - Offsets[j - 1] != EltSize)
464 return 1;
465 }
466  // OK. We can vectorize ValueVTs[Idx..Idx+NumElts).
467 return NumElts;
468}
469
470// Computes whether and how we can vectorize the loads/stores of a
471// flattened function parameter or return value.
472//
473// The flattened parameter is represented as the list of ValueVTs and
474// Offsets, and is aligned to ParamAlignment bytes. We return a vector
475// of the same size as ValueVTs indicating how each piece should be
476// loaded/stored (i.e. as a scalar, or as part of a vector
477// load/store).
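// Continuing the example above (illustrative): four contiguous f32 pieces
// with 16-byte alignment produce {4}, while an 8-byte-aligned layout produces
// {2, 2}; each entry is the number of consecutive pieces covered by one
// vectorized access.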
478template <typename T>
481 const SmallVectorImpl<T> &Offsets, Align ParamAlignment,
482 bool IsVAArg = false) {
483 // Set vector size to match ValueVTs and mark all elements as
484 // scalars by default.
485
486 if (IsVAArg)
487 return SmallVector<unsigned>(ValueVTs.size(), 1);
488
489 SmallVector<unsigned, 16> VectorInfo;
490
491 const auto GetNumElts = [&](unsigned I) -> unsigned {
492 for (const unsigned AccessSize : {16, 8, 4, 2}) {
493 const unsigned NumElts = canMergeParamLoadStoresStartingAt(
494 I, AccessSize, ValueVTs, Offsets, ParamAlignment);
495 assert((NumElts == 1 || NumElts == 2 || NumElts == 4) &&
496 "Unexpected vectorization size");
497 if (NumElts != 1)
498 return NumElts;
499 }
500 return 1;
501 };
502
503 // Check what we can vectorize using 128/64/32-bit accesses.
504 for (unsigned I = 0, E = ValueVTs.size(); I != E;) {
505 const unsigned NumElts = GetNumElts(I);
506 VectorInfo.push_back(NumElts);
507 I += NumElts;
508 }
509 assert(std::accumulate(VectorInfo.begin(), VectorInfo.end(), 0u) ==
510 ValueVTs.size());
511 return VectorInfo;
512}
513
514// NVPTXTargetLowering Constructor.
515NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
516                                         const NVPTXSubtarget &STI)
517 : TargetLowering(TM, STI), nvTM(&TM), STI(STI), GlobalUniqueCallSite(0) {
518  // Always lower memset, memcpy, and memmove intrinsics to load/store
519  // instructions, rather than generating calls to memset, memcpy, or
520  // memmove.
524
527
528 // Jump is Expensive. Don't create extra control flow for 'and', 'or'
529 // condition branches.
530 setJumpIsExpensive(true);
531
532 // Wide divides are _very_ slow. Try to reduce the width of the divide if
533 // possible.
534 addBypassSlowDiv(64, 32);
535
536 // By default, use the Source scheduling
537 if (sched4reg)
539 else
541
542 auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
543 LegalizeAction NoF16Action) {
544 bool IsOpSupported = STI.allowFP16Math();
545 switch (Op) {
546 // Several FP16 instructions are available on sm_80 only.
547 case ISD::FMINNUM:
548 case ISD::FMAXNUM:
549 case ISD::FMAXNUM_IEEE:
550 case ISD::FMINNUM_IEEE:
551 case ISD::FMAXIMUM:
552 case ISD::FMINIMUM:
553 case ISD::FMAXIMUMNUM:
554 case ISD::FMINIMUMNUM:
555 IsOpSupported &= STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
556 break;
557 case ISD::FEXP2:
558 IsOpSupported &= STI.getSmVersion() >= 75 && STI.getPTXVersion() >= 70;
559 break;
560 }
561 setOperationAction(Op, VT, IsOpSupported ? Action : NoF16Action);
562 };
563
564 auto setBF16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
565 LegalizeAction NoBF16Action) {
566 bool IsOpSupported = STI.hasNativeBF16Support(Op);
568 Op, VT, IsOpSupported ? Action : NoBF16Action);
569 };
570
571 auto setI16x2OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
572 LegalizeAction NoI16x2Action) {
573 bool IsOpSupported = false;
574 // instructions are available on sm_90 only
575 switch (Op) {
576 case ISD::ADD:
577 case ISD::SMAX:
578 case ISD::SMIN:
579 case ISD::UMIN:
580 case ISD::UMAX:
581 IsOpSupported = STI.getSmVersion() >= 90 && STI.getPTXVersion() >= 80;
582 break;
583 }
584 setOperationAction(Op, VT, IsOpSupported ? Action : NoI16x2Action);
585 };
586
587 addRegisterClass(MVT::i1, &NVPTX::B1RegClass);
588 addRegisterClass(MVT::i16, &NVPTX::B16RegClass);
589 addRegisterClass(MVT::v2i16, &NVPTX::B32RegClass);
590 addRegisterClass(MVT::v4i8, &NVPTX::B32RegClass);
591 addRegisterClass(MVT::i32, &NVPTX::B32RegClass);
592 addRegisterClass(MVT::i64, &NVPTX::B64RegClass);
593 addRegisterClass(MVT::f32, &NVPTX::B32RegClass);
594 addRegisterClass(MVT::f64, &NVPTX::B64RegClass);
595 addRegisterClass(MVT::f16, &NVPTX::B16RegClass);
596 addRegisterClass(MVT::v2f16, &NVPTX::B32RegClass);
597 addRegisterClass(MVT::bf16, &NVPTX::B16RegClass);
598 addRegisterClass(MVT::v2bf16, &NVPTX::B32RegClass);
599
600 if (STI.hasF32x2Instructions()) {
601 addRegisterClass(MVT::v2f32, &NVPTX::B64RegClass);
602 addRegisterClass(MVT::v2i32, &NVPTX::B64RegClass);
603 }
604
605 // Conversion to/from FP16/FP16x2 is always legal.
610
611 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
612 if (STI.getSmVersion() >= 30 && STI.getPTXVersion() > 31)
613 setOperationAction(ISD::READSTEADYCOUNTER, MVT::i64, Legal);
614
615 setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);
616 setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);
617
618  // Conversion to/from BF16/BF16x2 is always legal.
623
624 setBF16OperationAction(ISD::SETCC, MVT::v2bf16, Legal, Expand);
625 setBF16OperationAction(ISD::SETCC, MVT::bf16, Legal, Promote);
626 if (getOperationAction(ISD::SETCC, MVT::bf16) == Promote)
627 AddPromotedToType(ISD::SETCC, MVT::bf16, MVT::f32);
628
629 // Conversion to/from i16/i16x2 is always legal.
634
639
640 // No support for these operations with v2f32/v2i32
641 setOperationAction(ISD::INSERT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32}, Expand);
642 setOperationAction(ISD::VECTOR_SHUFFLE, {MVT::v2f32, MVT::v2i32}, Expand);
643
646 MVT::v2i32, Expand);
647
648 // Need custom lowering in case the index is dynamic.
649 if (STI.hasF32x2Instructions())
650 setOperationAction(ISD::EXTRACT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32},
651 Custom);
652
653 // Custom conversions to/from v2i8.
654 setOperationAction(ISD::BITCAST, MVT::v2i8, Custom);
655
656 // Only logical ops can be done on v4i8/v2i32 directly, others must be done
657 // elementwise.
674 {MVT::v4i8, MVT::v2i32}, Expand);
675
676 // Operations not directly supported by NVPTX.
677 for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
678 MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16,
679 MVT::v4i8, MVT::i32, MVT::v2i32, MVT::i64}) {
681 setOperationAction(ISD::BR_CC, VT, Expand);
682 }
683
684 // We don't want ops like FMINIMUM or UMAX to be lowered to SETCC+VSELECT.
685 setOperationAction(ISD::VSELECT, {MVT::v2f32, MVT::v2i32}, Expand);
686
687 // Some SIGN_EXTEND_INREG can be done using cvt instruction.
688 // For others we will expand to a SHL/SRA pair.
694 setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i16, MVT::v2i32}, Expand);
695
702
705
707 {MVT::i8, MVT::i16, MVT::v2i16, MVT::i32, MVT::i64},
708 Expand);
709
710 if (STI.hasHWROT32()) {
713 Custom);
714 }
715
717
718 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
719 setOperationAction(ISD::BRIND, MVT::Other, Expand);
720
721  // We want to legalize constant-related memmove and memcpy
722  // intrinsics.
724
725 // FP extload/truncstore is not legal in PTX. We need to expand all these.
726 for (auto FloatVTs :
728 for (MVT ValVT : FloatVTs) {
729 for (MVT MemVT : FloatVTs) {
730 setLoadExtAction(ISD::EXTLOAD, ValVT, MemVT, Expand);
731 setTruncStoreAction(ValVT, MemVT, Expand);
732 }
733 }
734 }
735
736 // To improve CodeGen we'll legalize any-extend loads to zext loads. This is
737 // how they'll be lowered in ISel anyway, and by doing this a little earlier
738 // we allow for more DAG combine opportunities.
739 for (auto IntVTs :
741 for (MVT ValVT : IntVTs)
742 for (MVT MemVT : IntVTs)
743 if (isTypeLegal(ValVT))
744 setLoadExtAction(ISD::EXTLOAD, ValVT, MemVT, Custom);
745
746 // PTX does not support load / store predicate registers
747 setOperationAction({ISD::LOAD, ISD::STORE}, MVT::i1, Custom);
748 for (MVT VT : MVT::integer_valuetypes()) {
750 Promote);
751 setTruncStoreAction(VT, MVT::i1, Expand);
752 }
753
754  // Disable generation of extload/truncstore for v2i32/v2i16/v2i8. The generic
755 // expansion for these nodes when they are unaligned is incorrect if the
756 // type is a vector.
757 //
758 // TODO: Fix the generic expansion for these nodes found in
759 // TargetLowering::expandUnalignedLoad/Store.
761 MVT::v2i8, Expand);
763 {MVT::v2i8, MVT::v2i16}, Expand);
764 setTruncStoreAction(MVT::v2i16, MVT::v2i8, Expand);
765 setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
766 setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
767
768 // Register custom handling for illegal type loads/stores. We'll try to custom
769 // lower almost all illegal types and logic in the lowering will discard cases
770 // we can't handle.
771 setOperationAction({ISD::LOAD, ISD::STORE}, {MVT::i128, MVT::f128}, Custom);
773 if (!isTypeLegal(VT) && VT.getStoreSizeInBits() <= 256)
774 setOperationAction({ISD::STORE, ISD::LOAD}, VT, Custom);
775
776 // Custom legalization for LDU intrinsics.
777 // TODO: The logic to lower these is not very robust and we should rewrite it.
778 // Perhaps LDU should not be represented as an intrinsic at all.
781 if (IsPTXVectorType(VT))
783
787 MVT::i1, Expand);
788
789 // This is legal in NVPTX
794
795 setOperationAction(ISD::DYNAMIC_STACKALLOC, {MVT::i32, MVT::i64}, Custom);
796 setOperationAction({ISD::STACKRESTORE, ISD::STACKSAVE}, MVT::Other, Custom);
797
798 // TRAP can be lowered to PTX trap
799 setOperationAction(ISD::TRAP, MVT::Other, Legal);
800 // DEBUGTRAP can be lowered to PTX brkpt
801 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
802
803 // Support varargs.
804 setOperationAction(ISD::VASTART, MVT::Other, Custom);
805 setOperationAction(ISD::VAARG, MVT::Other, Custom);
806 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
807 setOperationAction(ISD::VAEND, MVT::Other, Expand);
808
810 {MVT::i16, MVT::i32, MVT::i64}, Legal);
811
813 Promote);
816
817 setI16x2OperationAction(ISD::ABS, MVT::v2i16, Legal, Custom);
818 setI16x2OperationAction(ISD::SMIN, MVT::v2i16, Legal, Custom);
819 setI16x2OperationAction(ISD::SMAX, MVT::v2i16, Legal, Custom);
820 setI16x2OperationAction(ISD::UMIN, MVT::v2i16, Legal, Custom);
821 setI16x2OperationAction(ISD::UMAX, MVT::v2i16, Legal, Custom);
822 setI16x2OperationAction(ISD::CTPOP, MVT::v2i16, Legal, Expand);
823 setI16x2OperationAction(ISD::CTLZ, MVT::v2i16, Legal, Expand);
824
825 setI16x2OperationAction(ISD::ADD, MVT::v2i16, Legal, Custom);
826 setI16x2OperationAction(ISD::SUB, MVT::v2i16, Legal, Custom);
827 setI16x2OperationAction(ISD::MUL, MVT::v2i16, Legal, Custom);
828 setI16x2OperationAction(ISD::SHL, MVT::v2i16, Legal, Custom);
829 setI16x2OperationAction(ISD::SREM, MVT::v2i16, Legal, Custom);
830 setI16x2OperationAction(ISD::UREM, MVT::v2i16, Legal, Custom);
831
832 // Other arithmetic and logic ops are unsupported.
836 {MVT::v2i16, MVT::v2i32}, Expand);
837
838 // v2i32 is not supported for any arithmetic operations
843 MVT::v2i32, Expand);
844
849 if (STI.getPTXVersion() >= 43) {
854 }
855
857 setOperationAction(ISD::CTTZ, {MVT::v2i16, MVT::v2i32}, Expand);
860
861 // PTX does not directly support SELP of i1, so promote to i32 first
863
864 // PTX cannot multiply two i64s in a single instruction.
867
868 // We have some custom DAG combine patterns for these nodes
871 ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM,
872 ISD::FMAXIMUM, ISD::FMINIMUM, ISD::FMAXIMUMNUM,
873 ISD::FMINIMUMNUM, ISD::MUL, ISD::SHL,
875 ISD::BUILD_VECTOR, ISD::ADDRSPACECAST, ISD::LOAD,
876 ISD::STORE, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND});
877
878 // setcc for f16x2 and bf16x2 needs special handling to prevent
879 // legalizer's attempt to scalarize it due to v2i1 not being legal.
880 if (STI.allowFP16Math() || STI.hasBF16Math())
882
883 // Vector reduction operations. These may be turned into shuffle or tree
884 // reductions depending on what instructions are available for each type.
886 MVT EltVT = VT.getVectorElementType();
887 if (EltVT == MVT::f32 || EltVT == MVT::f64) {
888 setOperationAction({ISD::VECREDUCE_FMAX, ISD::VECREDUCE_FMIN,
889 ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM},
890 VT, Custom);
891 }
892 }
893
894 // Promote fp16 arithmetic if fp16 hardware isn't available or the
895 // user passed --nvptx-no-fp16-math. The flag is useful because,
896 // although sm_53+ GPUs have some sort of FP16 support in
897 // hardware, only sm_53 and sm_60 have full implementation. Others
898 // only have token amount of hardware and are likely to run faster
899  // only have a token amount of hardware and are likely to run faster
900 for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB, ISD::FMA}) {
901 setFP16OperationAction(Op, MVT::f16, Legal, Promote);
902 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
903 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
904 // bf16 must be promoted to f32.
905 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
906 if (getOperationAction(Op, MVT::bf16) == Promote)
907 AddPromotedToType(Op, MVT::bf16, MVT::f32);
908 setOperationAction(Op, MVT::v2f32,
909 STI.hasF32x2Instructions() ? Legal : Expand);
910 }
911
912 // On SM80, we select add/mul/sub as fma to avoid promotion to float
913 for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB}) {
914 for (const auto &VT : {MVT::bf16, MVT::v2bf16}) {
915 if (!STI.hasNativeBF16Support(Op) && STI.hasNativeBF16Support(ISD::FMA)) {
917 }
918 }
919 }
920
921 // f16/f16x2 neg was introduced in PTX 60, SM_53.
922 const bool IsFP16FP16x2NegAvailable = STI.getSmVersion() >= 53 &&
923 STI.getPTXVersion() >= 60 &&
924 STI.allowFP16Math();
925 for (const auto &VT : {MVT::f16, MVT::v2f16})
926 setOperationAction(ISD::FNEG, VT,
927 IsFP16FP16x2NegAvailable ? Legal : Expand);
928
929 setBF16OperationAction(ISD::FNEG, MVT::bf16, Legal, Expand);
930 setBF16OperationAction(ISD::FNEG, MVT::v2bf16, Legal, Expand);
931 setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
932 // (would be) Library functions.
933
934 // These map to conversion instructions for scalar FP types.
935 for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
936 ISD::FROUNDEVEN, ISD::FTRUNC}) {
937 setOperationAction(Op, MVT::f16, Legal);
938 setOperationAction(Op, MVT::f32, Legal);
939 setOperationAction(Op, MVT::f64, Legal);
940 setOperationAction(Op, MVT::v2f16, Expand);
941 setOperationAction(Op, MVT::v2bf16, Expand);
942 setOperationAction(Op, MVT::v2f32, Expand);
943 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
944 if (getOperationAction(Op, MVT::bf16) == Promote)
945 AddPromotedToType(Op, MVT::bf16, MVT::f32);
946 }
947
948 if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71) {
949 setOperationAction(ISD::BF16_TO_FP, MVT::f32, Expand);
950 }
951 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
952 for (MVT VT : {MVT::bf16, MVT::f32, MVT::f64}) {
953 setOperationAction(ISD::FP_EXTEND, VT, Custom);
955 }
956 }
957
958 // Expand v2f32 = fp_extend
959 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
960 // Expand v2[b]f16 = fp_round v2f32
961 setOperationAction(ISD::FP_ROUND, {MVT::v2bf16, MVT::v2f16}, Expand);
962
963 // sm_80 only has conversions between f32 and bf16. Custom lower all other
964 // bf16 conversions.
965 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
966 for (MVT VT : {MVT::i1, MVT::i16, MVT::i32, MVT::i64}) {
969 VT, Custom);
970 }
973 MVT::bf16, Custom);
974 }
975
976 setOperationAction(ISD::FROUND, MVT::f16, Promote);
977 setOperationAction(ISD::FROUND, MVT::v2f16, Expand);
978 setOperationAction(ISD::FROUND, MVT::v2bf16, Expand);
979 setOperationAction(ISD::FROUND, MVT::f32, Custom);
980 setOperationAction(ISD::FROUND, MVT::f64, Custom);
981 setOperationAction(ISD::FROUND, MVT::bf16, Promote);
982 AddPromotedToType(ISD::FROUND, MVT::bf16, MVT::f32);
983
984 // 'Expand' implements FCOPYSIGN without calling an external library.
991
992 // These map to corresponding instructions for f32/f64. f16 must be
993 // promoted to f32. v2f16 is expanded to f16, which is then promoted
994 // to f32.
995 for (const auto &Op :
996 {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FTANH}) {
997 setOperationAction(Op, MVT::f16, Promote);
998 setOperationAction(Op, MVT::f32, Legal);
999 // only div/rem/sqrt are legal for f64
1000 if (Op == ISD::FDIV || Op == ISD::FREM || Op == ISD::FSQRT) {
1001 setOperationAction(Op, MVT::f64, Legal);
1002 }
1003 setOperationAction(Op, {MVT::v2f16, MVT::v2bf16, MVT::v2f32}, Expand);
1004 setOperationAction(Op, MVT::bf16, Promote);
1005 AddPromotedToType(Op, MVT::bf16, MVT::f32);
1006 }
1007 setOperationAction(ISD::FREM, {MVT::f32, MVT::f64}, Custom);
1008
1009 setOperationAction(ISD::FABS, {MVT::f32, MVT::f64}, Legal);
1010 setOperationAction(ISD::FABS, MVT::v2f32, Expand);
1011 if (STI.getPTXVersion() >= 65) {
1012 setFP16OperationAction(ISD::FABS, MVT::f16, Legal, Promote);
1013 setFP16OperationAction(ISD::FABS, MVT::v2f16, Legal, Expand);
1014 } else {
1015 setOperationAction(ISD::FABS, MVT::f16, Promote);
1016 setOperationAction(ISD::FABS, MVT::v2f16, Expand);
1017 }
1018 setBF16OperationAction(ISD::FABS, MVT::v2bf16, Legal, Expand);
1019 setBF16OperationAction(ISD::FABS, MVT::bf16, Legal, Promote);
1020 if (getOperationAction(ISD::FABS, MVT::bf16) == Promote)
1021 AddPromotedToType(ISD::FABS, MVT::bf16, MVT::f32);
1022
1023 for (const auto &Op :
1024 {ISD::FMINNUM, ISD::FMAXNUM, ISD::FMINIMUMNUM, ISD::FMAXIMUMNUM}) {
1025 setOperationAction(Op, MVT::f32, Legal);
1026 setOperationAction(Op, MVT::f64, Legal);
1027 setFP16OperationAction(Op, MVT::f16, Legal, Promote);
1028 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
1029 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
1030 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
1031 if (getOperationAction(Op, MVT::bf16) == Promote)
1032 AddPromotedToType(Op, MVT::bf16, MVT::f32);
1033 setOperationAction(Op, MVT::v2f32, Expand);
1034 }
1035 bool SupportsF32MinMaxNaN =
1036 STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
1037 for (const auto &Op : {ISD::FMINIMUM, ISD::FMAXIMUM}) {
1038 setOperationAction(Op, MVT::f32, SupportsF32MinMaxNaN ? Legal : Expand);
1039 setFP16OperationAction(Op, MVT::f16, Legal, Expand);
1040 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
1041 setBF16OperationAction(Op, MVT::bf16, Legal, Expand);
1042 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
1043 setOperationAction(Op, MVT::v2f32, Expand);
1044 }
1045
1046 // Custom lowering for inline asm with 128-bit operands
1049
1050 // FEXP2 support:
1051 // - f32
1052 // - f16/f16x2 (sm_70+, PTX 7.0+)
1053 // - bf16/bf16x2 (sm_90+, PTX 7.8+)
1054 // When f16/bf16 types aren't supported, they are promoted/expanded to f32.
1055 setOperationAction(ISD::FEXP2, MVT::f32, Legal);
1056 setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
1057 setFP16OperationAction(ISD::FEXP2, MVT::f16, Legal, Promote);
1058 setFP16OperationAction(ISD::FEXP2, MVT::v2f16, Legal, Expand);
1059 setBF16OperationAction(ISD::FEXP2, MVT::bf16, Legal, Promote);
1060 setBF16OperationAction(ISD::FEXP2, MVT::v2bf16, Legal, Expand);
1061
1062 // FLOG2 supports f32 only
1063 // f16/bf16 types aren't supported, but they are promoted/expanded to f32.
1064 if (UseApproxLog2F32) {
1065 setOperationAction(ISD::FLOG2, MVT::f32, Legal);
1066 setOperationPromotedToType(ISD::FLOG2, MVT::f16, MVT::f32);
1067 setOperationPromotedToType(ISD::FLOG2, MVT::bf16, MVT::f32);
1068 setOperationAction(ISD::FLOG2, {MVT::v2f16, MVT::v2bf16, MVT::v2f32},
1069 Expand);
1070 }
1071
1072 setOperationAction(ISD::ADDRSPACECAST, {MVT::i32, MVT::i64}, Custom);
1073
1074 setOperationAction(ISD::ATOMIC_LOAD_SUB, {MVT::i32, MVT::i64}, Expand);
1075
1076 // atom.b128 is legal in PTX but since we don't represent i128 as a legal
1077 // type, we need to custom lower it.
1078 setOperationAction({ISD::ATOMIC_CMP_SWAP, ISD::ATOMIC_SWAP}, MVT::i128,
1079 Custom);
1080
1081 // Now deduce the information based on the above mentioned
1082 // actions
1083 computeRegisterProperties(STI.getRegisterInfo());
1084
1085 // PTX support for 16-bit CAS is emulated. Only use 32+
1086 setMinCmpXchgSizeInBits(STI.getMinCmpXchgSizeInBits());
1087 setMaxAtomicSizeInBitsSupported(STI.hasAtomSwap128() ? 128 : 64);
1089
1090 // Custom lowering for tcgen05.ld vector operands
1092 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
1093 MVT::v32i32, MVT::v64i32, MVT::v128i32},
1094 Custom);
1095
1096 // Custom lowering for tcgen05.st vector operands
1098 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
1099 MVT::v32i32, MVT::v64i32, MVT::v128i32, MVT::Other},
1100 Custom);
1101
1102 // Enable custom lowering for the following:
1103 // * MVT::i128 - clusterlaunchcontrol
1104 // * MVT::i32 - prmt
1105 // * MVT::v4f32 - cvt_rs fp{4/6/8}x4 intrinsics
1106 // * MVT::Other - internal.addrspace.wrap
1108 {MVT::i32, MVT::i128, MVT::v4f32, MVT::Other}, Custom);
1109}
1110
1113 if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
1114 VT.getScalarType() == MVT::i1)
1115 return TypeSplitVector;
1117}
1118
1120 int Enabled, int &ExtraSteps,
1121 bool &UseOneConst,
1122 bool Reciprocal) const {
1125 return SDValue();
1126
1127 if (ExtraSteps == ReciprocalEstimate::Unspecified)
1128 ExtraSteps = 0;
1129
1130 SDLoc DL(Operand);
1131 EVT VT = Operand.getValueType();
1132 bool Ftz = useF32FTZ(DAG.getMachineFunction());
1133
1134 auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {
1135 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
1136 DAG.getConstant(IID, DL, MVT::i32), Operand);
1137 };
1138
1139 // The sqrt and rsqrt refinement processes assume we always start out with an
1140 // approximation of the rsqrt. Therefore, if we're going to do any refinement
1141 // (i.e. ExtraSteps > 0), we must return an rsqrt. But if we're *not* doing
1142 // any refinement, we must return a regular sqrt.
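  // Illustrative behavior (summary, not from the original comment): with
  // approximation allowed and no refinement steps, an f32 sqrt becomes a
  // single Intrinsic::nvvm_sqrt_approx_f (or its ftz variant) call, while a
  // reciprocal sqrt, or any request with ExtraSteps > 0, instead starts from
  // Intrinsic::nvvm_rsqrt_approx_f so refinement has an rsqrt to work with.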
1143 if (Reciprocal || ExtraSteps > 0) {
1144 if (VT == MVT::f32)
1145 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
1146 : Intrinsic::nvvm_rsqrt_approx_f);
1147 else if (VT == MVT::f64)
1148 return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
1149 else
1150 return SDValue();
1151 } else {
1152 if (VT == MVT::f32)
1153 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
1154 : Intrinsic::nvvm_sqrt_approx_f);
1155 else {
1156 // There's no sqrt.approx.f64 instruction, so we emit
1157 // reciprocal(rsqrt(x)). This is faster than
1158 // select(x == 0, 0, x * rsqrt(x)). (In fact, it's faster than plain
1159 // x * rsqrt(x).)
1160 return DAG.getNode(
1162 DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
1163 MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
1164 }
1165 }
1166}
1167
1169 const DataLayout &DL, Type *RetTy, const ArgListTy &Args,
1171 std::optional<unsigned> FirstVAArg, const CallBase &CB,
1172 unsigned UniqueCallSite) const {
1173 auto PtrVT = getPointerTy(DL);
1174
1175 std::string Prototype;
1176 raw_string_ostream O(Prototype);
1177 O << "prototype_" << UniqueCallSite << " : .callprototype ";
1178
1179 if (RetTy->isVoidTy()) {
1180 O << "()";
1181 } else {
1182 O << "(";
1183 if (shouldPassAsArray(RetTy)) {
1184 const Align RetAlign = getArgumentAlignment(&CB, RetTy, 0, DL);
1185 O << ".param .align " << RetAlign.value() << " .b8 _["
1186 << DL.getTypeAllocSize(RetTy) << "]";
1187 } else if (RetTy->isFloatingPointTy() || RetTy->isIntegerTy()) {
1188 unsigned size = 0;
1189 if (auto *ITy = dyn_cast<IntegerType>(RetTy)) {
1190 size = ITy->getBitWidth();
1191 } else {
1192 assert(RetTy->isFloatingPointTy() &&
1193 "Floating point type expected here");
1194 size = RetTy->getPrimitiveSizeInBits();
1195 }
1196 // PTX ABI requires all scalar return values to be at least 32
1197 // bits in size. fp16 normally uses .b16 as its storage type in
1198 // PTX, so its size must be adjusted here, too.
1200
1201 O << ".param .b" << size << " _";
1202 } else if (isa<PointerType>(RetTy)) {
1203 O << ".param .b" << PtrVT.getSizeInBits() << " _";
1204 } else {
1205 llvm_unreachable("Unknown return type");
1206 }
1207 O << ") ";
1208 }
1209 O << "_ (";
1210
1211 bool first = true;
1212
1213 const unsigned NumArgs = FirstVAArg.value_or(Args.size());
1214 auto AllOuts = ArrayRef(Outs);
1215 for (const unsigned I : llvm::seq(NumArgs)) {
1216 const auto ArgOuts =
1217 AllOuts.take_while([I](auto O) { return O.OrigArgIndex == I; });
1218 AllOuts = AllOuts.drop_front(ArgOuts.size());
1219
1220 Type *Ty = Args[I].Ty;
1221 if (!first) {
1222 O << ", ";
1223 }
1224 first = false;
1225
1226 if (ArgOuts[0].Flags.isByVal()) {
1227 // Indirect calls need strict ABI alignment so we disable optimizations by
1228 // not providing a function to optimize.
1229 Type *ETy = Args[I].IndirectType;
1230 Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1231 Align ParamByValAlign =
1232 getFunctionByValParamAlign(/*F=*/nullptr, ETy, InitialAlign, DL);
1233
1234 O << ".param .align " << ParamByValAlign.value() << " .b8 _["
1235 << ArgOuts[0].Flags.getByValSize() << "]";
1236 } else {
1237 if (shouldPassAsArray(Ty)) {
1238 Align ParamAlign =
1239 getArgumentAlignment(&CB, Ty, I + AttributeList::FirstArgIndex, DL);
1240 O << ".param .align " << ParamAlign.value() << " .b8 _["
1241 << DL.getTypeAllocSize(Ty) << "]";
1242 continue;
1243 }
1244 // i8 types in IR will be i16 types in SDAG
1245 assert((getValueType(DL, Ty) == ArgOuts[0].VT ||
1246 (getValueType(DL, Ty) == MVT::i8 && ArgOuts[0].VT == MVT::i16)) &&
1247 "type mismatch between callee prototype and arguments");
1248 // scalar type
1249 unsigned sz = 0;
1250 if (auto *ITy = dyn_cast<IntegerType>(Ty)) {
1251 sz = promoteScalarArgumentSize(ITy->getBitWidth());
1252 } else if (isa<PointerType>(Ty)) {
1253 sz = PtrVT.getSizeInBits();
1254 } else {
1255 sz = Ty->getPrimitiveSizeInBits();
1256 }
1257 O << ".param .b" << sz << " _";
1258 }
1259 }
1260
1261 if (FirstVAArg)
1262 O << (first ? "" : ",") << " .param .align "
1263 << STI.getMaxRequiredAlignment() << " .b8 _[]";
1264 O << ")";
1265 if (shouldEmitPTXNoReturn(&CB, *nvTM))
1266 O << " .noreturn";
1267 O << ";";
1268
1269 return Prototype;
1270}
1271
1273 const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const {
1274 return getAlign(*F, Idx).value_or(getFunctionParamOptimizedAlign(F, Ty, DL));
1275}
1276
1277Align NVPTXTargetLowering::getArgumentAlignment(const CallBase *CB, Type *Ty,
1278 unsigned Idx,
1279 const DataLayout &DL) const {
1280 if (!CB) {
1281    // CB is null, so fall back to the ABI type alignment
1282 return DL.getABITypeAlign(Ty);
1283 }
1284
1285 const Function *DirectCallee = CB->getCalledFunction();
1286
1287 if (!DirectCallee) {
1288 // We don't have a direct function symbol, but that may be because of
1289 // constant cast instructions in the call.
1290
1291 // With bitcast'd call targets, the instruction will be the call
1292 if (const auto *CI = dyn_cast<CallInst>(CB)) {
1293 // Check if we have call alignment metadata
1294 if (MaybeAlign StackAlign = getAlign(*CI, Idx))
1295 return StackAlign.value();
1296 }
1297 DirectCallee = getMaybeBitcastedCallee(CB);
1298 }
1299
1300 // Check for function alignment information if we found that the
1301 // ultimate target is a Function
1302 if (DirectCallee)
1303 return getFunctionArgumentAlignment(DirectCallee, Ty, Idx, DL);
1304
1305 // Call is indirect, fall back to the ABI type alignment
1306 return DL.getABITypeAlign(Ty);
1307}
1308
1310 const GlobalAddressSDNode *Func) {
1311 if (!Func)
1312 return false;
1313 if (auto *CalleeFunc = dyn_cast<Function>(Func->getGlobal()))
1314 return CB->getFunctionType() != CalleeFunc->getFunctionType();
1315 return false;
1316}
1317
1319 const DataLayout &DL,
1320 const TargetLowering &TL) {
1321 if (Ptr->getOpcode() == ISD::FrameIndex) {
1322 auto Ty = TL.getPointerTy(DL, ADDRESS_SPACE_LOCAL);
1323 Ptr = DAG.getAddrSpaceCast(SDLoc(), Ty, Ptr, ADDRESS_SPACE_GENERIC,
1325
1327 }
1328
1329  // Peel off an addrspacecast to generic and load directly from the specific
1330  // address space.
1331 if (Ptr->getOpcode() == ISD::ADDRSPACECAST) {
1332 const auto *ASC = cast<AddrSpaceCastSDNode>(Ptr);
1333 if (ASC->getDestAddressSpace() == ADDRESS_SPACE_GENERIC) {
1334 Ptr = ASC->getOperand(0);
1335 return MachinePointerInfo(ASC->getSrcAddressSpace());
1336 }
1337 }
1338
1339 return MachinePointerInfo();
1340}
1341
1343 if (Flags.isSExt())
1344 return ISD::SIGN_EXTEND;
1345 if (Flags.isZExt())
1346 return ISD::ZERO_EXTEND;
1347 return ISD::ANY_EXTEND;
1348}
1349
1351 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
1352 SDLoc dl) {
1353 const EVT ActualVT = V.getValueType();
1354 assert((ActualVT == ExpectedVT ||
1355 (ExpectedVT.isInteger() && ActualVT.isInteger())) &&
1356 "Non-integer argument type size mismatch");
1357 if (ExpectedVT.bitsGT(ActualVT))
1358 return DAG.getNode(getExtOpcode(Flags), dl, ExpectedVT, V);
1359 if (ExpectedVT.bitsLT(ActualVT))
1360 return DAG.getNode(ISD::TRUNCATE, dl, ExpectedVT, V);
1361
1362 return V;
1363}
1364
1366 SmallVectorImpl<SDValue> &InVals) const {
1367
1368 if (CLI.IsVarArg && (STI.getPTXVersion() < 60 || STI.getSmVersion() < 30))
1370 "Support for variadic functions (unsized array parameter) introduced "
1371 "in PTX ISA version 6.0 and requires target sm_30.");
1372
1373 SelectionDAG &DAG = CLI.DAG;
1374 SDLoc dl = CLI.DL;
1375 const SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1376 SDValue Callee = CLI.Callee;
1377 ArgListTy &Args = CLI.getArgs();
1378 Type *RetTy = CLI.RetTy;
1379 const CallBase *CB = CLI.CB;
1380 const DataLayout &DL = DAG.getDataLayout();
1381 LLVMContext &Ctx = *DAG.getContext();
1382
1383 const auto GetI32 = [&](const unsigned I) {
1384 return DAG.getConstant(I, dl, MVT::i32);
1385 };
1386
1387 const unsigned UniqueCallSite = GlobalUniqueCallSite++;
1388 const SDValue CallChain = CLI.Chain;
1389 const SDValue StartChain =
1390 DAG.getCALLSEQ_START(CallChain, UniqueCallSite, 0, dl);
1391 SDValue DeclareGlue = StartChain.getValue(1);
1392
1393 SmallVector<SDValue, 16> CallPrereqs{StartChain};
1394
1395 const auto MakeDeclareScalarParam = [&](SDValue Symbol, unsigned Size) {
1396 // PTX ABI requires integral types to be at least 32 bits in size. FP16 is
1397 // loaded/stored using i16, so it's handled here as well.
1398 const unsigned SizeBits = promoteScalarArgumentSize(Size * 8);
1399 SDValue Declare =
1400 DAG.getNode(NVPTXISD::DeclareScalarParam, dl, {MVT::Other, MVT::Glue},
1401 {StartChain, Symbol, GetI32(SizeBits), DeclareGlue});
1402 CallPrereqs.push_back(Declare);
1403 DeclareGlue = Declare.getValue(1);
1404 return Declare;
1405 };
1406
1407 const auto MakeDeclareArrayParam = [&](SDValue Symbol, Align Align,
1408 unsigned Size) {
1409 SDValue Declare = DAG.getNode(
1410 NVPTXISD::DeclareArrayParam, dl, {MVT::Other, MVT::Glue},
1411 {StartChain, Symbol, GetI32(Align.value()), GetI32(Size), DeclareGlue});
1412 CallPrereqs.push_back(Declare);
1413 DeclareGlue = Declare.getValue(1);
1414 return Declare;
1415 };
1416
1417 // Variadic arguments.
1418 //
1419 // Normally, for each argument, we declare a param scalar or a param
1420 // byte array in the .param space, and store the argument value to that
1421 // param scalar or array starting at offset 0.
1422 //
1423 // In the case of the first variadic argument, we declare a vararg byte array
1424 // with size 0. The exact size of this array isn't known at this point, so
1425 // it'll be patched later. All the variadic arguments will be stored to this
1426 // array at a certain offset (which gets tracked by 'VAOffset'). The offset is
1427 // initially set to 0, so it can be used for non-variadic arguments (which use
1428 // 0 offset) to simplify the code.
1429 //
1430  // After all variadic arguments are processed, 'VAOffset' holds the size
1431  // of the vararg byte array.
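  // For example (illustrative): a variadic call like printf(fmt, i, d) keeps
  // 'fmt' in its own param, while 'i' and 'd' are stored into the single
  // vararg byte array at aligned offsets; the final VAOffset is the array's
  // total size and is patched into the declaration after the loop below.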
1432 assert((CLI.IsVarArg || CLI.Args.size() == CLI.NumFixedArgs) &&
1433 "Non-VarArg function with extra arguments");
1434
1435 const unsigned FirstVAArg = CLI.NumFixedArgs; // position of first variadic
1436 unsigned VAOffset = 0; // current offset in the param array
1437
1438 const SDValue VADeclareParam =
1439 CLI.Args.size() > FirstVAArg
1440 ? MakeDeclareArrayParam(getCallParamSymbol(DAG, FirstVAArg, MVT::i32),
1441 Align(STI.getMaxRequiredAlignment()), 0)
1442 : SDValue();
1443
1444 // Args.size() and Outs.size() need not match.
1445 // Outs.size() will be larger
1446 // * if there is an aggregate argument with multiple fields (each field
1447 // showing up separately in Outs)
1448 // * if there is a vector argument with more than typical vector-length
1449 // elements (generally if more than 4) where each vector element is
1450 // individually present in Outs.
1451 // So a different index should be used for indexing into Outs/OutVals.
1452 // See similar issue in LowerFormalArguments.
1453 auto AllOuts = ArrayRef(CLI.Outs);
1454 auto AllOutVals = ArrayRef(CLI.OutVals);
1455 assert(AllOuts.size() == AllOutVals.size() &&
1456 "Outs and OutVals must be the same size");
1457  // Declare the .param spaces needed to pass values
1458  // to the function.
1459 for (const auto E : llvm::enumerate(Args)) {
1460 const auto ArgI = E.index();
1461 const auto Arg = E.value();
1462 const auto ArgOuts =
1463 AllOuts.take_while([&](auto O) { return O.OrigArgIndex == ArgI; });
1464 const auto ArgOutVals = AllOutVals.take_front(ArgOuts.size());
1465 AllOuts = AllOuts.drop_front(ArgOuts.size());
1466 AllOutVals = AllOutVals.drop_front(ArgOuts.size());
1467
1468 const bool IsVAArg = (ArgI >= FirstVAArg);
1469 const bool IsByVal = Arg.IsByVal;
1470
1471 const SDValue ParamSymbol =
1472 getCallParamSymbol(DAG, IsVAArg ? FirstVAArg : ArgI, MVT::i32);
1473
1474 assert((!IsByVal || Arg.IndirectType) &&
1475 "byval arg must have indirect type");
1476 Type *ETy = (IsByVal ? Arg.IndirectType : Arg.Ty);
1477
1478 const Align ArgAlign = [&]() {
1479 if (IsByVal) {
1480 // The ByValAlign in the Outs[OIdx].Flags is always set at this point,
1481 // so we don't need to worry whether it's naturally aligned or not.
1482 // See TargetLowering::LowerCallTo().
1483 const Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1485 InitialAlign, DL);
1486 }
1487 return getArgumentAlignment(CB, Arg.Ty, ArgI + 1, DL);
1488 }();
1489
1490 const unsigned TySize = DL.getTypeAllocSize(ETy);
1491 assert((!IsByVal || TySize == ArgOuts[0].Flags.getByValSize()) &&
1492 "type size mismatch");
1493
1494 const SDValue ArgDeclare = [&]() {
1495 if (IsVAArg)
1496 return VADeclareParam;
1497
1498 if (IsByVal || shouldPassAsArray(Arg.Ty))
1499 return MakeDeclareArrayParam(ParamSymbol, ArgAlign, TySize);
1500
1501 assert(ArgOuts.size() == 1 && "We must pass only one value as non-array");
1502 assert((ArgOuts[0].VT.isInteger() || ArgOuts[0].VT.isFloatingPoint()) &&
1503 "Only int and float types are supported as non-array arguments");
1504
1505 return MakeDeclareScalarParam(ParamSymbol, TySize);
1506 }();
1507
1508 if (IsByVal) {
1509 assert(ArgOutVals.size() == 1 && "We must pass only one value as byval");
1510 SDValue SrcPtr = ArgOutVals[0];
1511 const auto PointerInfo = refinePtrAS(SrcPtr, DAG, DL, *this);
1512 const Align BaseSrcAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1513
1514 if (IsVAArg)
1515 VAOffset = alignTo(VAOffset, ArgAlign);
1516
1517 SmallVector<EVT, 4> ValueVTs, MemVTs;
1519 ComputeValueVTs(*this, DL, ETy, ValueVTs, &MemVTs, &Offsets);
1520
1521 unsigned J = 0;
1522 const auto VI = VectorizePTXValueVTs(MemVTs, Offsets, ArgAlign, IsVAArg);
1523 for (const unsigned NumElts : VI) {
1524 EVT LoadVT = getVectorizedVT(MemVTs[J], NumElts, Ctx);
1525 Align SrcAlign = commonAlignment(BaseSrcAlign, Offsets[J]);
1526 SDValue SrcAddr = DAG.getObjectPtrOffset(dl, SrcPtr, Offsets[J]);
1527 SDValue SrcLoad =
1528 DAG.getLoad(LoadVT, dl, CallChain, SrcAddr, PointerInfo, SrcAlign);
1529
1530 TypeSize ParamOffset = Offsets[J].getWithIncrement(VAOffset);
1531 Align ParamAlign = commonAlignment(ArgAlign, ParamOffset);
1532 SDValue ParamAddr =
1533 DAG.getObjectPtrOffset(dl, ParamSymbol, ParamOffset);
1534 SDValue StoreParam =
1535 DAG.getStore(ArgDeclare, dl, SrcLoad, ParamAddr,
1537 CallPrereqs.push_back(StoreParam);
1538
1539 J += NumElts;
1540 }
1541 if (IsVAArg)
1542 VAOffset += TySize;
1543 } else {
1546 ComputePTXValueVTs(*this, DL, Ctx, CLI.CallConv, Arg.Ty, VTs, Offsets,
1547 VAOffset);
1548 assert(VTs.size() == Offsets.size() && "Size mismatch");
1549 assert(VTs.size() == ArgOuts.size() && "Size mismatch");
1550
1551 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter
1552 // than 32-bits are sign extended or zero extended, depending on
1553 // whether they are signed or unsigned types. This case applies
1554 // only to scalar parameters and not to aggregate values.
1555 const bool ExtendIntegerParam =
1556 Arg.Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Arg.Ty) < 32;
1557
1558 const auto GetStoredValue = [&](const unsigned I) {
1559 SDValue StVal = ArgOutVals[I];
1561 StVal.getValueType() &&
1562 "OutVal type should always be legal");
1563
1564 const EVT VTI = promoteScalarIntegerPTX(VTs[I]);
1565 const EVT StoreVT =
1566 ExtendIntegerParam ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
1567
1568 return correctParamType(StVal, StoreVT, ArgOuts[I].Flags, DAG, dl);
1569 };
1570
1571 unsigned J = 0;
1572 const auto VI = VectorizePTXValueVTs(VTs, Offsets, ArgAlign, IsVAArg);
1573 for (const unsigned NumElts : VI) {
1574 const EVT EltVT = promoteScalarIntegerPTX(VTs[J]);
1575
1576 unsigned Offset;
1577 if (IsVAArg) {
1578 // TODO: We may need to support vector types that can be passed
1579 // as scalars in variadic arguments.
1580 assert(NumElts == 1 &&
1581 "Vectorization should be disabled for vaargs.");
1582
1583 // Align each part of the variadic argument to their type.
1584 VAOffset = alignTo(VAOffset, DAG.getEVTAlign(EltVT));
1585 Offset = VAOffset;
1586
1587 const EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : EltVT;
1588 VAOffset += DL.getTypeAllocSize(TheStoreType.getTypeForEVT(Ctx));
1589 } else {
1590 assert(VAOffset == 0 && "VAOffset must be 0 for non-VA args");
1591 Offset = Offsets[J];
1592 }
1593
1594 SDValue Ptr =
1595 DAG.getObjectPtrOffset(dl, ParamSymbol, TypeSize::getFixed(Offset));
1596
1597 const MaybeAlign CurrentAlign = ExtendIntegerParam
1598 ? MaybeAlign(std::nullopt)
1599 : commonAlignment(ArgAlign, Offset);
1600
1601 SDValue Val =
1602 getBuildVectorizedValue(NumElts, dl, DAG, [&](unsigned K) {
1603 return GetStoredValue(J + K);
1604 });
1605
1606 SDValue StoreParam =
1607 DAG.getStore(ArgDeclare, dl, Val, Ptr,
1609 CallPrereqs.push_back(StoreParam);
1610
1611 J += NumElts;
1612 }
1613 }
1614 }
1615
1616 // Handle Result
1617 if (!Ins.empty()) {
1618 const SDValue RetSymbol = DAG.getExternalSymbol("retval0", MVT::i32);
1619 const unsigned ResultSize = DL.getTypeAllocSize(RetTy);
1620 if (shouldPassAsArray(RetTy)) {
1621 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
1622 MakeDeclareArrayParam(RetSymbol, RetAlign, ResultSize);
1623 } else {
1624 MakeDeclareScalarParam(RetSymbol, ResultSize);
1625 }
1626 }
1627
1628 // Set the size of the vararg param byte array if the callee is a variadic
1629 // function and the variadic part is not empty.
1630 if (VADeclareParam) {
1631 SDValue DeclareParamOps[] = {VADeclareParam.getOperand(0),
1632 VADeclareParam.getOperand(1),
1633 VADeclareParam.getOperand(2), GetI32(VAOffset),
1634 VADeclareParam.getOperand(4)};
1635 DAG.MorphNodeTo(VADeclareParam.getNode(), VADeclareParam.getOpcode(),
1636 VADeclareParam->getVTList(), DeclareParamOps);
1637 }
1638
1639 const auto *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
1640 // If the type of the callsite does not match that of the function, convert
1641 // the callsite to an indirect call.
1642 const bool ConvertToIndirectCall = shouldConvertToIndirectCall(CB, Func);
1643
1644 // Both indirect calls and libcalls have nullptr Func. In order to distinguish
1645 // between them we must rely on the call site value which is valid for
1646 // indirect calls but is always null for libcalls.
1647 const bool IsIndirectCall = (!Func && CB) || ConvertToIndirectCall;
1648
1649 if (isa<ExternalSymbolSDNode>(Callee)) {
1650 Function* CalleeFunc = nullptr;
1651
1652 // Try to find the callee in the current module.
1653 Callee = DAG.getSymbolFunctionGlobalAddress(Callee, &CalleeFunc);
1654 assert(CalleeFunc != nullptr && "Libcall callee must be set.");
1655
1656 // Set the "libcall callee" attribute to indicate that the function
1657 // must always have a declaration.
1658 CalleeFunc->addFnAttr("nvptx-libcall-callee", "true");
1659 }
1660
1661 if (IsIndirectCall) {
1662    // This is the indirect function call case: PTX requires a prototype of the
1663    // form
1664    // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
1665    // to be emitted, and the label has to be used as the last arg of the call
1666 // instruction.
1667 // The prototype is embedded in a string and put as the operand for a
1668 // CallPrototype SDNode which will print out to the value of the string.
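    // For example (illustrative), an indirect call to a function of type
    // float(float, double) produces a prototype declaration along the lines
    // of:
    //   prototype_1 : .callprototype (.param .b32 _) _ (.param .b32 _,
    //                                                   .param .b64 _);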
1669 const bool HasVAArgs = CLI.IsVarArg && (CLI.Args.size() > CLI.NumFixedArgs);
1670 std::string Proto =
1671 getPrototype(DL, RetTy, Args, CLI.Outs,
1672 HasVAArgs ? std::optional(FirstVAArg) : std::nullopt, *CB,
1673 UniqueCallSite);
1674 const char *ProtoStr = nvTM->getStrPool().save(Proto).data();
1675 const SDValue PrototypeDeclare = DAG.getNode(
1676 NVPTXISD::CallPrototype, dl, MVT::Other,
1677 {StartChain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32)});
1678 CallPrereqs.push_back(PrototypeDeclare);
1679 }
1680
1681 const unsigned Proto = IsIndirectCall ? UniqueCallSite : 0;
1682 const unsigned NumArgs =
1683 std::min<unsigned>(CLI.NumFixedArgs + 1, Args.size());
1684 /// CALL(Chain, IsConvergent, IsIndirectCall/IsUniform, NumReturns,
1685 /// NumParams, Callee, Proto)
1686 const SDValue CallToken = DAG.getTokenFactor(dl, CallPrereqs);
1687 const SDValue Call = DAG.getNode(
1688 NVPTXISD::CALL, dl, MVT::Other,
1689 {CallToken, GetI32(CLI.IsConvergent), GetI32(IsIndirectCall),
1690 GetI32(Ins.empty() ? 0 : 1), GetI32(NumArgs), Callee, GetI32(Proto)});
1691
1692 SmallVector<SDValue, 16> LoadChains{Call};
1693 SmallVector<SDValue, 16> ProxyRegOps;
1694 if (!Ins.empty()) {
1697 ComputePTXValueVTs(*this, DL, Ctx, CLI.CallConv, RetTy, VTs, Offsets);
1698 assert(VTs.size() == Ins.size() && "Bad value decomposition");
1699
1700 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
1701 const SDValue RetSymbol = DAG.getExternalSymbol("retval0", MVT::i32);
1702
1703 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
1704 // 32-bits are sign extended or zero extended, depending on whether
1705 // they are signed or unsigned types.
1706 const bool ExtendIntegerRetVal =
1707 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
1708
1709 unsigned I = 0;
1710 const auto VI = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
1711 for (const unsigned NumElts : VI) {
1712 const MaybeAlign CurrentAlign =
1713 ExtendIntegerRetVal ? MaybeAlign(std::nullopt)
1714 : commonAlignment(RetAlign, Offsets[I]);
1715
1716 const EVT VTI = promoteScalarIntegerPTX(VTs[I]);
1717 const EVT LoadVT =
1718 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
1719 const EVT VecVT = getVectorizedVT(LoadVT, NumElts, Ctx);
1720 SDValue Ptr =
1721 DAG.getObjectPtrOffset(dl, RetSymbol, TypeSize::getFixed(Offsets[I]));
1722
1723 SDValue R =
1724 DAG.getLoad(VecVT, dl, Call, Ptr,
1726
1727 LoadChains.push_back(R.getValue(1));
1728 for (const unsigned J : llvm::seq(NumElts))
1729 ProxyRegOps.push_back(getExtractVectorizedValue(R, J, LoadVT, dl, DAG));
1730 I += NumElts;
1731 }
1732 }
1733
1734 const SDValue EndToken = DAG.getTokenFactor(dl, LoadChains);
1735 const SDValue CallEnd = DAG.getCALLSEQ_END(EndToken, UniqueCallSite,
1736 UniqueCallSite + 1, SDValue(), dl);
1737
1738 // Append ProxyReg instructions to the chain to make sure that `callseq_end`
1739 // will not get lost. Otherwise, during libcall expansion, the nodes can
1740 // become dangling.
1741 for (const auto [I, Reg] : llvm::enumerate(ProxyRegOps)) {
1742 SDValue Proxy =
1743 DAG.getNode(NVPTXISD::ProxyReg, dl, Reg.getValueType(), {CallEnd, Reg});
1744 SDValue Ret = correctParamType(Proxy, Ins[I].VT, Ins[I].Flags, DAG, dl);
1745 InVals.push_back(Ret);
1746 }
1747
1748 // Set IsTailCall to false for now, until we figure out how to express
1749 // tail call optimization in PTX.
1750 CLI.IsTailCall = false;
1751 return CallEnd;
1752}
1753
1755 SelectionDAG &DAG) const {
1756
1757 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1758 const Function &Fn = DAG.getMachineFunction().getFunction();
1759
1761 Fn,
1762 "Support for dynamic alloca requires PTX ISA version >= 7.3 and "
1763 "target >= sm_52.",
1764 SDLoc(Op).getDebugLoc()));
1765 auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()),
1766 Op.getOperand(0)};
1767 return DAG.getMergeValues(Ops, SDLoc());
1768 }
1769
1770 SDLoc DL(Op.getNode());
1771 SDValue Chain = Op.getOperand(0);
1772 SDValue Size = Op.getOperand(1);
1773 uint64_t Align = Op.getConstantOperandVal(2);
1774
1775 // The alignment on an ISD::DYNAMIC_STACKALLOC node may be 0 to indicate
1776 // that the default stack alignment should be used.
1777 if (Align == 0)
1779
1780 // The size operand of the PTX alloca instruction is 64-bit for m64 and 32-bit for m32.
1781 const MVT LocalVT = getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_LOCAL);
1782
1783 SDValue Alloc =
1784 DAG.getNode(NVPTXISD::DYNAMIC_STACKALLOC, DL, {LocalVT, MVT::Other},
1785 {Chain, DAG.getZExtOrTrunc(Size, DL, LocalVT),
1786 DAG.getTargetConstant(Align, DL, MVT::i32)});
1787
1788 SDValue ASC = DAG.getAddrSpaceCast(
1790
1791 return DAG.getMergeValues({ASC, SDValue(Alloc.getNode(), 1)}, DL);
1792}
1793
1795 SelectionDAG &DAG) const {
1796 SDLoc DL(Op.getNode());
1797 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1798 const Function &Fn = DAG.getMachineFunction().getFunction();
1799
1801 Fn,
1802 "Support for stackrestore requires PTX ISA version >= 7.3 and target "
1803 ">= sm_52.",
1804 DL.getDebugLoc()));
1805 return Op.getOperand(0);
1806 }
1807
1808 const MVT LocalVT = getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_LOCAL);
1809 SDValue Chain = Op.getOperand(0);
1810 SDValue Ptr = Op.getOperand(1);
1811 SDValue ASC = DAG.getAddrSpaceCast(DL, LocalVT, Ptr, ADDRESS_SPACE_GENERIC,
1813 return DAG.getNode(NVPTXISD::STACKRESTORE, DL, MVT::Other, {Chain, ASC});
1814}
1815
1817 SelectionDAG &DAG) const {
1818 SDLoc DL(Op.getNode());
1819 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1820 const Function &Fn = DAG.getMachineFunction().getFunction();
1821
1823 Fn,
1824 "Support for stacksave requires PTX ISA version >= 7.3 and target >= "
1825 "sm_52.",
1826 DL.getDebugLoc()));
1827 auto Ops = {DAG.getConstant(0, DL, Op.getValueType()), Op.getOperand(0)};
1828 return DAG.getMergeValues(Ops, DL);
1829 }
1830
1831 const MVT LocalVT = getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_LOCAL);
1832 SDValue Chain = Op.getOperand(0);
1833 SDValue SS =
1834 DAG.getNode(NVPTXISD::STACKSAVE, DL, {LocalVT, MVT::Other}, Chain);
1835 SDValue ASC = DAG.getAddrSpaceCast(
1836 DL, Op.getValueType(), SS, ADDRESS_SPACE_LOCAL, ADDRESS_SPACE_GENERIC);
1837 return DAG.getMergeValues({ASC, SDValue(SS.getNode(), 1)}, DL);
1838}
1839
1840 // By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
1841 // (see LegalizeDAG.cpp). This is slow and uses local memory. We instead use
1842 // extract/insert/build vector, just as LegalizeOp() did in LLVM 2.5.
1843SDValue
1844NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
1845 SDNode *Node = Op.getNode();
1846 SDLoc dl(Node);
1848 unsigned NumOperands = Node->getNumOperands();
1849 for (unsigned i = 0; i < NumOperands; ++i) {
1850 SDValue SubOp = Node->getOperand(i);
1851 EVT VVT = SubOp.getNode()->getValueType(0);
1852 EVT EltVT = VVT.getVectorElementType();
1853 unsigned NumSubElem = VVT.getVectorNumElements();
1854 for (unsigned j = 0; j < NumSubElem; ++j) {
1855 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
1856 DAG.getIntPtrConstant(j, dl)));
1857 }
1858 }
1859 return DAG.getBuildVector(Node->getValueType(0), dl, Ops);
1860}
1861
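// Helper for building an NVPTX PRMT node. In PRMT's default mode, the two
// i32 operands form an 8-byte source {B:A} and each nibble of the selector
// picks one of those bytes for the corresponding result byte; e.g. a
// selector of 0x0000 splats the lowest byte of A across the result. See the
// PTX ISA description of prmt.b32 for the other modes.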
1863 SelectionDAG &DAG,
1864 unsigned Mode = NVPTX::PTXPrmtMode::NONE) {
1865 assert(A.getValueType() == MVT::i32 && B.getValueType() == MVT::i32 &&
1866 Selector.getValueType() == MVT::i32 && "PRMT must have i32 operands");
1867 return DAG.getNode(NVPTXISD::PRMT, DL, MVT::i32,
1868 {A, B, Selector, DAG.getConstant(Mode, DL, MVT::i32)});
1869}
1870
1872 SelectionDAG &DAG,
1873 unsigned Mode = NVPTX::PTXPrmtMode::NONE) {
1874 return getPRMT(A, B, DAG.getConstant(Selector, DL, MVT::i32), DL, DAG, Mode);
1875}
1876
1877 /// Reduces the elements using the scalar operations provided. The operations
1878 /// must be sorted in descending order by the number of inputs they take. The
1879 /// flags on the original reduction operation will be propagated to each
1880 /// scalar operation. Nearby elements are grouped in a tree reduction, unlike
1881 /// the shuffle reduction used in ExpandReductions and SelectionDAG.
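///
/// For example, given 7 elements and the operator list {(FMAXNUM3, 3),
/// (FMAXNUM, 2)}, the first level would roughly become
///   fmax3(e0, e1, e2), fmax3(e3, e4, e5), e6
/// and the three remaining values are then reduced with a single fmax3.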
1883 const SmallVector<SDValue> &Elements, EVT EltTy,
1884 ArrayRef<std::pair<unsigned /*NodeType*/, unsigned /*NumInputs*/>> Ops,
1885 const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG) {
1886 // Build the reduction tree at each level, starting with all the elements.
1887 SmallVector<SDValue> Level = Elements;
1888
1889 unsigned OpIdx = 0;
1890 while (Level.size() > 1) {
1891 // Try to reduce this level using the current operator.
1892 const auto [Op, NumInputs] = Ops[OpIdx];
1893
1894 // Build the next level by partially reducing all elements.
1895 SmallVector<SDValue> ReducedLevel;
1896 unsigned I = 0, E = Level.size();
1897 for (; I + NumInputs <= E; I += NumInputs) {
1898 // Reduce elements in groups of [NumInputs], as much as possible.
1899 ReducedLevel.push_back(DAG.getNode(
1900 Op, DL, EltTy, ArrayRef<SDValue>(Level).slice(I, NumInputs), Flags));
1901 }
1902
1903 if (I < E) {
1904 // Handle leftover elements.
1905
1906 if (ReducedLevel.empty()) {
1907 // We didn't reduce anything at this level. We need to pick a smaller
1908 // operator.
1909 ++OpIdx;
1910 assert(OpIdx < Ops.size() && "no smaller operators for reduction");
1911 continue;
1912 }
1913
1914 // We reduced some things but there's still more left, meaning the
1915 // operator's number of inputs doesn't evenly divide this level's size.
1916 // Move these elements to the next level.
1917 for (; I < E; ++I)
1918 ReducedLevel.push_back(Level[I]);
1919 }
1920
1921 // Process the next level.
1922 Level = ReducedLevel;
1923 }
1924
1925 return *Level.begin();
1926}
1927
1928// Get scalar reduction opcode
1929static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode) {
1930 switch (ReductionOpcode) {
1931 case ISD::VECREDUCE_FMAX:
1932 return ISD::FMAXNUM;
1933 case ISD::VECREDUCE_FMIN:
1934 return ISD::FMINNUM;
1935 case ISD::VECREDUCE_FMAXIMUM:
1936 return ISD::FMAXIMUM;
1937 case ISD::VECREDUCE_FMINIMUM:
1938 return ISD::FMINIMUM;
1939 default:
1940 llvm_unreachable("unhandled reduction opcode");
1941 }
1942}
1943
1944/// Get 3-input scalar reduction opcode
1945static std::optional<unsigned>
1946getScalar3OpcodeForReduction(unsigned ReductionOpcode) {
1947 switch (ReductionOpcode) {
1948 case ISD::VECREDUCE_FMAX:
1949 return NVPTXISD::FMAXNUM3;
1950 case ISD::VECREDUCE_FMIN:
1951 return NVPTXISD::FMINNUM3;
1952 case ISD::VECREDUCE_FMAXIMUM:
1953 return NVPTXISD::FMAXIMUM3;
1954 case ISD::VECREDUCE_FMINIMUM:
1955 return NVPTXISD::FMINIMUM3;
1956 default:
1957 return std::nullopt;
1958 }
1959}
1960
1961/// Lower reductions to either a sequence of operations or a tree if
1962/// reassociations are allowed. This method will use larger operations like
1963/// max3/min3 when the target supports them.
1964SDValue NVPTXTargetLowering::LowerVECREDUCE(SDValue Op,
1965 SelectionDAG &DAG) const {
1966 SDLoc DL(Op);
1967 const SDNodeFlags Flags = Op->getFlags();
1968 SDValue Vector = Op.getOperand(0);
1969
1970 const unsigned Opcode = Op->getOpcode();
1971 const EVT EltTy = Vector.getValueType().getVectorElementType();
1972
1973 // Whether we can use 3-input min/max when expanding the reduction.
1974 const bool CanUseMinMax3 =
1975 EltTy == MVT::f32 && STI.getSmVersion() >= 100 &&
1976 STI.getPTXVersion() >= 88 &&
1977 (Opcode == ISD::VECREDUCE_FMAX || Opcode == ISD::VECREDUCE_FMIN ||
1978 Opcode == ISD::VECREDUCE_FMAXIMUM || Opcode == ISD::VECREDUCE_FMINIMUM);
1979
1980 // A list of SDNode opcodes with equivalent semantics, sorted descending by
1981 // number of inputs they take.
1982 SmallVector<std::pair<unsigned /*Op*/, unsigned /*NumIn*/>, 2> ScalarOps;
1983
1984 if (auto Opcode3Elem = getScalar3OpcodeForReduction(Opcode);
1985 CanUseMinMax3 && Opcode3Elem)
1986 ScalarOps.push_back({*Opcode3Elem, 3});
1987 ScalarOps.push_back({getScalarOpcodeForReduction(Opcode), 2});
1988
1990 DAG.ExtractVectorElements(Vector, Elements);
1991
1992 return buildTreeReduction(Elements, EltTy, ScalarOps, DL, Flags, DAG);
1993}
1994
1995SDValue NVPTXTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
1996 // Handle bitcasting from v2i8 without hitting the default promotion
1997 // strategy which goes through stack memory.
1998 EVT FromVT = Op->getOperand(0)->getValueType(0);
1999 if (FromVT != MVT::v2i8) {
2000 return Op;
2001 }
2002
2003 // Pack vector elements into i16 and bitcast to final type
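// E.g. bitcasting <2 x i8> <a, b> becomes, roughly,
//   (i16)((zext(b) << 8) | zext(a))
// which is then bitcast to the destination type.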
2004 SDLoc DL(Op);
2005 SDValue Vec0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8,
2006 Op->getOperand(0), DAG.getIntPtrConstant(0, DL));
2007 SDValue Vec1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8,
2008 Op->getOperand(0), DAG.getIntPtrConstant(1, DL));
2009 SDValue Extend0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i16, Vec0);
2010 SDValue Extend1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i16, Vec1);
2011 SDValue Const8 = DAG.getConstant(8, DL, MVT::i16);
2012 SDValue AsInt = DAG.getNode(
2013 ISD::OR, DL, MVT::i16,
2014 {Extend0, DAG.getNode(ISD::SHL, DL, MVT::i16, {Extend1, Const8})});
2015 EVT ToVT = Op->getValueType(0);
2016 return DAG.getBitcast(ToVT, AsInt);
2017}
2018
2019 // We can initialize a constant f16x2/v2i16/v4i8 vector with a single .b32
2020 // move. Normally it would get lowered as two constant loads and a
2021 // vector-packing move. Instead we want just a constant move:
2022 // mov.b32 %r2, 0x40003C00
2023SDValue NVPTXTargetLowering::LowerBUILD_VECTOR(SDValue Op,
2024 SelectionDAG &DAG) const {
2025 EVT VT = Op->getValueType(0);
2026 if (!(NVPTX::isPackedVectorTy(VT) && VT.is32BitVector()))
2027 return Op;
2028 SDLoc DL(Op);
2029
2030 if (!llvm::all_of(Op->ops(), [](SDValue Operand) {
2031 return Operand->isUndef() || isa<ConstantSDNode>(Operand) ||
2032 isa<ConstantFPSDNode>(Operand);
2033 })) {
2034 if (VT != MVT::v4i8)
2035 return Op;
2036 // Lower a non-constant v4i8 vector as a byte-wise constructed i32, which
2037 // allows us to optimize the calculation of its constant parts.
2038 auto GetPRMT = [&](const SDValue Left, const SDValue Right, bool Cast,
2039 uint64_t SelectionValue) -> SDValue {
2040 SDValue L = Left;
2041 SDValue R = Right;
2042 if (Cast) {
2043 L = DAG.getAnyExtOrTrunc(L, DL, MVT::i32);
2044 R = DAG.getAnyExtOrTrunc(R, DL, MVT::i32);
2045 }
2046 return getPRMT(L, R, SelectionValue, DL, DAG);
2047 };
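// Sketch of the construction (PRMT default mode): selector 0x3340 places
// the low byte of the first source in result byte 0 and the low byte of the
// second source in result byte 1, so PRMT__10 carries bytes {1, 0} and
// PRMT__32 carries bytes {3, 2} in their low halves. Selector 0x5410 then
// combines those low halves into the final {3, 2, 1, 0} byte order.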
2048 auto PRMT__10 = GetPRMT(Op->getOperand(0), Op->getOperand(1), true, 0x3340);
2049 auto PRMT__32 = GetPRMT(Op->getOperand(2), Op->getOperand(3), true, 0x3340);
2050 auto PRMT3210 = GetPRMT(PRMT__10, PRMT__32, false, 0x5410);
2051 return DAG.getBitcast(VT, PRMT3210);
2052 }
2053
2054 // Get the value of the Nth operand as an APInt(32). Undef values are treated as 0.
2055 auto GetOperand = [](SDValue Op, int N) -> APInt {
2056 const SDValue &Operand = Op->getOperand(N);
2057 EVT VT = Op->getValueType(0);
2058 if (Operand->isUndef())
2059 return APInt(32, 0);
2060 APInt Value;
2061 if (VT == MVT::v2f16 || VT == MVT::v2bf16)
2062 Value = cast<ConstantFPSDNode>(Operand)->getValueAPF().bitcastToAPInt();
2063 else if (VT == MVT::v2i16 || VT == MVT::v4i8)
2064 Value = Operand->getAsAPIntVal();
2065 else
2066 llvm_unreachable("Unsupported type");
2067 // i8 values are carried around as i16, so we need to zero out the upper
2068 // bits so they do not get in the way of combining individual byte values.
2069 if (VT == MVT::v4i8)
2070 Value = Value.trunc(8);
2071 return Value.zext(32);
2072 };
2073
2074 // Construct a 32-bit constant by shifting into place smaller values
2075 // (elements of the vector type VT).
2076 // For example, if VT has 2 elements, then N == 2:
2077 // ShiftAmount = 32 / N = 16
2078 // Value |= Op0 (b16) << 0
2079 // Value |= Op1 (b16) << 16
2080 // If N == 4:
2081 // ShiftAmount = 32 / N = 8
2082 // Value |= Op0 (b8) << 0
2083 // Value |= Op1 (b8) << 8
2084 // Value |= Op2 (b8) << 16
2085 // Value |= Op3 (b8) << 24
2086 // ...etc
2087 APInt Value(32, 0);
2088 const unsigned NumElements = VT.getVectorNumElements();
2089 assert(32 % NumElements == 0 && "must evenly divide bit length");
2090 const unsigned ShiftAmount = 32 / NumElements;
2091 for (unsigned ElementNo : seq(NumElements))
2092 Value |= GetOperand(Op, ElementNo).shl(ElementNo * ShiftAmount);
2093 SDValue Const = DAG.getConstant(Value, DL, MVT::i32);
2094 return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), Const);
2095}
2096
2097SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
2098 SelectionDAG &DAG) const {
2099 SDValue Index = Op->getOperand(1);
2100 SDValue Vector = Op->getOperand(0);
2101 SDLoc DL(Op);
2102 EVT VectorVT = Vector.getValueType();
2103
2104 if (VectorVT == MVT::v4i8) {
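// ORing the byte index into 0x7770 yields a selector of the form 0x777i:
// in PRMT's default mode this places byte `i` of the vector in result byte
// 0 and fills the other three bytes from byte 7 of the second source, which
// is a constant zero here, so the result is the selected byte zero-extended
// to 32 bits.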
2105 SDValue Selector = DAG.getNode(ISD::OR, DL, MVT::i32,
2106 DAG.getZExtOrTrunc(Index, DL, MVT::i32),
2107 DAG.getConstant(0x7770, DL, MVT::i32));
2108 SDValue PRMT = getPRMT(DAG.getBitcast(MVT::i32, Vector),
2109 DAG.getConstant(0, DL, MVT::i32), Selector, DL, DAG);
2110 SDValue Ext = DAG.getAnyExtOrTrunc(PRMT, DL, Op->getValueType(0));
2111 SDNodeFlags Flags;
2112 Flags.setNoSignedWrap(Ext.getScalarValueSizeInBits() > 8);
2113 Flags.setNoUnsignedWrap(Ext.getScalarValueSizeInBits() >= 8);
2114 Ext->setFlags(Flags);
2115 return Ext;
2116 }
2117
2118 // Constant index will be matched by tablegen.
2119 if (isa<ConstantSDNode>(Index.getNode()))
2120 return Op;
2121
2122 // Extract individual elements and select one of them.
2123 assert(NVPTX::isPackedVectorTy(VectorVT) &&
2124 VectorVT.getVectorNumElements() == 2 && "Unexpected vector type.");
2125 EVT EltVT = VectorVT.getVectorElementType();
2126
2127 SDLoc dl(Op.getNode());
2128 SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
2129 DAG.getIntPtrConstant(0, dl));
2130 SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
2131 DAG.getIntPtrConstant(1, dl));
2132 return DAG.getSelectCC(dl, Index, DAG.getIntPtrConstant(0, dl), E0, E1,
2134}
2135
2136SDValue NVPTXTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
2137 SelectionDAG &DAG) const {
2138 SDValue Vector = Op->getOperand(0);
2139 EVT VectorVT = Vector.getValueType();
2140
2141 if (VectorVT != MVT::v4i8)
2142 return Op;
2143 SDLoc DL(Op);
2144 SDValue Value = Op->getOperand(1);
2145 if (Value->isUndef())
2146 return Vector;
2147
2148 SDValue Index = Op->getOperand(2);
2149
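// BFI (bit-field insert) places the low 8 bits of Value into Vector at bit
// offset Index * 8, leaving the remaining bytes unchanged.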
2150 SDValue BFI =
2151 DAG.getNode(NVPTXISD::BFI, DL, MVT::i32,
2152 {DAG.getZExtOrTrunc(Value, DL, MVT::i32), Vector,
2153 DAG.getNode(ISD::MUL, DL, MVT::i32,
2154 DAG.getZExtOrTrunc(Index, DL, MVT::i32),
2155 DAG.getConstant(8, DL, MVT::i32)),
2156 DAG.getConstant(8, DL, MVT::i32)});
2157 return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), BFI);
2158}
2159
2160SDValue NVPTXTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
2161 SelectionDAG &DAG) const {
2162 SDValue V1 = Op.getOperand(0);
2163 EVT VectorVT = V1.getValueType();
2164 if (VectorVT != MVT::v4i8 || Op.getValueType() != MVT::v4i8)
2165 return Op;
2166
2167 // Lower shuffle to PRMT instruction.
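// Each mask element becomes one selector nibble choosing a byte of the
// 8-byte pair {V2:V1}; e.g. a mask of <0, 4, 1, 5> yields the selector
// 0x5140, which interleaves the low halves of V1 and V2.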
2168 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2169 SDValue V2 = Op.getOperand(1);
2170 uint32_t Selector = 0;
2171 for (auto I : llvm::enumerate(SVN->getMask())) {
2172 if (I.value() != -1) // -1 is a placeholder for undef.
2173 Selector |= (I.value() << (I.index() * 4));
2174 }
2175
2176 SDLoc DL(Op);
2177 SDValue PRMT = getPRMT(DAG.getBitcast(MVT::i32, V1),
2178 DAG.getBitcast(MVT::i32, V2), Selector, DL, DAG);
2179 return DAG.getBitcast(Op.getValueType(), PRMT);
2180}
2181 /// LowerShiftRightParts - Lower SRL_PARTS and SRA_PARTS, which
2182 /// 1) return two i32 values and take a 2 x i32 value to shift plus a shift
2183 /// amount, or
2184 /// 2) return two i64 values and take a 2 x i64 value to shift plus a shift
2185 /// amount.
2186SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
2187 SelectionDAG &DAG) const {
2188 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2189 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
2190
2191 EVT VT = Op.getValueType();
2192 unsigned VTBits = VT.getSizeInBits();
2193 SDLoc dl(Op);
2194 SDValue ShOpLo = Op.getOperand(0);
2195 SDValue ShOpHi = Op.getOperand(1);
2196 SDValue ShAmt = Op.getOperand(2);
2197 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
2198
2199 if (VTBits == 32 && STI.getSmVersion() >= 35) {
2200 // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
2201 // {dHi, dLo} = {aHi, aLo} >> Amt
2202 // dHi = aHi >> Amt
2203 // dLo = shf.r.clamp aLo, aHi, Amt
2204
2205 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
2206 SDValue Lo =
2207 DAG.getNode(NVPTXISD::FSHR_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);
2208
2209 SDValue Ops[2] = { Lo, Hi };
2210 return DAG.getMergeValues(Ops, dl);
2211 }
2212 else {
2213 // {dHi, dLo} = {aHi, aLo} >> Amt
2214 // - if (Amt>=size) then
2215 // dLo = aHi >> (Amt-size)
2216 // dHi = aHi >> Amt (this is either all 0 or all 1)
2217 // else
2218 // dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
2219 // dHi = aHi >> Amt
2220
2221 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2222 DAG.getConstant(VTBits, dl, MVT::i32),
2223 ShAmt);
2224 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
2225 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2226 DAG.getConstant(VTBits, dl, MVT::i32));
2227 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
2228 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2229 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
2230
2231 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
2232 DAG.getConstant(VTBits, dl, MVT::i32),
2233 ISD::SETGE);
2234 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
2235 SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
2236
2237 SDValue Ops[2] = { Lo, Hi };
2238 return DAG.getMergeValues(Ops, dl);
2239 }
2240}
2241
2242 /// LowerShiftLeftParts - Lower SHL_PARTS, which
2243 /// 1) returns two i32 values and takes a 2 x i32 value to shift plus a shift
2244 /// amount, or
2245 /// 2) returns two i64 values and takes a 2 x i64 value to shift plus a shift
2246 /// amount.
2247SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
2248 SelectionDAG &DAG) const {
2249 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2250 assert(Op.getOpcode() == ISD::SHL_PARTS);
2251
2252 EVT VT = Op.getValueType();
2253 unsigned VTBits = VT.getSizeInBits();
2254 SDLoc dl(Op);
2255 SDValue ShOpLo = Op.getOperand(0);
2256 SDValue ShOpHi = Op.getOperand(1);
2257 SDValue ShAmt = Op.getOperand(2);
2258
2259 if (VTBits == 32 && STI.getSmVersion() >= 35) {
2260 // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
2261 // {dHi, dLo} = {aHi, aLo} << Amt
2262 // dHi = shf.l.clamp aLo, aHi, Amt
2263 // dLo = aLo << Amt
2264
2265 SDValue Hi =
2266 DAG.getNode(NVPTXISD::FSHL_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);
2267 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
2268
2269 SDValue Ops[2] = { Lo, Hi };
2270 return DAG.getMergeValues(Ops, dl);
2271 }
2272 else {
2273 // {dHi, dLo} = {aHi, aLo} << Amt
2274 // - if (Amt>=size) then
2275 // dLo = aLo << Amt (all 0)
2276 // dHi = aLo << (Amt-size)
2277 // else
2278 // dLo = aLo << Amt
2279 // dHi = (aHi << Amt) | (aLo >> (size-Amt))
2280
2281 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2282 DAG.getConstant(VTBits, dl, MVT::i32),
2283 ShAmt);
2284 SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
2285 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2286 DAG.getConstant(VTBits, dl, MVT::i32));
2287 SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
2288 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2289 SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
2290
2291 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
2292 DAG.getConstant(VTBits, dl, MVT::i32),
2293 ISD::SETGE);
2294 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
2295 SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
2296
2297 SDValue Ops[2] = { Lo, Hi };
2298 return DAG.getMergeValues(Ops, dl);
2299 }
2300}
2301
2302 /// If the types match, convert the generic copysign to the NVPTXISD version,
2303 /// otherwise bail, ensuring that mismatched cases are properly expanded.
2304SDValue NVPTXTargetLowering::LowerFCOPYSIGN(SDValue Op,
2305 SelectionDAG &DAG) const {
2306 EVT VT = Op.getValueType();
2307 SDLoc DL(Op);
2308
2309 SDValue In1 = Op.getOperand(0);
2310 SDValue In2 = Op.getOperand(1);
2311 EVT SrcVT = In2.getValueType();
2312
2313 if (!SrcVT.bitsEq(VT))
2314 return SDValue();
2315
2316 return DAG.getNode(NVPTXISD::FCOPYSIGN, DL, VT, In1, In2);
2317}
2318
2319SDValue NVPTXTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2320 EVT VT = Op.getValueType();
2321
2322 if (VT == MVT::f32)
2323 return LowerFROUND32(Op, DAG);
2324
2325 if (VT == MVT::f64)
2326 return LowerFROUND64(Op, DAG);
2327
2328 llvm_unreachable("unhandled type");
2329}
2330
2331 // This is the rounding method used in CUDA libdevice, in C-like code:
2332// float roundf(float A)
2333// {
2334// float RoundedA = (float) (int) ( A > 0 ? (A + 0.5f) : (A - 0.5f));
2335// RoundedA = abs(A) > 0x1.0p23 ? A : RoundedA;
2336// return abs(A) < 0.5 ? (float)(int)A : RoundedA;
2337// }
2338SDValue NVPTXTargetLowering::LowerFROUND32(SDValue Op,
2339 SelectionDAG &DAG) const {
2340 SDLoc SL(Op);
2341 SDValue A = Op.getOperand(0);
2342 EVT VT = Op.getValueType();
2343
2344 SDValue AbsA = DAG.getNode(ISD::FABS, SL, VT, A);
2345
2346 // RoundedA = (float) (int) ( A > 0 ? (A + 0.5f) : (A - 0.5f))
2347 SDValue Bitcast = DAG.getNode(ISD::BITCAST, SL, MVT::i32, A);
2348 const unsigned SignBitMask = 0x80000000;
2349 SDValue Sign = DAG.getNode(ISD::AND, SL, MVT::i32, Bitcast,
2350 DAG.getConstant(SignBitMask, SL, MVT::i32));
2351 const unsigned PointFiveInBits = 0x3F000000;
2352 SDValue PointFiveWithSignRaw =
2353 DAG.getNode(ISD::OR, SL, MVT::i32, Sign,
2354 DAG.getConstant(PointFiveInBits, SL, MVT::i32));
2355 SDValue PointFiveWithSign =
2356 DAG.getNode(ISD::BITCAST, SL, VT, PointFiveWithSignRaw);
2357 SDValue AdjustedA = DAG.getNode(ISD::FADD, SL, VT, A, PointFiveWithSign);
2358 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);
2359
2360 // RoundedA = abs(A) > 0x1.0p23 ? A : RoundedA;
2361 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2362 SDValue IsLarge =
2363 DAG.getSetCC(SL, SetCCVT, AbsA, DAG.getConstantFP(pow(2.0, 23.0), SL, VT),
2364 ISD::SETOGT);
2365 RoundedA = DAG.getNode(ISD::SELECT, SL, VT, IsLarge, A, RoundedA);
2366
2367 // return abs(A) < 0.5 ? (float)(int)A : RoundedA;
2368 SDValue IsSmall = DAG.getSetCC(SL, SetCCVT, AbsA,
2369 DAG.getConstantFP(0.5, SL, VT), ISD::SETOLT);
2370 SDValue RoundedAForSmallA = DAG.getNode(ISD::FTRUNC, SL, VT, A);
2371 return DAG.getNode(ISD::SELECT, SL, VT, IsSmall, RoundedAForSmallA, RoundedA);
2372}
2373
2374 // The implementation of round(double) is similar to that of round(float) in
2375 // that they both separate the value range into three regions and use a
2376 // method specific to each region to round the values. However, round(double)
2377 // first computes the rounding of the absolute value and then adds the sign
2378 // back, while round(float) directly rounds the signed value.
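// Roughly, in C-like code:
// double round(double A)
// {
//   double RoundedA = trunc(abs(A) + 0.5);
//   RoundedA = abs(A) < 0.5 ? 0.0 : RoundedA;
//   RoundedA = copysign(RoundedA, A);
//   return abs(A) > 0x1.0p52 ? A : RoundedA;
// }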
2379SDValue NVPTXTargetLowering::LowerFROUND64(SDValue Op,
2380 SelectionDAG &DAG) const {
2381 SDLoc SL(Op);
2382 SDValue A = Op.getOperand(0);
2383 EVT VT = Op.getValueType();
2384
2385 SDValue AbsA = DAG.getNode(ISD::FABS, SL, VT, A);
2386
2387 // double RoundedA = (double) (int) (abs(A) + 0.5f);
2388 SDValue AdjustedA = DAG.getNode(ISD::FADD, SL, VT, AbsA,
2389 DAG.getConstantFP(0.5, SL, VT));
2390 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);
2391
2392 // RoundedA = abs(A) < 0.5 ? (double)0 : RoundedA;
2393 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2394 SDValue IsSmall = DAG.getSetCC(SL, SetCCVT, AbsA,
2395 DAG.getConstantFP(0.5, SL, VT), ISD::SETOLT);
2396 RoundedA = DAG.getNode(ISD::SELECT, SL, VT, IsSmall,
2397 DAG.getConstantFP(0, SL, VT),
2398 RoundedA);
2399
2400 // Add the sign back to RoundedA.
2401 RoundedA = DAG.getNode(ISD::FCOPYSIGN, SL, VT, RoundedA, A);
2403
2404 // RoundedA = abs(A) > 0x1.0p52 ? A : RoundedA;
2405 SDValue IsLarge =
2406 DAG.getSetCC(SL, SetCCVT, AbsA, DAG.getConstantFP(pow(2.0, 52.0), SL, VT),
2407 ISD::SETOGT);
2408 return DAG.getNode(ISD::SELECT, SL, VT, IsLarge, A, RoundedA);
2409}
2410
2412 EVT VT = N->getValueType(0);
2413 EVT NVT = MVT::f32;
2414 if (VT.isVector()) {
2415 NVT = EVT::getVectorVT(*DAG.getContext(), NVT, VT.getVectorElementCount());
2416 }
2417 SDLoc DL(N);
2418 SDValue Tmp0 = DAG.getFPExtendOrRound(N->getOperand(0), DL, NVT);
2419 SDValue Tmp1 = DAG.getFPExtendOrRound(N->getOperand(1), DL, NVT);
2420 SDValue Res = DAG.getNode(N->getOpcode(), DL, NVT, Tmp0, Tmp1, N->getFlags());
2421 return DAG.getFPExtendOrRound(Res, DL, VT);
2422}
2423
2424SDValue NVPTXTargetLowering::PromoteBinOpIfF32FTZ(SDValue Op,
2425 SelectionDAG &DAG) const {
2426 if (useF32FTZ(DAG.getMachineFunction())) {
2427 return PromoteBinOpToF32(Op.getNode(), DAG);
2428 }
2429 return Op;
2430}
2431
2432SDValue NVPTXTargetLowering::LowerINT_TO_FP(SDValue Op,
2433 SelectionDAG &DAG) const {
2434 assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);
2435
2436 if (Op.getValueType() == MVT::bf16) {
2437 SDLoc Loc(Op);
2438 return DAG.getNode(
2439 ISD::FP_ROUND, Loc, MVT::bf16,
2440 DAG.getNode(Op.getOpcode(), Loc, MVT::f32, Op.getOperand(0)),
2441 DAG.getIntPtrConstant(0, Loc, /*isTarget=*/true));
2442 }
2443
2444 // Everything else is considered legal.
2445 return Op;
2446}
2447
2448SDValue NVPTXTargetLowering::LowerFP_TO_INT(SDValue Op,
2449 SelectionDAG &DAG) const {
2450 assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);
2451
2452 if (Op.getOperand(0).getValueType() == MVT::bf16) {
2453 SDLoc Loc(Op);
2454 return DAG.getNode(
2455 Op.getOpcode(), Loc, Op.getValueType(),
2456 DAG.getNode(ISD::FP_EXTEND, Loc, MVT::f32, Op.getOperand(0)));
2457 }
2458
2459 // Everything else is considered legal.
2460 return Op;
2461}
2462
2463SDValue NVPTXTargetLowering::LowerFP_ROUND(SDValue Op,
2464 SelectionDAG &DAG) const {
2465 EVT NarrowVT = Op.getValueType();
2466 SDValue Wide = Op.getOperand(0);
2467 EVT WideVT = Wide.getValueType();
2468 if (NarrowVT.getScalarType() == MVT::bf16) {
2469 const TargetLowering *TLI = STI.getTargetLowering();
2470 if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 70) {
2471 return TLI->expandFP_ROUND(Op.getNode(), DAG);
2472 }
2473 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
2474 // This combination was the first to support f32 -> bf16.
2475 if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70) {
2476 if (WideVT.getScalarType() == MVT::f32) {
2477 return Op;
2478 }
2479 if (WideVT.getScalarType() == MVT::f64) {
2480 SDLoc Loc(Op);
2481 // Round-inexact-to-odd f64 to f32, then do the final rounding using
2482 // the hardware f32 -> bf16 instruction.
2484 WideVT.isVector() ? WideVT.changeVectorElementType(MVT::f32)
2485 : MVT::f32,
2486 Wide, Loc, DAG);
2487 return DAG.getFPExtendOrRound(rod, Loc, NarrowVT);
2488 }
2489 }
2490 return TLI->expandFP_ROUND(Op.getNode(), DAG);
2491 }
2492 }
2493
2494 // Everything else is considered legal.
2495 return Op;
2496}
2497
2498SDValue NVPTXTargetLowering::LowerFP_EXTEND(SDValue Op,
2499 SelectionDAG &DAG) const {
2500 SDValue Narrow = Op.getOperand(0);
2501 EVT NarrowVT = Narrow.getValueType();
2502 EVT WideVT = Op.getValueType();
2503 if (NarrowVT.getScalarType() == MVT::bf16) {
2504 if (WideVT.getScalarType() == MVT::f32 &&
2505 (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71)) {
2506 SDLoc Loc(Op);
2507 return DAG.getNode(ISD::BF16_TO_FP, Loc, WideVT, Narrow);
2508 }
2509 if (WideVT.getScalarType() == MVT::f64 &&
2510 (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78)) {
2511 EVT F32 = NarrowVT.isVector() ? NarrowVT.changeVectorElementType(MVT::f32)
2512 : MVT::f32;
2513 SDLoc Loc(Op);
2514 if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 71) {
2515 Op = DAG.getNode(ISD::FP_EXTEND, Loc, F32, Narrow);
2516 } else {
2517 Op = DAG.getNode(ISD::BF16_TO_FP, Loc, F32, Narrow);
2518 }
2519 return DAG.getNode(ISD::FP_EXTEND, Loc, WideVT, Op);
2520 }
2521 }
2522
2523 // Everything else is considered legal.
2524 return Op;
2525}
2526
2528 SDLoc DL(Op);
2529 if (Op.getValueType() != MVT::v2i16)
2530 return Op;
2531 EVT EltVT = Op.getValueType().getVectorElementType();
2532 SmallVector<SDValue> VecElements;
2533 for (int I = 0, E = Op.getValueType().getVectorNumElements(); I < E; I++) {
2534 SmallVector<SDValue> ScalarArgs;
2535 llvm::transform(Op->ops(), std::back_inserter(ScalarArgs),
2536 [&](const SDUse &O) {
2537 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
2538 O.get(), DAG.getIntPtrConstant(I, DL));
2539 });
2540 VecElements.push_back(DAG.getNode(Op.getOpcode(), DL, EltVT, ScalarArgs));
2541 }
2542 SDValue V =
2543 DAG.getNode(ISD::BUILD_VECTOR, DL, Op.getValueType(), VecElements);
2544 return V;
2545}
2546
2548 SDNode *N = Op.getNode();
2549 SDLoc DL(N);
2551
2552 // split the vector argument
2553 for (size_t I = 0; I < N->getNumOperands(); I++) {
2554 SDValue Val = N->getOperand(I);
2555 EVT ValVT = Val.getValueType();
2556 if (ValVT.isVector()) {
2557 EVT EltVT = ValVT.getVectorElementType();
2558 for (unsigned J = 0, NElts = ValVT.getVectorNumElements(); J < NElts; J++)
2559 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
2560 DAG.getIntPtrConstant(J, DL)));
2561 } else
2562 Ops.push_back(Val);
2563 }
2564
2566 SDValue Tcgen05StNode =
2567 DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, N->getVTList(), Ops,
2568 MemSD->getMemoryVT(), MemSD->getMemOperand());
2569
2570 return Tcgen05StNode;
2571}
2572
2573static unsigned getTcgen05MMADisableOutputLane(unsigned IID) {
2574 switch (IID) {
2575 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
2576 return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG1;
2577 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
2578 return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG2;
2579 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
2580 return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2581 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
2582 return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2583 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
2584 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1;
2585 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
2586 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2;
2587 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
2588 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2589 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
2590 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2591 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
2592 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2593 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
2594 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2595 case Intrinsic::
2596 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
2597 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2598 case Intrinsic::
2599 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
2600 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2601 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
2602 return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG1;
2603 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
2604 return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG2;
2605 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
2606 return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2607 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
2608 return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2609 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
2610 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1;
2611 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
2612 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2;
2613 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
2614 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2615 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
2616 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2617 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
2618 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2619 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
2620 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2621 case Intrinsic::
2622 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
2623 return NVPTXISD::
2624 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2625 case Intrinsic::
2626 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:
2627 return NVPTXISD::
2628 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2629 };
2630 llvm_unreachable("unhandled tcgen05.mma.disable_output_lane intrinsic");
2631}
2632
2634 SDNode *N = Op.getNode();
2635 SDLoc DL(N);
2636 unsigned IID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2637
2639 // split the vector argument
2640 for (size_t I = 0; I < N->getNumOperands(); I++) {
2641 if (I == 1)
2642 continue; // skip IID
2643 SDValue Val = N->getOperand(I);
2644 EVT ValVT = Val.getValueType();
2645 if (ValVT.isVector()) {
2646 EVT EltVT = ValVT.getVectorElementType();
2647 for (unsigned J = 0, NElts = ValVT.getVectorNumElements(); J < NElts; J++)
2648 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
2649 DAG.getIntPtrConstant(J, DL)));
2650 } else
2651 Ops.push_back(Val);
2652 }
2653
2655 SDValue Tcgen05MMANode = DAG.getMemIntrinsicNode(
2656 getTcgen05MMADisableOutputLane(IID), DL, N->getVTList(), Ops,
2657 MemSD->getMemoryVT(), MemSD->getMemOperand());
2658
2659 return Tcgen05MMANode;
2660}
2661
2662// Lower vector return type of tcgen05.ld intrinsics
2663static std::optional<std::pair<SDValue, SDValue>>
2664lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset = false) {
2665 SDLoc DL(N);
2666 EVT ResVT = N->getValueType(0);
2667 if (!ResVT.isVector())
2668 return {}; // already legalized.
2669
2670 const unsigned NumElts = ResVT.getVectorNumElements();
2671
2672 // Create the return type of the instructions
2673 SmallVector<EVT, 5> ListVTs;
2674 for (unsigned i = 0; i < NumElts; ++i)
2675 ListVTs.push_back(MVT::i32);
2676
2677 ListVTs.push_back(N->getValueType(1)); // Chain
2678
2679 SDVTList ResVTs = DAG.getVTList(ListVTs);
2680
2681 SmallVector<SDValue, 8> Ops{N->getOperand(0), N->getOperand(1),
2682 N->getOperand(2)};
2683
2684 if (HasOffset) {
2685 Ops.push_back(N->getOperand(3)); // offset
2686 Ops.push_back(N->getOperand(4)); // Pack flag
2687 } else
2688 Ops.push_back(N->getOperand(3)); // Pack flag
2689
2691 SDValue NewNode =
2693 MemSD->getMemoryVT(), MemSD->getMemOperand());
2694
2695 // split the vector result
2696 SmallVector<SDValue, 4> ScalarRes;
2697 for (unsigned i = 0; i < NumElts; ++i) {
2698 SDValue Res = NewNode.getValue(i);
2699 ScalarRes.push_back(Res);
2700 }
2701
2702 SDValue Chain = NewNode.getValue(NumElts);
2703 SDValue BuildVector = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
2704 return {{BuildVector, Chain}};
2705}
2706
2708 SDNode *N = Op.getNode();
2709 SDValue Intrin = N->getOperand(1);
2710
2711 // Get the intrinsic ID
2712 unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
2713 switch (IntrinNo) {
2714 default:
2715 break;
2716 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
2717 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
2718 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
2719 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
2720 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
2721 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
2722 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
2723 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
2724 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
2725 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
2726 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
2727 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
2728 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
2729 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
2730 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
2731 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
2732 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
2733 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
2734 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
2735 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
2736 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1:
2737 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2:
2738 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4:
2739 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8:
2740 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16:
2741 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32:
2742 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64:
2743 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128:
2744 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
2745 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
2746 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
2747 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
2748 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
2749 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
2750 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
2751 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
2752 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
2753 return lowerTcgen05St(Op, DAG);
2754 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
2755 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
2756 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
2757 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
2758 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
2759 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
2760 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
2761 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
2762 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
2763 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
2764 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
2765 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
2766 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
2767 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
2768 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
2769 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
2770 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
2771 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
2772 case Intrinsic::
2773 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
2774 case Intrinsic::
2775 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
2776 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
2777 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
2778 case Intrinsic::
2779 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
2780 case Intrinsic::
2781 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:
2783 }
2784 return Op;
2785}
2786
2788 SelectionDAG &DAG) {
2789
2790 SDNode *N = Op.getNode();
2791 if (N->getOperand(1).getValueType() != MVT::i128) {
2792 // Return if the operand has already been lowered.
2793 return SDValue();
2794 }
2795
2796 unsigned IID =
2797 cast<ConstantSDNode>(N->getOperand(0).getNode())->getZExtValue();
2798 auto Opcode = [&]() {
2799 switch (IID) {
2800 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
2801 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_IS_CANCELED;
2802 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
2803 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_X;
2804 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
2805 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Y;
2806 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
2807 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Z;
2808 default:
2809 llvm_unreachable("unsupported/unhandled intrinsic");
2810 }
2811 }();
2812
2813 SDLoc DL(N);
2814 SDValue TryCancelResponse = N->getOperand(1);
2815 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TryCancelResponse);
2816 SDValue TryCancelResponse0 =
2817 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
2818 DAG.getIntPtrConstant(0, DL));
2819 SDValue TryCancelResponse1 =
2820 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
2821 DAG.getIntPtrConstant(1, DL));
2822
2823 return DAG.getNode(Opcode, DL, N->getVTList(),
2824 {TryCancelResponse0, TryCancelResponse1});
2825}
2826
2828 SDNode *N = Op.getNode();
2829 SDLoc DL(N);
2830 SDValue F32Vec = N->getOperand(1);
2831 SDValue RBits = N->getOperand(2);
2832
2833 unsigned IntrinsicID = N->getConstantOperandVal(0);
2834
2835 // Extract the 4 float elements from the vector
2837 for (unsigned i = 0; i < 4; ++i)
2838 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, F32Vec,
2839 DAG.getIntPtrConstant(i, DL)));
2840
2842
2843 auto [OpCode, RetTy, CvtModeFlag] =
2844 [&]() -> std::tuple<unsigned, MVT::SimpleValueType, uint32_t> {
2845 switch (IntrinsicID) {
2846 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
2847 return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8,
2848 CvtMode::RS | CvtMode::RELU_FLAG};
2849 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
2850 return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2851 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
2852 return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8,
2853 CvtMode::RS | CvtMode::RELU_FLAG};
2854 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
2855 return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2856 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
2857 return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8,
2858 CvtMode::RS | CvtMode::RELU_FLAG};
2859 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
2860 return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2861 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
2862 return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8,
2863 CvtMode::RS | CvtMode::RELU_FLAG};
2864 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
2865 return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2866 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
2867 return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16,
2868 CvtMode::RS | CvtMode::RELU_FLAG};
2869 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
2870 return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16, CvtMode::RS};
2871 default:
2872 llvm_unreachable("unsupported/unhandled intrinsic");
2873 }
2874 }();
2875
2876 Ops.push_back(RBits);
2877 Ops.push_back(DAG.getConstant(CvtModeFlag, DL, MVT::i32));
2878
2879 return DAG.getNode(OpCode, DL, RetTy, Ops);
2880}
2881
2883 const unsigned Mode = [&]() {
2884 switch (Op->getConstantOperandVal(0)) {
2885 case Intrinsic::nvvm_prmt:
2887 case Intrinsic::nvvm_prmt_b4e:
2889 case Intrinsic::nvvm_prmt_ecl:
2891 case Intrinsic::nvvm_prmt_ecr:
2893 case Intrinsic::nvvm_prmt_f4e:
2895 case Intrinsic::nvvm_prmt_rc16:
2897 case Intrinsic::nvvm_prmt_rc8:
2899 default:
2900 llvm_unreachable("unsupported/unhandled intrinsic");
2901 }
2902 }();
2903 SDLoc DL(Op);
2904 SDValue A = Op->getOperand(1);
2905 SDValue B = Op.getNumOperands() == 4 ? Op.getOperand(2)
2906 : DAG.getConstant(0, DL, MVT::i32);
2907 SDValue Selector = (Op->op_end() - 1)->get();
2908 return getPRMT(A, B, Selector, DL, DAG, Mode);
2909}
2910
2912 switch (Op->getConstantOperandVal(1)) {
2913 default:
2914 return Op;
2915
2916 // These tcgen05 intrinsics return a v2i32, which is legal, so we have to
2917 // lower them through LowerOperation() instead of ReplaceNodeResults().
2918 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
2919 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
2920 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
2921 if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG))
2922 return DAG.getMergeValues({Res->first, Res->second}, SDLoc(Op));
2923 return SDValue();
2924
2925 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2:
2926 if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG, /*HasOffset=*/true))
2927 return DAG.getMergeValues({Res->first, Res->second}, SDLoc(Op));
2928 return SDValue();
2929 }
2930}
2931
2933 switch (Op->getConstantOperandVal(0)) {
2934 default:
2935 return Op;
2936 case Intrinsic::nvvm_prmt:
2937 case Intrinsic::nvvm_prmt_b4e:
2938 case Intrinsic::nvvm_prmt_ecl:
2939 case Intrinsic::nvvm_prmt_ecr:
2940 case Intrinsic::nvvm_prmt_f4e:
2941 case Intrinsic::nvvm_prmt_rc16:
2942 case Intrinsic::nvvm_prmt_rc8:
2943 return lowerPrmtIntrinsic(Op, DAG);
2944 case Intrinsic::nvvm_internal_addrspace_wrap:
2945 return Op.getOperand(1);
2946 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
2947 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
2948 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
2949 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
2951 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
2952 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
2953 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
2954 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
2955 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
2956 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
2957 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
2958 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
2959 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
2960 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
2961 return lowerCvtRSIntrinsics(Op, DAG);
2962 }
2963}
2964
2965 // In PTX, 64-bit CTLZ and CTPOP are supported, but they return a 32-bit
2966 // value. Lower these into a node that produces the 32-bit result and then
2967 // zero-extend it back to i64.
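// E.g. a 64-bit ctlz is expected to lower to something like
//   clz.b64     %r1, %rd1;      // 32-bit result
//   cvt.u64.u32 %rd2, %r1;      // zero-extend back to 64 bits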
2969 SDValue V = Op->getOperand(0);
2970 assert(V.getValueType() == MVT::i64 &&
2971 "Unexpected CTLZ/CTPOP type to legalize");
2972
2973 SDLoc DL(Op);
2974 SDValue CT = DAG.getNode(Op->getOpcode(), DL, MVT::i32, V);
2975 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, CT, SDNodeFlags::NonNeg);
2976}
2977
2979 unsigned Opcode, SelectionDAG &DAG) {
2980 assert(A.getValueType() == MVT::i64 && B.getValueType() == MVT::i64);
2981
2982 const auto *AmtConst = dyn_cast<ConstantSDNode>(ShiftAmount);
2983 if (!AmtConst)
2984 return SDValue();
2985 const auto Amt = AmtConst->getZExtValue() & 63;
2986
2987 SDValue UnpackA =
2988 DAG.getNode(NVPTXISD::UNPACK_VECTOR, DL, {MVT::i32, MVT::i32}, A);
2989 SDValue UnpackB =
2990 DAG.getNode(NVPTXISD::UNPACK_VECTOR, DL, {MVT::i32, MVT::i32}, B);
2991
2992 // The architecture is little endian: 0 = low bits, 1 = high bits
2993 SDValue ALo = UnpackA.getValue(0);
2994 SDValue AHi = UnpackA.getValue(1);
2995 SDValue BLo = UnpackB.getValue(0);
2996 SDValue BHi = UnpackB.getValue(1);
2997
2998 // The bitfield consists of { AHi : ALo : BHi : BLo }
2999 //
3000 // * FSHL, Amt < 32 - The window will contain { AHi : ALo : BHi }
3001 // * FSHL, Amt >= 32 - The window will contain { ALo : BHi : BLo }
3002 // * FSHR, Amt < 32 - The window will contain { ALo : BHi : BLo }
3003 // * FSHR, Amt >= 32 - The window will contain { AHi : ALo : BHi }
3004 //
3005 // Note that Amt = 0 and Amt = 32 are special cases where 32-bit funnel shifts
3006 // are not needed at all. Amt = 0 is a no-op producing either A or B depending
3007 // on the direction. Amt = 32 can be implemented by a packing and unpacking
3008 // move to select and arrange the 32bit values. For simplicity, these cases
3009 // are not handled here explicitly and instead we rely on DAGCombiner to
3010 // remove the no-op funnel shifts we insert.
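//
// As a sketch, `fshl i64 %a, %b, 40` (so Amt >= 32 and Amt & 31 == 8)
// becomes roughly
//   RHi = fshl i32 ALo, BHi, 8
//   RLo = fshl i32 BHi, BLo, 8
// with the result re-packed as the i64 {RHi : RLo}.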
3011 auto [High, Mid, Low] = ((Opcode == ISD::FSHL) == (Amt < 32))
3012 ? std::make_tuple(AHi, ALo, BHi)
3013 : std::make_tuple(ALo, BHi, BLo);
3014
3015 SDValue NewAmt = DAG.getConstant(Amt & 31, DL, MVT::i32);
3016 SDValue RHi = DAG.getNode(Opcode, DL, MVT::i32, {High, Mid, NewAmt});
3017 SDValue RLo = DAG.getNode(Opcode, DL, MVT::i32, {Mid, Low, NewAmt});
3018
3019 return DAG.getNode(NVPTXISD::BUILD_VECTOR, DL, MVT::i64, {RLo, RHi});
3020}
3021
3023 return expandFSH64(Op->getOperand(0), Op->getOperand(1), Op->getOperand(2),
3024 SDLoc(Op), Op->getOpcode(), DAG);
3025}
3026
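// A rotate is a funnel shift with both data operands equal:
// rotl(x, n) == fshl(x, x, n) and rotr(x, n) == fshr(x, x, n), so we reuse
// the 64-bit funnel-shift expansion above.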
3028 unsigned Opcode = Op->getOpcode() == ISD::ROTL ? ISD::FSHL : ISD::FSHR;
3029 return expandFSH64(Op->getOperand(0), Op->getOperand(0), Op->getOperand(1),
3030 SDLoc(Op), Opcode, DAG);
3031}
3032
3034 // Lower (frem x, y) into (sub x, (mul (ftrunc (div x, y)) y)),
3035 // i.e. "poor man's fmod()". When y is infinite, x is returned. This matches
3036 // the semantics of LLVM's frem.
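// For example, frem(5.5, 2.0) = 5.5 - ftrunc(5.5 / 2.0) * 2.0
//                             = 5.5 - 2.0 * 2.0 = 1.5.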
3037 SDLoc DL(Op);
3038 SDValue X = Op->getOperand(0);
3039 SDValue Y = Op->getOperand(1);
3040 EVT Ty = Op.getValueType();
3041 SDNodeFlags Flags = Op->getFlags();
3042
3043 SDValue Div = DAG.getNode(ISD::FDIV, DL, Ty, X, Y, Flags);
3044 SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, Ty, Div, Flags);
3045 SDValue Mul = DAG.getNode(ISD::FMUL, DL, Ty, Trunc, Y,
3047 SDValue Sub = DAG.getNode(ISD::FSUB, DL, Ty, X, Mul,
3049
3050 if (Flags.hasNoInfs())
3051 return Sub;
3052
3053 // If Y is infinite, return X
3054 SDValue AbsY = DAG.getNode(ISD::FABS, DL, Ty, Y);
3055 SDValue Inf =
3056 DAG.getConstantFP(APFloat::getInf(Ty.getFltSemantics()), DL, Ty);
3057 SDValue IsInf = DAG.getSetCC(DL, MVT::i1, AbsY, Inf, ISD::SETEQ);
3058 return DAG.getSelect(DL, Ty, IsInf, X, Sub);
3059}
3060
3062 assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
3063
3064 SDValue Cond = Op->getOperand(0);
3065 SDValue TrueVal = Op->getOperand(1);
3066 SDValue FalseVal = Op->getOperand(2);
3067 SDLoc DL(Op);
3068
3069 // If both operands are truncated, we push the select through the truncates.
3070 if (TrueVal.getOpcode() == ISD::TRUNCATE &&
3071 FalseVal.getOpcode() == ISD::TRUNCATE) {
3072 TrueVal = TrueVal.getOperand(0);
3073 FalseVal = FalseVal.getOperand(0);
3074
3075 EVT VT = TrueVal.getSimpleValueType().bitsLE(FalseVal.getSimpleValueType())
3076 ? TrueVal.getValueType()
3077 : FalseVal.getValueType();
3078 TrueVal = DAG.getAnyExtOrTrunc(TrueVal, DL, VT);
3079 FalseVal = DAG.getAnyExtOrTrunc(FalseVal, DL, VT);
3080 SDValue Select = DAG.getSelect(DL, VT, Cond, TrueVal, FalseVal);
3081 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Select);
3082 }
3083
3084 // Otherwise, expand the select into a series of logical operations. These
3085 // can often be folded into other operations, either by us or by ptxas.
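// I.e. select(c, t, f) -> (c & t) | (!c & f), with both values frozen so
// that poison in the unselected operand cannot leak into the result.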
3086 TrueVal = DAG.getFreeze(TrueVal);
3087 FalseVal = DAG.getFreeze(FalseVal);
3088 SDValue And1 = DAG.getNode(ISD::AND, DL, MVT::i1, Cond, TrueVal);
3089 SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1);
3090 SDValue And2 = DAG.getNode(ISD::AND, DL, MVT::i1, NotCond, FalseVal);
3091 SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i1, And1, And2);
3092 return Or;
3093}
3094
3095SDValue
3097 switch (Op.getOpcode()) {
3098 case ISD::RETURNADDR:
3099 return SDValue();
3100 case ISD::FRAMEADDR:
3101 return SDValue();
3102 case ISD::ADDRSPACECAST:
3103 return LowerADDRSPACECAST(Op, DAG);
3105 return lowerIntrinsicWChain(Op, DAG);
3107 return lowerIntrinsicWOChain(Op, DAG);
3109 return lowerIntrinsicVoid(Op, DAG);
3110 case ISD::BUILD_VECTOR:
3111 return LowerBUILD_VECTOR(Op, DAG);
3112 case ISD::BITCAST:
3113 return LowerBITCAST(Op, DAG);
3115 return Op;
3117 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
3119 return LowerINSERT_VECTOR_ELT(Op, DAG);
3121 return LowerVECTOR_SHUFFLE(Op, DAG);
3123 return LowerCONCAT_VECTORS(Op, DAG);
3124 case ISD::VECREDUCE_FMAX:
3125 case ISD::VECREDUCE_FMIN:
3126 case ISD::VECREDUCE_FMAXIMUM:
3127 case ISD::VECREDUCE_FMINIMUM:
3128 return LowerVECREDUCE(Op, DAG);
3129 case ISD::STORE:
3130 return LowerSTORE(Op, DAG);
3131 case ISD::LOAD:
3132 return LowerLOAD(Op, DAG);
3133 case ISD::SHL_PARTS:
3134 return LowerShiftLeftParts(Op, DAG);
3135 case ISD::SRA_PARTS:
3136 case ISD::SRL_PARTS:
3137 return LowerShiftRightParts(Op, DAG);
3138 case ISD::SELECT:
3139 return lowerSELECT(Op, DAG);
3140 case ISD::FROUND:
3141 return LowerFROUND(Op, DAG);
3142 case ISD::FCOPYSIGN:
3143 return LowerFCOPYSIGN(Op, DAG);
3144 case ISD::SINT_TO_FP:
3145 case ISD::UINT_TO_FP:
3146 return LowerINT_TO_FP(Op, DAG);
3147 case ISD::FP_TO_SINT:
3148 case ISD::FP_TO_UINT:
3149 return LowerFP_TO_INT(Op, DAG);
3150 case ISD::FP_ROUND:
3151 return LowerFP_ROUND(Op, DAG);
3152 case ISD::FP_EXTEND:
3153 return LowerFP_EXTEND(Op, DAG);
3154 case ISD::BR_JT:
3155 return LowerBR_JT(Op, DAG);
3156 case ISD::VAARG:
3157 return LowerVAARG(Op, DAG);
3158 case ISD::VASTART:
3159 return LowerVASTART(Op, DAG);
3160 case ISD::FSHL:
3161 case ISD::FSHR:
3162 return lowerFSH(Op, DAG);
3163 case ISD::ROTL:
3164 case ISD::ROTR:
3165 return lowerROT(Op, DAG);
3166 case ISD::ABS:
3167 case ISD::SMIN:
3168 case ISD::SMAX:
3169 case ISD::UMIN:
3170 case ISD::UMAX:
3171 case ISD::ADD:
3172 case ISD::SUB:
3173 case ISD::MUL:
3174 case ISD::SHL:
3175 case ISD::SREM:
3176 case ISD::UREM:
3177 return LowerVectorArith(Op, DAG);
3178 case ISD::DYNAMIC_STACKALLOC:
3179 return LowerDYNAMIC_STACKALLOC(Op, DAG);
3180 case ISD::STACKRESTORE:
3181 return LowerSTACKRESTORE(Op, DAG);
3182 case ISD::STACKSAVE:
3183 return LowerSTACKSAVE(Op, DAG);
3184 case ISD::CopyToReg:
3185 return LowerCopyToReg_128(Op, DAG);
3186 case ISD::FADD:
3187 case ISD::FSUB:
3188 case ISD::FMUL:
3189 // Used only for bf16 on SM80, where we select fma for non-ftz operation
3190 return PromoteBinOpIfF32FTZ(Op, DAG);
3191 case ISD::CTPOP:
3192 case ISD::CTLZ:
3193 return lowerCTLZCTPOP(Op, DAG);
3194 case ISD::FREM:
3195 return lowerFREM(Op, DAG);
3196
3197 default:
3198 llvm_unreachable("Custom lowering not defined for operation");
3199 }
3200}
3201
3202SDValue NVPTXTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
3203 SDLoc DL(Op);
3204 SDValue Chain = Op.getOperand(0);
3205 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
3206 SDValue Index = Op.getOperand(2);
3207
3208 unsigned JId = JT->getIndex();
3210 ArrayRef<MachineBasicBlock *> MBBs = MJTI->getJumpTables()[JId].MBBs;
3211
3212 SDValue IdV = DAG.getConstant(JId, DL, MVT::i32);
3213
3214 // Generate BrxStart node
3215 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
3216 Chain = DAG.getNode(NVPTXISD::BrxStart, DL, VTs, Chain, IdV);
3217
3218 // Generate BrxItem nodes
3219 assert(!MBBs.empty());
3220 for (MachineBasicBlock *MBB : MBBs.drop_back())
3221 Chain = DAG.getNode(NVPTXISD::BrxItem, DL, VTs, Chain.getValue(0),
3222 DAG.getBasicBlock(MBB), Chain.getValue(1));
3223
3224 // Generate BrxEnd nodes
3225 SDValue EndOps[] = {Chain.getValue(0), DAG.getBasicBlock(MBBs.back()), Index,
3226 IdV, Chain.getValue(1)};
3227 SDValue BrxEnd = DAG.getNode(NVPTXISD::BrxEnd, DL, MVT::Other, EndOps);
3228
3229 return BrxEnd;
3230}
3231
3232// This will prevent AsmPrinter from trying to print the jump tables itself.
3233unsigned NVPTXTargetLowering::getJumpTableEncoding() const {
3234 return MachineJumpTableInfo::EK_Inline;
3235}
3236
3237SDValue NVPTXTargetLowering::LowerADDRSPACECAST(SDValue Op,
3238 SelectionDAG &DAG) const {
3239 AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
3240 unsigned SrcAS = N->getSrcAddressSpace();
3241 unsigned DestAS = N->getDestAddressSpace();
3242 if (SrcAS != llvm::ADDRESS_SPACE_GENERIC &&
3243 DestAS != llvm::ADDRESS_SPACE_GENERIC) {
3244 // Shared and SharedCluster can be converted to each other through generic
3245 // space
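      // e.g. a shared -> shared::cluster cast is lowered as two casts,
      // shared -> generic followed by generic -> shared::cluster, since there
      // is no direct conversion between the two specific spaces.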
3246 if ((SrcAS == llvm::ADDRESS_SPACE_SHARED &&
3247 DestAS == llvm::ADDRESS_SPACE_SHARED_CLUSTER) ||
3248 (SrcAS == llvm::ADDRESS_SPACE_SHARED_CLUSTER &&
3249 DestAS == llvm::ADDRESS_SPACE_SHARED)) {
3250 SDLoc DL(Op.getNode());
3251 const MVT GenerictVT =
3252 getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_GENERIC);
3253 SDValue GenericConversion = DAG.getAddrSpaceCast(
3254 DL, GenerictVT, Op.getOperand(0), SrcAS, ADDRESS_SPACE_GENERIC);
3255 SDValue SharedClusterConversion =
3256 DAG.getAddrSpaceCast(DL, Op.getValueType(), GenericConversion,
3257 ADDRESS_SPACE_GENERIC, DestAS);
3258 return SharedClusterConversion;
3259 }
3260
3261 return DAG.getUNDEF(Op.getValueType());
3262 }
3263
3264 return Op;
3265}
3266
3267// This function is almost a copy of SelectionDAG::expandVAArg().
3268// The only diff is that this one produces loads from local address space.
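// Roughly: load the current va_list cursor (a local-space pointer), align it
// up if the argument requires more than the minimum stack alignment, advance
// it by the argument's allocation size, store the new cursor back, and load
// the argument itself through the old cursor.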
3269SDValue NVPTXTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3270 const TargetLowering *TLI = STI.getTargetLowering();
3271 SDLoc DL(Op);
3272
3273 SDNode *Node = Op.getNode();
3274 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3275 EVT VT = Node->getValueType(0);
3276 auto *Ty = VT.getTypeForEVT(*DAG.getContext());
3277 SDValue Tmp1 = Node->getOperand(0);
3278 SDValue Tmp2 = Node->getOperand(1);
3279 const MaybeAlign MA(Node->getConstantOperandVal(3));
3280
3281 SDValue VAListLoad = DAG.getLoad(TLI->getPointerTy(DAG.getDataLayout()), DL,
3282 Tmp1, Tmp2, MachinePointerInfo(V));
3283 SDValue VAList = VAListLoad;
3284
3285 if (MA && *MA > TLI->getMinStackArgumentAlignment()) {
3286 VAList = DAG.getNode(
3287 ISD::ADD, DL, VAList.getValueType(), VAList,
3288 DAG.getConstant(MA->value() - 1, DL, VAList.getValueType()));
3289
3290 VAList = DAG.getNode(ISD::AND, DL, VAList.getValueType(), VAList,
3291 DAG.getSignedConstant(-(int64_t)MA->value(), DL,
3292 VAList.getValueType()));
3293 }
3294
3295 // Increment the pointer, VAList, to the next vaarg
3296 Tmp1 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
3297 DAG.getConstant(DAG.getDataLayout().getTypeAllocSize(Ty),
3298 DL, VAList.getValueType()));
3299
3300 // Store the incremented VAList to the legalized pointer
3301 Tmp1 = DAG.getStore(VAListLoad.getValue(1), DL, Tmp1, Tmp2,
3302 MachinePointerInfo(V));
3303
3304 const Value *SrcV = Constant::getNullValue(
3305 PointerType::get(*DAG.getContext(), ADDRESS_SPACE_LOCAL));
3306
3307 // Load the actual argument out of the pointer VAList
3308 return DAG.getLoad(VT, DL, Tmp1, VAList, MachinePointerInfo(SrcV));
3309}
3310
3311SDValue NVPTXTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3312 const TargetLowering *TLI = STI.getTargetLowering();
3313 SDLoc DL(Op);
3314 EVT PtrVT = TLI->getPointerTy(DAG.getDataLayout());
3315
3316 // Store the address of unsized array <function>_vararg[] in the ap object.
3317 SDValue VAReg = getParamSymbol(DAG, /* vararg */ -1, PtrVT);
3318
3319 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3320 return DAG.getStore(Op.getOperand(0), DL, VAReg, Op.getOperand(1),
3321 MachinePointerInfo(SV));
3322}
3323
3324/// replaceLoadVector - Convert vector loads into multi-output scalar loads.
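/// For example (sketch): a sufficiently aligned "load <4 x float>, ptr %p"
/// becomes a single NVPTXISD::LoadV4 with four f32 results plus a chain; the
/// scalar results are then recombined with a BUILD_VECTOR (and bitcast back
/// to the original type if needed).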
3325static std::optional<std::pair<SDValue, SDValue>>
3326replaceLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI) {
3327 LoadSDNode *LD = cast<LoadSDNode>(N);
3328 const EVT ResVT = LD->getValueType(0);
3329 const EVT MemVT = LD->getMemoryVT();
3330
3331 // If we're doing sign/zero extension as part of the load, avoid lowering to
3332 // a LoadV node. TODO: consider relaxing this restriction.
3333 if (ResVT != MemVT)
3334 return std::nullopt;
3335
3336 const auto NumEltsAndEltVT =
3337 getVectorLoweringShape(ResVT, STI, LD->getAddressSpace());
3338 if (!NumEltsAndEltVT)
3339 return std::nullopt;
3340 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
3341
3342 Align Alignment = LD->getAlign();
3343 const auto &TD = DAG.getDataLayout();
3344 Align PrefAlign = TD.getPrefTypeAlign(MemVT.getTypeForEVT(*DAG.getContext()));
3345 if (Alignment < PrefAlign) {
3346 // This load is not sufficiently aligned, so bail out and let this vector
3347 // load be scalarized. Note that we may still be able to emit smaller
3348 // vector loads. For example, if we are loading a <4 x float> with an
3349 // alignment of 8, this check will fail but the legalizer will try again
3350 // with 2 x <2 x float>, which will succeed with an alignment of 8.
3351 return std::nullopt;
3352 }
3353
3354 // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
3355 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
3356 // loaded type to i16 and propagate the "real" type as the memory type.
3357 const MVT LoadEltVT = (EltVT.getSizeInBits() < 16) ? MVT::i16 : EltVT;
3358
3359 unsigned Opcode;
3360 switch (NumElts) {
3361 default:
3362 return std::nullopt;
3363 case 2:
3364 Opcode = NVPTXISD::LoadV2;
3365 break;
3366 case 4:
3367 Opcode = NVPTXISD::LoadV4;
3368 break;
3369 case 8:
3370 Opcode = NVPTXISD::LoadV8;
3371 break;
3372 }
3373 auto ListVTs = SmallVector<EVT, 9>(NumElts, LoadEltVT);
3374 ListVTs.push_back(MVT::Other);
3375 SDVTList LdResVTs = DAG.getVTList(ListVTs);
3376
3377 SDLoc DL(LD);
3378
3379 // Copy regular operands
3380 SmallVector<SDValue, 8> OtherOps(LD->ops());
3381
3382 // The select routine does not have access to the LoadSDNode instance, so
3383 // pass along the extension information
3384 OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));
3385
3386 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps, MemVT,
3387 LD->getMemOperand());
3388
3389 SmallVector<SDValue> ScalarRes;
3390 if (EltVT.isVector()) {
3392 assert(NumElts * EltVT.getVectorNumElements() ==
3393 ResVT.getVectorNumElements());
3394 // Generate EXTRACT_VECTOR_ELTs to split v2[i,f,bf]16/v4i8 subvectors back
3395 // into individual elements.
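    // e.g. (sketch) a v8f16 load lowered as LoadV4 yields four v2f16 values;
    // each one is unpacked into its two f16 elements here.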
3396 for (const unsigned I : llvm::seq(NumElts)) {
3397 SDValue SubVector = NewLD.getValue(I);
3398 DAG.ExtractVectorElements(SubVector, ScalarRes);
3399 }
3400 } else {
3401 for (const unsigned I : llvm::seq(NumElts)) {
3402 SDValue Res = NewLD.getValue(I);
3403 if (LoadEltVT != EltVT)
3404 Res = DAG.getNode(ISD::TRUNCATE, DL, EltVT, Res);
3405 ScalarRes.push_back(Res);
3406 }
3407 }
3408
3409 SDValue LoadChain = NewLD.getValue(NumElts);
3410
3411 const MVT BuildVecVT =
3412 MVT::getVectorVT(EltVT.getScalarType(), ScalarRes.size());
3413 SDValue BuildVec = DAG.getBuildVector(BuildVecVT, DL, ScalarRes);
3414 SDValue LoadValue = DAG.getBitcast(ResVT, BuildVec);
3415
3416 return {{LoadValue, LoadChain}};
3417}
3418
3421 const NVPTXSubtarget &STI) {
3422 if (auto Res = replaceLoadVector(N, DAG, STI))
3423 Results.append({Res->first, Res->second});
3424}
3425
3427 const NVPTXSubtarget &STI) {
3428 if (auto Res = replaceLoadVector(N, DAG, STI))
3429 return DAG.getMergeValues({Res->first, Res->second}, SDLoc(N));
3430 return SDValue();
3431}
3432
3433// v = ld i1* addr
3434// =>
3435// v1 = ld i8* addr (-> i16)
3436// v = trunc i16 to i1
3437static SDValue lowerLOADi1(LoadSDNode *LD, SelectionDAG &DAG) {
3438 SDLoc dl(LD);
3439 assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
3440 assert(LD->getValueType(0) == MVT::i1 && "Custom lowering for i1 load only");
3441 SDValue newLD = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i16, LD->getChain(),
3442 LD->getBasePtr(), LD->getPointerInfo(),
3443 MVT::i8, LD->getAlign(),
3444 LD->getMemOperand()->getFlags());
3445 SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
3446 // The legalizer (the caller) is expecting two values from the legalized
3447 // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
3448 // in LegalizeDAG.cpp which also uses MergeValues.
3449 return DAG.getMergeValues({result, LD->getChain()}, dl);
3450}
3451
3452SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
3453 LoadSDNode *LD = cast<LoadSDNode>(Op);
3454
3455 if (Op.getValueType() == MVT::i1)
3456 return lowerLOADi1(LD, DAG);
3457
3458 // To improve CodeGen we'll legalize any-extend loads to zext loads. This is
3459 // how they'll be lowered in ISel anyway, and by doing this a little earlier
3460 // we allow for more DAG combine opportunities.
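  // e.g. (sketch) an any-extending i8 -> i32 load is rewritten here as a
  // zero-extending load, which matches the ld.u8-style form selected later.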
3461 if (LD->getExtensionType() == ISD::EXTLOAD) {
3462 assert(LD->getValueType(0).isInteger() && LD->getMemoryVT().isInteger() &&
3463 "Unexpected fpext-load");
3464 return DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(Op), Op.getValueType(),
3465 LD->getChain(), LD->getBasePtr(), LD->getMemoryVT(),
3466 LD->getMemOperand());
3467 }
3468
3469 llvm_unreachable("Unexpected custom lowering for load");
3470}
3471
3472static SDValue lowerSTOREVector(SDValue Op, SelectionDAG &DAG,
3473 const NVPTXSubtarget &STI) {
3474 MemSDNode *N = cast<MemSDNode>(Op.getNode());
3475 SDValue Val = N->getOperand(1);
3476 SDLoc DL(N);
3477 const EVT ValVT = Val.getValueType();
3478 const EVT MemVT = N->getMemoryVT();
3479
3480 // If we're truncating as part of the store, avoid lowering to a StoreV node.
3481 // TODO: consider relaxing this restriction.
3482 if (ValVT != MemVT)
3483 return SDValue();
3484
3485 const auto NumEltsAndEltVT =
3486 getVectorLoweringShape(ValVT, STI, N->getAddressSpace());
3487 if (!NumEltsAndEltVT)
3488 return SDValue();
3489 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
3490
3491 const DataLayout &TD = DAG.getDataLayout();
3492
3493 Align Alignment = N->getAlign();
3494 Align PrefAlign = TD.getPrefTypeAlign(ValVT.getTypeForEVT(*DAG.getContext()));
3495 if (Alignment < PrefAlign) {
3496 // This store is not sufficiently aligned, so bail out and let this vector
3497 // store be scalarized. Note that we may still be able to emit smaller
3498 // vector stores. For example, if we are storing a <4 x float> with an
3499 // alignment of 8, this check will fail but the legalizer will try again
3500 // with 2 x <2 x float>, which will succeed with an alignment of 8.
3501 return SDValue();
3502 }
3503
3504 unsigned Opcode;
3505 switch (NumElts) {
3506 default:
3507 return SDValue();
3508 case 2:
3509 Opcode = NVPTXISD::StoreV2;
3510 break;
3511 case 4:
3512 Opcode = NVPTXISD::StoreV4;
3513 break;
3514 case 8:
3515 Opcode = NVPTXISD::StoreV8;
3516 break;
3517 }
3518
3519 SmallVector<SDValue, 8> Ops;
3520
3521 // First is the chain
3522 Ops.push_back(N->getOperand(0));
3523
3524 // Then the split values
3525 if (EltVT.isVector()) {
3527 assert(NumElts * EltVT.getVectorNumElements() ==
3528 ValVT.getVectorNumElements());
3529 // Combine individual elements into v2[i,f,bf]16/v4i8 subvectors to be
3530 // stored as b32s
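    // e.g. (sketch) a v8f16 store lowered as StoreV4 passes four v2f16
    // operands, each of which is stored as a single b32.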
3531 const unsigned NumEltsPerSubVector = EltVT.getVectorNumElements();
3532 for (const unsigned I : llvm::seq(NumElts)) {
3533 SmallVector<SDValue, 4> SubVectorElts;
3534 DAG.ExtractVectorElements(Val, SubVectorElts, I * NumEltsPerSubVector,
3535 NumEltsPerSubVector);
3536 Ops.push_back(DAG.getBuildVector(EltVT, DL, SubVectorElts));
3537 }
3538 } else {
3539 SDValue V = DAG.getBitcast(MVT::getVectorVT(EltVT, NumElts), Val);
3540 for (const unsigned I : llvm::seq(NumElts)) {
3541 SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, V,
3542 DAG.getIntPtrConstant(I, DL));
3543
3544 // Since StoreV2 is a target node, we cannot rely on DAG type
3545 // legalization. Therefore, we must ensure the type is legal. For i1 and
3546 // i8, we set the stored type to i16 and propagate the "real" type as the
3547 // memory type.
3548 if (EltVT.getSizeInBits() < 16)
3549 ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
3550 Ops.push_back(ExtVal);
3551 }
3552 }
3553
3554 // Then any remaining arguments
3555 Ops.append(N->op_begin() + 2, N->op_end());
3556
3557 SDValue NewSt =
3558 DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
3559 N->getMemoryVT(), N->getMemOperand());
3560
3561 // return DCI.CombineTo(N, NewSt, true);
3562 return NewSt;
3563}
3564
3565SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
3566 StoreSDNode *Store = cast<StoreSDNode>(Op);
3567 EVT VT = Store->getMemoryVT();
3568
3569 if (VT == MVT::i1)
3570 return LowerSTOREi1(Op, DAG);
3571
3572 // Lower store of any other vector type, including v2f32 as we want to break
3573 // it apart since this is not a widely-supported type.
3574 return lowerSTOREVector(Op, DAG, STI);
3575}
3576
3577// st i1 v, addr
3578// =>
3579// v1 = zxt v to i16
3580// st.u8 i16, addr
3581SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
3582 SDNode *Node = Op.getNode();
3583 SDLoc dl(Node);
3584 StoreSDNode *ST = cast<StoreSDNode>(Node);
3585 SDValue Tmp1 = ST->getChain();
3586 SDValue Tmp2 = ST->getBasePtr();
3587 SDValue Tmp3 = ST->getValue();
3588 assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
3589 Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
3590 SDValue Result =
3591 DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
3592 ST->getAlign(), ST->getMemOperand()->getFlags());
3593 return Result;
3594}
3595
3596SDValue NVPTXTargetLowering::LowerCopyToReg_128(SDValue Op,
3597 SelectionDAG &DAG) const {
3598 // Change the CopyToReg to take in two 64-bit operands instead of a 128-bit
3599 // operand so that it can pass the legalization.
3600
3601 assert(Op.getOperand(1).getValueType() == MVT::i128 &&
3602 "Custom lowering for 128-bit CopyToReg only");
3603
3604 SDNode *Node = Op.getNode();
3605 SDLoc DL(Node);
3606
3607 SDValue Cast = DAG.getBitcast(MVT::v2i64, Op->getOperand(2));
3608 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
3609 DAG.getIntPtrConstant(0, DL));
3610 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
3611 DAG.getIntPtrConstant(1, DL));
3612
3613 SmallVector<SDValue, 5> NewOps(Op->getNumOperands() + 1);
3614 SmallVector<EVT, 3> ResultsType(Node->values());
3615
3616 NewOps[0] = Op->getOperand(0); // Chain
3617 NewOps[1] = Op->getOperand(1); // Dst Reg
3618 NewOps[2] = Lo; // Lower 64-bit
3619 NewOps[3] = Hi; // Higher 64-bit
3620 if (Op.getNumOperands() == 4)
3621 NewOps[4] = Op->getOperand(3); // Glue if exists
3622
3623 return DAG.getNode(ISD::CopyToReg, DL, ResultsType, NewOps);
3624}
3625
3626unsigned NVPTXTargetLowering::getNumRegisters(
3627 LLVMContext &Context, EVT VT,
3628 std::optional<MVT> RegisterVT = std::nullopt) const {
3629 if (VT == MVT::i128 && RegisterVT == MVT::i128)
3630 return 1;
3631 return TargetLoweringBase::getNumRegisters(Context, VT, RegisterVT);
3632}
3633
3634bool NVPTXTargetLowering::splitValueIntoRegisterParts(
3635 SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
3636 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
3637 if (Val.getValueType() == MVT::i128 && NumParts == 1) {
3638 Parts[0] = Val;
3639 return true;
3640 }
3641 return false;
3642}
3643
3644// This creates a target external symbol for a function parameter.
3645// The name of the symbol is composed from its index and the function name.
3646// A negative index corresponds to the special parameter (unsized array) used
3647// for passing variable arguments.
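// For example (assuming the usual NVPTX naming), parameter 0 of a function
// "foo" is referenced through the symbol "foo_param_0", while the vararg
// buffer uses the negative-index form and the "<function>_vararg" symbol
// mentioned in LowerVASTART above.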
3648SDValue NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int I,
3649 EVT T) const {
3650 StringRef SavedStr = nvTM->getStrPool().save(
3652 return DAG.getExternalSymbol(SavedStr.data(), T);
3653}
3654
3655SDValue NVPTXTargetLowering::getCallParamSymbol(SelectionDAG &DAG, int I,
3656 EVT T) const {
3657 const StringRef SavedStr = nvTM->getStrPool().save("param" + Twine(I));
3658 return DAG.getExternalSymbol(SavedStr.data(), T);
3659}
3660
3661SDValue NVPTXTargetLowering::LowerFormalArguments(
3662 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3663 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3664 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3665 const DataLayout &DL = DAG.getDataLayout();
3666 LLVMContext &Ctx = *DAG.getContext();
3667 auto PtrVT = getPointerTy(DAG.getDataLayout());
3668
3669 const Function &F = DAG.getMachineFunction().getFunction();
3670
3671 SDValue Root = DAG.getRoot();
3672 SmallVector<SDValue, 16> OutChains;
3673
3674 // The number of IR arguments in F.args() and Ins.size() need not match.
3675 // Ins.size() will be larger
3676 // * if there is an aggregate argument with multiple fields (each field
3677 // showing up separately in Ins)
3678 // * if there is a vector argument with more than typical vector-length
3679 // elements (generally if more than 4) where each vector element is
3680 // individually present in Ins.
3681 // So a different index should be used for indexing into Ins.
3682 // See similar issue in LowerCall.
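  // For example, an argument of type {i32, float} contributes two entries to
  // Ins (both tagged with the same OrigArgIndex), which is why the loop below
  // peels off each argument's entries with take_while rather than assuming a
  // 1:1 correspondence with F.args().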
3683
3684 auto AllIns = ArrayRef(Ins);
3685 for (const auto &Arg : F.args()) {
3686 const auto ArgIns = AllIns.take_while(
3687 [&](auto I) { return I.OrigArgIndex == Arg.getArgNo(); });
3688 AllIns = AllIns.drop_front(ArgIns.size());
3689
3690 Type *Ty = Arg.getType();
3691
3692 if (ArgIns.empty())
3693 report_fatal_error("Empty parameter types are not supported");
3694
3695 if (Arg.use_empty()) {
3696 // argument is dead
3697 for (const auto &In : ArgIns) {
3698 assert(!In.Used && "Arg.use_empty() is true but Arg is used?");
3699 InVals.push_back(DAG.getUNDEF(In.VT));
3700 }
3701 continue;
3702 }
3703
3704 SDValue ArgSymbol = getParamSymbol(DAG, Arg.getArgNo(), PtrVT);
3705
3706 // In the following cases, assign a node order of "i+1"
3707 // to newly created nodes. The SDNodes for params have to
3708 // appear in the same order as their order of appearance
3709 // in the original function. "i+1" holds that order.
3710 if (Arg.hasByValAttr()) {
3711 // Param has ByVal attribute
3712 // Return MoveParam(param symbol).
3713 // Ideally, the param symbol can be returned directly,
3714 // but when SDNode builder decides to use it in a CopyToReg(),
3715 // machine instruction fails because TargetExternalSymbol
3716 // (not lowered) is target dependent, and CopyToReg assumes
3717 // the source is lowered.
3718 assert(ArgIns.size() == 1 && "ByVal argument must be a pointer");
3719 const auto &ByvalIn = ArgIns[0];
3720 assert(getValueType(DL, Ty) == ByvalIn.VT &&
3721 "Ins type did not match function type");
3722 assert(ByvalIn.VT == PtrVT && "ByVal argument must be a pointer");
3723
3724 SDValue P;
3725 if (isKernelFunction(F)) {
3726 P = ArgSymbol;
3727 P.getNode()->setIROrder(Arg.getArgNo() + 1);
3728 } else {
3729 P = DAG.getNode(NVPTXISD::MoveParam, dl, ByvalIn.VT, ArgSymbol);
3730 P.getNode()->setIROrder(Arg.getArgNo() + 1);
3731 P = DAG.getAddrSpaceCast(dl, ByvalIn.VT, P, ADDRESS_SPACE_LOCAL,
3732 ADDRESS_SPACE_GENERIC);
3733 }
3734 InVals.push_back(P);
3735 } else {
3736 SmallVector<EVT, 16> VTs;
3737 SmallVector<uint64_t, 16> Offsets;
3738 ComputePTXValueVTs(*this, DL, Ctx, CallConv, Ty, VTs, Offsets);
3739 assert(VTs.size() == ArgIns.size() && "Size mismatch");
3740 assert(VTs.size() == Offsets.size() && "Size mismatch");
3741
3742 const Align ArgAlign = getFunctionArgumentAlignment(
3743 &F, Ty, Arg.getArgNo() + AttributeList::FirstArgIndex, DL);
3744
3745 unsigned I = 0;
3746 const auto VI = VectorizePTXValueVTs(VTs, Offsets, ArgAlign);
3747 for (const unsigned NumElts : VI) {
3748 // i1 is loaded/stored as i8
3749 const EVT LoadVT = VTs[I] == MVT::i1 ? MVT::i8 : VTs[I];
3750 const EVT VecVT = getVectorizedVT(LoadVT, NumElts, Ctx);
3751
3752 SDValue VecAddr = DAG.getObjectPtrOffset(
3753 dl, ArgSymbol, TypeSize::getFixed(Offsets[I]));
3754
3755 const Align PartAlign = commonAlignment(ArgAlign, Offsets[I]);
3756 SDValue P =
3757 DAG.getLoad(VecVT, dl, Root, VecAddr,
3761 P.getNode()->setIROrder(Arg.getArgNo() + 1);
3762 for (const unsigned J : llvm::seq(NumElts)) {
3763 SDValue Elt = getExtractVectorizedValue(P, J, LoadVT, dl, DAG);
3764
3765 Elt = correctParamType(Elt, ArgIns[I + J].VT, ArgIns[I + J].Flags,
3766 DAG, dl);
3767 InVals.push_back(Elt);
3768 }
3769 I += NumElts;
3770 }
3771 }
3772 }
3773
3774 if (!OutChains.empty())
3775 DAG.setRoot(DAG.getTokenFactor(dl, OutChains));
3776
3777 return Chain;
3778}
3779
3780SDValue
3781NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
3782 bool isVarArg,
3783 const SmallVectorImpl<ISD::OutputArg> &Outs,
3784 const SmallVectorImpl<SDValue> &OutVals,
3785 const SDLoc &dl, SelectionDAG &DAG) const {
3786 const Function &F = DAG.getMachineFunction().getFunction();
3787 Type *RetTy = F.getReturnType();
3788
3789 if (RetTy->isVoidTy()) {
3790 assert(OutVals.empty() && Outs.empty() && "Return value expected for void");
3791 return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);
3792 }
3793
3794 const DataLayout &DL = DAG.getDataLayout();
3795 LLVMContext &Ctx = *DAG.getContext();
3796
3797 const SDValue RetSymbol = DAG.getExternalSymbol("func_retval0", MVT::i32);
3798 const auto RetAlign = getFunctionParamOptimizedAlign(&F, RetTy, DL);
3799
3800 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
3801 // 32-bits are sign extended or zero extended, depending on whether
3802 // they are signed or unsigned types.
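  // e.g. an i16 return value is widened here and stored to the return slot as
  // a 32-bit value, per the rule above.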
3803 const bool ExtendIntegerRetVal =
3804 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
3805
3808 ComputePTXValueVTs(*this, DL, Ctx, CallConv, RetTy, VTs, Offsets);
3809 assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
3810
3811 const auto GetRetVal = [&](unsigned I) -> SDValue {
3812 SDValue RetVal = OutVals[I];
3813 assert(promoteScalarIntegerPTX(RetVal.getValueType()) ==
3814 RetVal.getValueType() &&
3815 "OutVal type should always be legal");
3816
3817 const EVT VTI = promoteScalarIntegerPTX(VTs[I]);
3818 const EVT StoreVT =
3819 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
3820 return correctParamType(RetVal, StoreVT, Outs[I].Flags, DAG, dl);
3821 };
3822
3823 unsigned I = 0;
3824 const auto VI = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
3825 for (const unsigned NumElts : VI) {
3826 const MaybeAlign CurrentAlign = ExtendIntegerRetVal
3827 ? MaybeAlign(std::nullopt)
3828 : commonAlignment(RetAlign, Offsets[I]);
3829
3830 SDValue Val = getBuildVectorizedValue(
3831 NumElts, dl, DAG, [&](unsigned K) { return GetRetVal(I + K); });
3832
3833 SDValue Ptr =
3834 DAG.getObjectPtrOffset(dl, RetSymbol, TypeSize::getFixed(Offsets[I]));
3835
3836 Chain = DAG.getStore(Chain, dl, Val, Ptr,
3837 MachinePointerInfo(ADDRESS_SPACE_PARAM), CurrentAlign);
3838
3839 I += NumElts;
3840 }
3841
3842 return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);
3843}
3844
3845void NVPTXTargetLowering::LowerAsmOperandForConstraint(
3846 SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
3847 SelectionDAG &DAG) const {
3848 if (Constraint.size() > 1)
3849 return;
3850 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3851}
3852
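// For every intrinsic recognized below, getTgtMemIntrinsic() fills in an
// IntrinsicInfo record (node opcode, memory VT, pointer operand, flags and
// alignment) so that SelectionDAG can attach a MachineMemOperand to the
// resulting node.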
3853// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
3854// TgtMemIntrinsic
3855// because we need the information that is only available in the "Value" type
3856// of destination
3857// pointer. In particular, the address space information.
3858bool NVPTXTargetLowering::getTgtMemIntrinsic(
3859 IntrinsicInfo &Info, const CallInst &I,
3860 MachineFunction &MF, unsigned Intrinsic) const {
3861 switch (Intrinsic) {
3862 default:
3863 return false;
3864 case Intrinsic::nvvm_match_all_sync_i32p:
3865 case Intrinsic::nvvm_match_all_sync_i64p:
3866 Info.opc = ISD::INTRINSIC_W_CHAIN;
3867 // memVT is bogus. These intrinsics have IntrInaccessibleMemOnly attribute
3868 // in order to model data exchange with other threads, but perform no real
3869 // memory accesses.
3870 Info.memVT = MVT::i1;
3871
3872 // Our result depends on both our and other thread's arguments.
3873 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
3874 return true;
3875 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col:
3876 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row:
3877 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride:
3878 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride:
3879 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col:
3880 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row:
3881 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride:
3882 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride:
3883 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col:
3884 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row:
3885 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride:
3886 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride:
3887 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col:
3888 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row:
3889 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride:
3890 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride:
3891 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col:
3892 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row:
3893 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride:
3894 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride:
3895 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col:
3896 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row:
3897 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride:
3898 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride: {
3899 Info.opc = ISD::INTRINSIC_W_CHAIN;
3900 Info.memVT = MVT::v8f16;
3901 Info.ptrVal = I.getArgOperand(0);
3902 Info.offset = 0;
3903 Info.flags = MachineMemOperand::MOLoad;
3904 Info.align = Align(16);
3905 return true;
3906 }
3907 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col:
3908 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col_stride:
3909 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col_stride:
3910 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col:
3911 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row:
3912 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row_stride:
3913 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row_stride:
3914 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row:
3915 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col:
3916 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col_stride:
3917 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row:
3918 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row_stride:
3919 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col:
3920 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col_stride:
3921 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col_stride:
3922 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col:
3923 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row:
3924 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row_stride:
3925 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row_stride:
3926 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row:
3927 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col:
3928 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col_stride:
3929 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row:
3930 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row_stride: {
3931 Info.opc = ISD::INTRINSIC_W_CHAIN;
3932 Info.memVT = MVT::v2i32;
3933 Info.ptrVal = I.getArgOperand(0);
3934 Info.offset = 0;
3935 Info.flags = MachineMemOperand::MOLoad;
3936 Info.align = Align(8);
3937 return true;
3938 }
3939
3940 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col:
3941 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col_stride:
3942 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col_stride:
3943 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col:
3944 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row:
3945 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row_stride:
3946 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row_stride:
3947 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row:
3948 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col:
3949 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col_stride:
3950 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row:
3951 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row_stride:
3952 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col:
3953 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col_stride:
3954 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row:
3955 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row_stride:
3956
3957 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col:
3958 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col_stride:
3959 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col_stride:
3960 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col:
3961 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row:
3962 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row_stride:
3963 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row_stride:
3964 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row:
3965 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col:
3966 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col_stride:
3967 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row:
3968 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row_stride:
3969 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col:
3970 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col_stride:
3971 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row:
3972 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row_stride:
3973 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_b16:
3974 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_trans_b16:
3975 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8:
3976 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b4x16_p64:
3977 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b6x16_p32:
3978 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b4x16_p64:
3979 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b6x16_p32: {
3980 Info.opc = ISD::INTRINSIC_W_CHAIN;
3981 Info.memVT = MVT::v4i32;
3982 Info.ptrVal = I.getArgOperand(0);
3983 Info.offset = 0;
3984 Info.flags = MachineMemOperand::MOLoad;
3985 Info.align = Align(16);
3986 return true;
3987 }
3988
3989 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col:
3990 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col_stride:
3991 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col_stride:
3992 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col:
3993 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row:
3994 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row_stride:
3995 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row_stride:
3996 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row:
3997
3998 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col:
3999 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col_stride:
4000 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col_stride:
4001 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col:
4002 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row:
4003 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row_stride:
4004 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row_stride:
4005 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row:
4006 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row:
4007 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row_stride:
4008 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col:
4009 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col_stride:
4010 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row:
4011 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row_stride:
4012 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row_stride:
4013 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row:
4014 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col:
4015 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col_stride:
4016 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col_stride:
4017 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col:
4018 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_b16:
4019 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_trans_b16:
4020 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b4x16_p64:
4021 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b6x16_p32: {
4022 Info.opc = ISD::INTRINSIC_W_CHAIN;
4023 Info.memVT = MVT::i32;
4024 Info.ptrVal = I.getArgOperand(0);
4025 Info.offset = 0;
4026 Info.flags = MachineMemOperand::MOLoad;
4027 Info.align = Align(4);
4028 return true;
4029 }
4030
4031 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col:
4032 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row:
4033 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride:
4034 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride:
4035 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col:
4036 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row:
4037 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride:
4038 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride:
4039 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col:
4040 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row:
4041 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride:
4042 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride: {
4043 Info.opc = ISD::INTRINSIC_W_CHAIN;
4044 Info.memVT = MVT::v4f16;
4045 Info.ptrVal = I.getArgOperand(0);
4046 Info.offset = 0;
4047 Info.flags = MachineMemOperand::MOLoad;
4048 Info.align = Align(16);
4049 return true;
4050 }
4051
4052 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col:
4053 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row:
4054 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride:
4055 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride:
4056 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col:
4057 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row:
4058 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride:
4059 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride:
4060 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col:
4061 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row:
4062 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride:
4063 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride:
4064 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col:
4065 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row:
4066 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col_stride:
4067 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row_stride: {
4068 Info.opc = ISD::INTRINSIC_W_CHAIN;
4069 Info.memVT = MVT::v8f32;
4070 Info.ptrVal = I.getArgOperand(0);
4071 Info.offset = 0;
4072 Info.flags = MachineMemOperand::MOLoad;
4073 Info.align = Align(16);
4074 return true;
4075 }
4076
4077 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col:
4078 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col_stride:
4079 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row:
4080 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row_stride:
4081
4082 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col:
4083 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col_stride:
4084 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row:
4085 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row_stride:
4086
4087 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col:
4088 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col_stride:
4089 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row:
4090 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row_stride:
4091 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col:
4092 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col_stride:
4093 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row:
4094 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row_stride:
4095 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col:
4096 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col_stride:
4097 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row:
4098 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row_stride: {
4099 Info.opc = ISD::INTRINSIC_W_CHAIN;
4100 Info.memVT = MVT::v8i32;
4101 Info.ptrVal = I.getArgOperand(0);
4102 Info.offset = 0;
4103 Info.flags = MachineMemOperand::MOLoad;
4104 Info.align = Align(16);
4105 return true;
4106 }
4107
4108 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col:
4109 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col_stride:
4110 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row:
4111 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row_stride:
4112 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col:
4113 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col_stride:
4114 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row:
4115 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row_stride:
4116 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_b16:
4117 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_trans_b16:
4118 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8:
4119 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b4x16_p64:
4120 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b6x16_p32:
4121 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b4x16_p64:
4122 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b6x16_p32: {
4123 Info.opc = ISD::INTRINSIC_W_CHAIN;
4124 Info.memVT = MVT::v2i32;
4125 Info.ptrVal = I.getArgOperand(0);
4126 Info.offset = 0;
4127 Info.flags = MachineMemOperand::MOLoad;
4128 Info.align = Align(8);
4129 return true;
4130 }
4131
4132 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col:
4133 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col_stride:
4134 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row:
4135 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row_stride:
4136
4137 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col:
4138 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col_stride:
4139 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row:
4140 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row_stride: {
4141 Info.opc = ISD::INTRINSIC_W_CHAIN;
4142 Info.memVT = MVT::f64;
4143 Info.ptrVal = I.getArgOperand(0);
4144 Info.offset = 0;
4145 Info.flags = MachineMemOperand::MOLoad;
4146 Info.align = Align(8);
4147 return true;
4148 }
4149
4150 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col:
4151 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col_stride:
4152 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row:
4153 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row_stride: {
4154 Info.opc = ISD::INTRINSIC_W_CHAIN;
4155 Info.memVT = MVT::v2f64;
4156 Info.ptrVal = I.getArgOperand(0);
4157 Info.offset = 0;
4158 Info.flags = MachineMemOperand::MOLoad;
4159 Info.align = Align(16);
4160 return true;
4161 }
4162
4163 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col:
4164 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row:
4165 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride:
4166 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride:
4167 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col:
4168 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row:
4169 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride:
4170 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride:
4171 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col:
4172 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row:
4173 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride:
4174 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride: {
4175 Info.opc = ISD::INTRINSIC_VOID;
4176 Info.memVT = MVT::v4f16;
4177 Info.ptrVal = I.getArgOperand(0);
4178 Info.offset = 0;
4179 Info.flags = MachineMemOperand::MOStore;
4180 Info.align = Align(16);
4181 return true;
4182 }
4183
4184 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col:
4185 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row:
4186 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride:
4187 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride:
4188 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col:
4189 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row:
4190 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride:
4191 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride:
4192 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col:
4193 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row:
4194 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride:
4195 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride:
4196 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col:
4197 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row:
4198 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col_stride:
4199 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row_stride: {
4200 Info.opc = ISD::INTRINSIC_VOID;
4201 Info.memVT = MVT::v8f32;
4202 Info.ptrVal = I.getArgOperand(0);
4203 Info.offset = 0;
4204 Info.flags = MachineMemOperand::MOStore;
4205 Info.align = Align(16);
4206 return true;
4207 }
4208
4209 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col:
4210 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col_stride:
4211 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row:
4212 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row_stride:
4213 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col:
4214 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col_stride:
4215 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row:
4216 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row_stride:
4217 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col:
4218 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col_stride:
4219 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row:
4220 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row_stride: {
4221 Info.opc = ISD::INTRINSIC_VOID;
4222 Info.memVT = MVT::v8i32;
4223 Info.ptrVal = I.getArgOperand(0);
4224 Info.offset = 0;
4225 Info.flags = MachineMemOperand::MOStore;
4226 Info.align = Align(16);
4227 return true;
4228 }
4229
4230 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col:
4231 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col_stride:
4232 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row:
4233 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row_stride:
4234 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col:
4235 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col_stride:
4236 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row:
4237 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row_stride:
4238 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_b16:
4239 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_trans_b16:
4240 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x2_trans_b8: {
4241 Info.opc = ISD::INTRINSIC_VOID;
4242 Info.memVT = MVT::v2i32;
4243 Info.ptrVal = I.getArgOperand(0);
4244 Info.offset = 0;
4245 Info.flags = MachineMemOperand::MOStore;
4246 Info.align = Align(8);
4247 return true;
4248 }
4249
4250 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col:
4251 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col_stride:
4252 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row:
4253 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row_stride: {
4254 Info.opc = ISD::INTRINSIC_VOID;
4255 Info.memVT = MVT::v2f64;
4256 Info.ptrVal = I.getArgOperand(0);
4257 Info.offset = 0;
4258 Info.flags = MachineMemOperand::MOStore;
4259 Info.align = Align(16);
4260 return true;
4261 }
4262
4263 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_b16:
4264 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_trans_b16:
4265 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x1_trans_b8: {
4266 Info.opc = ISD::INTRINSIC_VOID;
4267 Info.memVT = MVT::i32;
4268 Info.ptrVal = I.getArgOperand(0);
4269 Info.offset = 0;
4270 Info.flags = MachineMemOperand::MOStore;
4271 Info.align = Align(4);
4272 return true;
4273 }
4274
4275 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_b16:
4276 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_trans_b16:
4277 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x4_trans_b8: {
4278 Info.opc = ISD::INTRINSIC_VOID;
4279 Info.memVT = MVT::v4i32;
4280 Info.ptrVal = I.getArgOperand(0);
4281 Info.offset = 0;
4282 Info.flags = MachineMemOperand::MOStore;
4283 Info.align = Align(16);
4284 return true;
4285 }
4286
4287 case Intrinsic::nvvm_atomic_add_gen_f_cta:
4288 case Intrinsic::nvvm_atomic_add_gen_f_sys:
4289 case Intrinsic::nvvm_atomic_add_gen_i_cta:
4290 case Intrinsic::nvvm_atomic_add_gen_i_sys:
4291 case Intrinsic::nvvm_atomic_and_gen_i_cta:
4292 case Intrinsic::nvvm_atomic_and_gen_i_sys:
4293 case Intrinsic::nvvm_atomic_cas_gen_i_cta:
4294 case Intrinsic::nvvm_atomic_cas_gen_i_sys:
4295 case Intrinsic::nvvm_atomic_dec_gen_i_cta:
4296 case Intrinsic::nvvm_atomic_dec_gen_i_sys:
4297 case Intrinsic::nvvm_atomic_inc_gen_i_cta:
4298 case Intrinsic::nvvm_atomic_inc_gen_i_sys:
4299 case Intrinsic::nvvm_atomic_max_gen_i_cta:
4300 case Intrinsic::nvvm_atomic_max_gen_i_sys:
4301 case Intrinsic::nvvm_atomic_min_gen_i_cta:
4302 case Intrinsic::nvvm_atomic_min_gen_i_sys:
4303 case Intrinsic::nvvm_atomic_or_gen_i_cta:
4304 case Intrinsic::nvvm_atomic_or_gen_i_sys:
4305 case Intrinsic::nvvm_atomic_exch_gen_i_cta:
4306 case Intrinsic::nvvm_atomic_exch_gen_i_sys:
4307 case Intrinsic::nvvm_atomic_xor_gen_i_cta:
4308 case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
4309 auto &DL = I.getDataLayout();
4310 Info.opc = ISD::INTRINSIC_W_CHAIN;
4311 Info.memVT = getValueType(DL, I.getType());
4312 Info.ptrVal = I.getArgOperand(0);
4313 Info.offset = 0;
4315 Info.align.reset();
4316 return true;
4317 }
4318
4319 case Intrinsic::nvvm_prefetch_tensormap: {
4320 auto &DL = I.getDataLayout();
4321 Info.opc = ISD::INTRINSIC_VOID;
4322 Info.memVT = getPointerTy(DL);
4323 Info.ptrVal = I.getArgOperand(0);
4324 Info.offset = 0;
4325 Info.flags =
4327 Info.align.reset();
4328 return true;
4329 }
4330
4331 case Intrinsic::nvvm_ldu_global_i:
4332 case Intrinsic::nvvm_ldu_global_f:
4333 case Intrinsic::nvvm_ldu_global_p: {
4334 Info.opc = ISD::INTRINSIC_W_CHAIN;
4335 Info.memVT = getValueType(I.getDataLayout(), I.getType());
4336 Info.ptrVal = I.getArgOperand(0);
4337 Info.offset = 0;
4338 Info.flags = MachineMemOperand::MOLoad;
4339 Info.align = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4340
4341 return true;
4342 }
4343 case Intrinsic::nvvm_tex_1d_v4f32_s32:
4344 case Intrinsic::nvvm_tex_1d_v4f32_f32:
4345 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
4346 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
4347 case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
4348 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
4349 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
4350 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
4351 case Intrinsic::nvvm_tex_2d_v4f32_s32:
4352 case Intrinsic::nvvm_tex_2d_v4f32_f32:
4353 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
4354 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
4355 case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
4356 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
4357 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
4358 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
4359 case Intrinsic::nvvm_tex_3d_v4f32_s32:
4360 case Intrinsic::nvvm_tex_3d_v4f32_f32:
4361 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
4362 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
4363 case Intrinsic::nvvm_tex_cube_v4f32_f32:
4364 case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
4365 case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
4366 case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
4367 case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
4368 case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
4369 case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
4370 case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
4371 case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
4372 case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
4373 case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
4374 case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
4375 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
4376 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
4377 case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
4378 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
4379 case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
4380 case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
4381 case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
4382 case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
4383 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
4384 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
4385 case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
4386 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
4387 case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
4388 case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
4389 case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
4390 case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
4391 case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
4392 case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
4393 case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
4394 case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
4395 case Intrinsic::nvvm_tex_unified_cube_grad_v4f32_f32:
4396 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4f32_f32:
4397 case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
4398 case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
4399 case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
4400 case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
4401 Info.opc = ISD::INTRINSIC_W_CHAIN;
4402 Info.memVT = MVT::v4f32;
4403 Info.ptrVal = nullptr;
4404 Info.offset = 0;
4405 Info.flags = MachineMemOperand::MOLoad;
4406 Info.align = Align(16);
4407 return true;
4408
4409 case Intrinsic::nvvm_tex_1d_v4s32_s32:
4410 case Intrinsic::nvvm_tex_1d_v4s32_f32:
4411 case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
4412 case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
4413 case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
4414 case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
4415 case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
4416 case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
4417 case Intrinsic::nvvm_tex_2d_v4s32_s32:
4418 case Intrinsic::nvvm_tex_2d_v4s32_f32:
4419 case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
4420 case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
4421 case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
4422 case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
4423 case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
4424 case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
4425 case Intrinsic::nvvm_tex_3d_v4s32_s32:
4426 case Intrinsic::nvvm_tex_3d_v4s32_f32:
4427 case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
4428 case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
4429 case Intrinsic::nvvm_tex_cube_v4s32_f32:
4430 case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
4431 case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
4432 case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
4433 case Intrinsic::nvvm_tex_cube_v4u32_f32:
4434 case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
4435 case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
4436 case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
4437 case Intrinsic::nvvm_tex_1d_v4u32_s32:
4438 case Intrinsic::nvvm_tex_1d_v4u32_f32:
4439 case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
4440 case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
4441 case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
4442 case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
4443 case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
4444 case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
4445 case Intrinsic::nvvm_tex_2d_v4u32_s32:
4446 case Intrinsic::nvvm_tex_2d_v4u32_f32:
4447 case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
4448 case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
4449 case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
4450 case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
4451 case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
4452 case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
4453 case Intrinsic::nvvm_tex_3d_v4u32_s32:
4454 case Intrinsic::nvvm_tex_3d_v4u32_f32:
4455 case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
4456 case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
4457 case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
4458 case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
4459 case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
4460 case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
4461 case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
4462 case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
4463 case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
4464 case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
4465 case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
4466 case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
4467 case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
4468 case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
4469 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
4470 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
4471 case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
4472 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
4473 case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
4474 case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
4475 case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
4476 case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
4477 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
4478 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
4479 case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
4480 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
4481 case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
4482 case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
4483 case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
4484 case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
4485 case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
4486 case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
4487 case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
4488 case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
4489 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
4490 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
4491 case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
4492 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
4493 case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
4494 case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
4495 case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
4496 case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
4497 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
4498 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
4499 case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
4500 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
4501 case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
4502 case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
4503 case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
4504 case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
4505 case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
4506 case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
4507 case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
4508 case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
4509 case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
4510 case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
4511 case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
4512 case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
4513 case Intrinsic::nvvm_tex_unified_cube_grad_v4s32_f32:
4514 case Intrinsic::nvvm_tex_unified_cube_grad_v4u32_f32:
4515 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4s32_f32:
4516 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4u32_f32:
4517 case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
4518 case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
4519 case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
4520 case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
4521 case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
4522 case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
4523 case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
4524 case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
4525 Info.opc = ISD::INTRINSIC_W_CHAIN;
4526 Info.memVT = MVT::v4i32;
4527 Info.ptrVal = nullptr;
4528 Info.offset = 0;
4529 Info.flags = MachineMemOperand::MOLoad;
4530 Info.align = Align(16);
4531 return true;
4532
4533 case Intrinsic::nvvm_suld_1d_i8_clamp:
4534 case Intrinsic::nvvm_suld_1d_v2i8_clamp:
4535 case Intrinsic::nvvm_suld_1d_v4i8_clamp:
4536 case Intrinsic::nvvm_suld_1d_array_i8_clamp:
4537 case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
4538 case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
4539 case Intrinsic::nvvm_suld_2d_i8_clamp:
4540 case Intrinsic::nvvm_suld_2d_v2i8_clamp:
4541 case Intrinsic::nvvm_suld_2d_v4i8_clamp:
4542 case Intrinsic::nvvm_suld_2d_array_i8_clamp:
4543 case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
4544 case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
4545 case Intrinsic::nvvm_suld_3d_i8_clamp:
4546 case Intrinsic::nvvm_suld_3d_v2i8_clamp:
4547 case Intrinsic::nvvm_suld_3d_v4i8_clamp:
4548 case Intrinsic::nvvm_suld_1d_i8_trap:
4549 case Intrinsic::nvvm_suld_1d_v2i8_trap:
4550 case Intrinsic::nvvm_suld_1d_v4i8_trap:
4551 case Intrinsic::nvvm_suld_1d_array_i8_trap:
4552 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
4553 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
4554 case Intrinsic::nvvm_suld_2d_i8_trap:
4555 case Intrinsic::nvvm_suld_2d_v2i8_trap:
4556 case Intrinsic::nvvm_suld_2d_v4i8_trap:
4557 case Intrinsic::nvvm_suld_2d_array_i8_trap:
4558 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
4559 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
4560 case Intrinsic::nvvm_suld_3d_i8_trap:
4561 case Intrinsic::nvvm_suld_3d_v2i8_trap:
4562 case Intrinsic::nvvm_suld_3d_v4i8_trap:
4563 case Intrinsic::nvvm_suld_1d_i8_zero:
4564 case Intrinsic::nvvm_suld_1d_v2i8_zero:
4565 case Intrinsic::nvvm_suld_1d_v4i8_zero:
4566 case Intrinsic::nvvm_suld_1d_array_i8_zero:
4567 case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
4568 case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
4569 case Intrinsic::nvvm_suld_2d_i8_zero:
4570 case Intrinsic::nvvm_suld_2d_v2i8_zero:
4571 case Intrinsic::nvvm_suld_2d_v4i8_zero:
4572 case Intrinsic::nvvm_suld_2d_array_i8_zero:
4573 case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
4574 case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
4575 case Intrinsic::nvvm_suld_3d_i8_zero:
4576 case Intrinsic::nvvm_suld_3d_v2i8_zero:
4577 case Intrinsic::nvvm_suld_3d_v4i8_zero:
4578 Info.opc = ISD::INTRINSIC_W_CHAIN;
4579 Info.memVT = MVT::i8;
4580 Info.ptrVal = nullptr;
4581 Info.offset = 0;
4582 Info.flags = MachineMemOperand::MOLoad;
4583 Info.align = Align(16);
4584 return true;
4585
4586 case Intrinsic::nvvm_suld_1d_i16_clamp:
4587 case Intrinsic::nvvm_suld_1d_v2i16_clamp:
4588 case Intrinsic::nvvm_suld_1d_v4i16_clamp:
4589 case Intrinsic::nvvm_suld_1d_array_i16_clamp:
4590 case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
4591 case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
4592 case Intrinsic::nvvm_suld_2d_i16_clamp:
4593 case Intrinsic::nvvm_suld_2d_v2i16_clamp:
4594 case Intrinsic::nvvm_suld_2d_v4i16_clamp:
4595 case Intrinsic::nvvm_suld_2d_array_i16_clamp:
4596 case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
4597 case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
4598 case Intrinsic::nvvm_suld_3d_i16_clamp:
4599 case Intrinsic::nvvm_suld_3d_v2i16_clamp:
4600 case Intrinsic::nvvm_suld_3d_v4i16_clamp:
4601 case Intrinsic::nvvm_suld_1d_i16_trap:
4602 case Intrinsic::nvvm_suld_1d_v2i16_trap:
4603 case Intrinsic::nvvm_suld_1d_v4i16_trap:
4604 case Intrinsic::nvvm_suld_1d_array_i16_trap:
4605 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
4606 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
4607 case Intrinsic::nvvm_suld_2d_i16_trap:
4608 case Intrinsic::nvvm_suld_2d_v2i16_trap:
4609 case Intrinsic::nvvm_suld_2d_v4i16_trap:
4610 case Intrinsic::nvvm_suld_2d_array_i16_trap:
4611 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
4612 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
4613 case Intrinsic::nvvm_suld_3d_i16_trap:
4614 case Intrinsic::nvvm_suld_3d_v2i16_trap:
4615 case Intrinsic::nvvm_suld_3d_v4i16_trap:
4616 case Intrinsic::nvvm_suld_1d_i16_zero:
4617 case Intrinsic::nvvm_suld_1d_v2i16_zero:
4618 case Intrinsic::nvvm_suld_1d_v4i16_zero:
4619 case Intrinsic::nvvm_suld_1d_array_i16_zero:
4620 case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
4621 case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
4622 case Intrinsic::nvvm_suld_2d_i16_zero:
4623 case Intrinsic::nvvm_suld_2d_v2i16_zero:
4624 case Intrinsic::nvvm_suld_2d_v4i16_zero:
4625 case Intrinsic::nvvm_suld_2d_array_i16_zero:
4626 case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
4627 case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
4628 case Intrinsic::nvvm_suld_3d_i16_zero:
4629 case Intrinsic::nvvm_suld_3d_v2i16_zero:
4630 case Intrinsic::nvvm_suld_3d_v4i16_zero:
4631 Info.opc = ISD::INTRINSIC_W_CHAIN;
4632 Info.memVT = MVT::i16;
4633 Info.ptrVal = nullptr;
4634 Info.offset = 0;
4635 Info.flags = MachineMemOperand::MOLoad;
4636 Info.align = Align(16);
4637 return true;
4638
4639 case Intrinsic::nvvm_suld_1d_i32_clamp:
4640 case Intrinsic::nvvm_suld_1d_v2i32_clamp:
4641 case Intrinsic::nvvm_suld_1d_v4i32_clamp:
4642 case Intrinsic::nvvm_suld_1d_array_i32_clamp:
4643 case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
4644 case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
4645 case Intrinsic::nvvm_suld_2d_i32_clamp:
4646 case Intrinsic::nvvm_suld_2d_v2i32_clamp:
4647 case Intrinsic::nvvm_suld_2d_v4i32_clamp:
4648 case Intrinsic::nvvm_suld_2d_array_i32_clamp:
4649 case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
4650 case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
4651 case Intrinsic::nvvm_suld_3d_i32_clamp:
4652 case Intrinsic::nvvm_suld_3d_v2i32_clamp:
4653 case Intrinsic::nvvm_suld_3d_v4i32_clamp:
4654 case Intrinsic::nvvm_suld_1d_i32_trap:
4655 case Intrinsic::nvvm_suld_1d_v2i32_trap:
4656 case Intrinsic::nvvm_suld_1d_v4i32_trap:
4657 case Intrinsic::nvvm_suld_1d_array_i32_trap:
4658 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
4659 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
4660 case Intrinsic::nvvm_suld_2d_i32_trap:
4661 case Intrinsic::nvvm_suld_2d_v2i32_trap:
4662 case Intrinsic::nvvm_suld_2d_v4i32_trap:
4663 case Intrinsic::nvvm_suld_2d_array_i32_trap:
4664 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
4665 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
4666 case Intrinsic::nvvm_suld_3d_i32_trap:
4667 case Intrinsic::nvvm_suld_3d_v2i32_trap:
4668 case Intrinsic::nvvm_suld_3d_v4i32_trap:
4669 case Intrinsic::nvvm_suld_1d_i32_zero:
4670 case Intrinsic::nvvm_suld_1d_v2i32_zero:
4671 case Intrinsic::nvvm_suld_1d_v4i32_zero:
4672 case Intrinsic::nvvm_suld_1d_array_i32_zero:
4673 case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
4674 case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
4675 case Intrinsic::nvvm_suld_2d_i32_zero:
4676 case Intrinsic::nvvm_suld_2d_v2i32_zero:
4677 case Intrinsic::nvvm_suld_2d_v4i32_zero:
4678 case Intrinsic::nvvm_suld_2d_array_i32_zero:
4679 case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
4680 case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
4681 case Intrinsic::nvvm_suld_3d_i32_zero:
4682 case Intrinsic::nvvm_suld_3d_v2i32_zero:
4683 case Intrinsic::nvvm_suld_3d_v4i32_zero:
4684 Info.opc = ISD::INTRINSIC_W_CHAIN;
4685 Info.memVT = MVT::i32;
4686 Info.ptrVal = nullptr;
4687 Info.offset = 0;
4688 Info.flags = MachineMemOperand::MOLoad;
4689 Info.align = Align(16);
4690 return true;
4691
4692 case Intrinsic::nvvm_suld_1d_i64_clamp:
4693 case Intrinsic::nvvm_suld_1d_v2i64_clamp:
4694 case Intrinsic::nvvm_suld_1d_array_i64_clamp:
4695 case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
4696 case Intrinsic::nvvm_suld_2d_i64_clamp:
4697 case Intrinsic::nvvm_suld_2d_v2i64_clamp:
4698 case Intrinsic::nvvm_suld_2d_array_i64_clamp:
4699 case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
4700 case Intrinsic::nvvm_suld_3d_i64_clamp:
4701 case Intrinsic::nvvm_suld_3d_v2i64_clamp:
4702 case Intrinsic::nvvm_suld_1d_i64_trap:
4703 case Intrinsic::nvvm_suld_1d_v2i64_trap:
4704 case Intrinsic::nvvm_suld_1d_array_i64_trap:
4705 case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
4706 case Intrinsic::nvvm_suld_2d_i64_trap:
4707 case Intrinsic::nvvm_suld_2d_v2i64_trap:
4708 case Intrinsic::nvvm_suld_2d_array_i64_trap:
4709 case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
4710 case Intrinsic::nvvm_suld_3d_i64_trap:
4711 case Intrinsic::nvvm_suld_3d_v2i64_trap:
4712 case Intrinsic::nvvm_suld_1d_i64_zero:
4713 case Intrinsic::nvvm_suld_1d_v2i64_zero:
4714 case Intrinsic::nvvm_suld_1d_array_i64_zero:
4715 case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
4716 case Intrinsic::nvvm_suld_2d_i64_zero:
4717 case Intrinsic::nvvm_suld_2d_v2i64_zero:
4718 case Intrinsic::nvvm_suld_2d_array_i64_zero:
4719 case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
4720 case Intrinsic::nvvm_suld_3d_i64_zero:
4721 case Intrinsic::nvvm_suld_3d_v2i64_zero:
4722 Info.opc = ISD::INTRINSIC_W_CHAIN;
4723 Info.memVT = MVT::i64;
4724 Info.ptrVal = nullptr;
4725 Info.offset = 0;
4726 Info.flags = MachineMemOperand::MOLoad;
4727 Info.align = Align(16);
4728 return true;
4729
4730 case Intrinsic::nvvm_tcgen05_ld_16x64b_x1:
4731 case Intrinsic::nvvm_tcgen05_ld_32x32b_x1:
4732 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x1: {
4733 Info.opc = ISD::INTRINSIC_W_CHAIN;
4734 Info.memVT = MVT::v1i32;
4735 Info.ptrVal = I.getArgOperand(0);
4736 Info.offset = 0;
4737 Info.flags = MachineMemOperand::MOLoad;
4738 Info.align.reset();
4739 return true;
4740 }
4741
4742 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
4743 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
4744 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
4745 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2: {
4746 Info.opc = ISD::INTRINSIC_W_CHAIN;
4747 Info.memVT = MVT::v2i32;
4748 Info.ptrVal = I.getArgOperand(0);
4749 Info.offset = 0;
4750 Info.flags = MachineMemOperand::MOLoad;
4751 Info.align.reset();
4752 return true;
4753 }
4754
4755 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
4756 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
4757 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
4758 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
4759 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4: {
4760 Info.opc = ISD::INTRINSIC_W_CHAIN;
4761 Info.memVT = MVT::v4i32;
4762 Info.ptrVal = I.getArgOperand(0);
4763 Info.offset = 0;
4764 Info.flags = MachineMemOperand::MOLoad;
4765 Info.align.reset();
4766 return true;
4767 }
4768
4769 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
4770 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
4771 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
4772 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
4773 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8: {
4774 Info.opc = ISD::INTRINSIC_W_CHAIN;
4775 Info.memVT = MVT::v8i32;
4776 Info.ptrVal = I.getArgOperand(0);
4777 Info.offset = 0;
4778 Info.flags = MachineMemOperand::MOLoad;
4779 Info.align.reset();
4780 return true;
4781 }
4782
4783 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
4784 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
4785 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
4786 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
4787 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16: {
4788 Info.opc = ISD::INTRINSIC_W_CHAIN;
4789 Info.memVT = MVT::v16i32;
4790 Info.ptrVal = I.getArgOperand(0);
4791 Info.offset = 0;
4792 Info.flags = MachineMemOperand::MOLoad;
4793 Info.align.reset();
4794 return true;
4795 }
4796
4797 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
4798 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
4799 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
4800 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
4801 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32: {
4802 Info.opc = ISD::INTRINSIC_W_CHAIN;
4803 Info.memVT = MVT::v32i32;
4804 Info.ptrVal = I.getArgOperand(0);
4805 Info.offset = 0;
4806 Info.flags = MachineMemOperand::MOLoad;
4807 Info.align.reset();
4808 return true;
4809 }
4810
4811 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
4812 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
4813 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
4814 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
4815 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64: {
4816 Info.opc = ISD::INTRINSIC_W_CHAIN;
4817 Info.memVT = MVT::v64i32;
4818 Info.ptrVal = I.getArgOperand(0);
4819 Info.offset = 0;
4820 Info.flags = MachineMemOperand::MOLoad;
4821 Info.align.reset();
4822 return true;
4823 }
4824
4825 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
4826 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
4827 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
4828 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
4829 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128: {
4830 Info.opc = ISD::INTRINSIC_W_CHAIN;
4831 Info.memVT = MVT::v128i32;
4832 Info.ptrVal = I.getArgOperand(0);
4833 Info.offset = 0;
4834 Info.flags = MachineMemOperand::MOLoad;
4835 Info.align.reset();
4836 return true;
4837 }
4838
4839 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
4840 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
4841 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1: {
4842 Info.opc = ISD::INTRINSIC_VOID;
4843 Info.memVT = MVT::i32;
4844 Info.ptrVal = I.getArgOperand(0);
4845 Info.offset = 0;
4846 Info.flags = MachineMemOperand::MOStore;
4847 Info.align.reset();
4848 return true;
4849 }
4850
4851 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
4852 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
4853 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
4854 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2: {
4855 Info.opc = ISD::INTRINSIC_VOID;
4856 Info.memVT = MVT::v2i32;
4857 Info.ptrVal = I.getArgOperand(0);
4858 Info.offset = 0;
4859 Info.flags = MachineMemOperand::MOStore;
4860 Info.align.reset();
4861 return true;
4862 }
4863
4864 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
4865 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
4866 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
4867 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
4868 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4: {
4869 Info.opc = ISD::INTRINSIC_VOID;
4870 Info.memVT = MVT::v4i32;
4871 Info.ptrVal = I.getArgOperand(0);
4872 Info.offset = 0;
4873 Info.flags = MachineMemOperand::MOStore;
4874 Info.align.reset();
4875 return true;
4876 }
4877
4878 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
4879 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
4880 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
4881 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
4882 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8: {
4883 Info.opc = ISD::INTRINSIC_VOID;
4884 Info.memVT = MVT::v8i32;
4885 Info.ptrVal = I.getArgOperand(0);
4886 Info.offset = 0;
4887 Info.flags = MachineMemOperand::MOStore;
4888 Info.align.reset();
4889 return true;
4890 }
4891
4892 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
4893 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
4894 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
4895 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
4896 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16: {
4897 Info.opc = ISD::INTRINSIC_VOID;
4898 Info.memVT = MVT::v16i32;
4899 Info.ptrVal = I.getArgOperand(0);
4900 Info.offset = 0;
4901 Info.flags = MachineMemOperand::MOStore;
4902 Info.align.reset();
4903 return true;
4904 }
4905
4906 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
4907 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
4908 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
4909 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
4910 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32: {
4911 Info.opc = ISD::INTRINSIC_VOID;
4912 Info.memVT = MVT::v32i32;
4913 Info.ptrVal = I.getArgOperand(0);
4914 Info.offset = 0;
4915 Info.flags = MachineMemOperand::MOStore;
4916 Info.align.reset();
4917 return true;
4918 }
4919
4920 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
4921 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
4922 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
4923 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
4924 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64: {
4925 Info.opc = ISD::INTRINSIC_VOID;
4926 Info.memVT = MVT::v64i32;
4927 Info.ptrVal = I.getArgOperand(0);
4928 Info.offset = 0;
4929 Info.flags = MachineMemOperand::MOStore;
4930 Info.align.reset();
4931 return true;
4932 }
4933
4934 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
4935 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
4936 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
4937 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
4938 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128: {
4939 Info.opc = ISD::INTRINSIC_VOID;
4940 Info.memVT = MVT::v128i32;
4941 Info.ptrVal = I.getArgOperand(0);
4942 Info.offset = 0;
4943 Info.flags = MachineMemOperand::MOStore;
4944 Info.align.reset();
4945 return true;
4946 }
4947 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
4948 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
4949 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
4950 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
4951 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
4952 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
4953 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
4954 case Intrinsic::
4955 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
4956 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
4957 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
4958 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
4959 case Intrinsic::
4960 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift: {
4961 // We are reading and writing back to TMem
4962 Info.opc = ISD::INTRINSIC_VOID;
4963 Info.memVT = MVT::v4i32;
4964 Info.ptrVal = I.getArgOperand(0);
4965 Info.offset = 0;
4966 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
4967 Info.align = Align(16);
4968 return true;
4969 }
4970
4971 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
4972 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
4973 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
4974 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
4975 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
4976 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
4977 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
4978 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
4979 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
4980 case Intrinsic::
4981 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
4982 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
4983 case Intrinsic::
4984 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift: {
4985 // We are reading and writing back to TMem
4986 Info.opc = ISD::INTRINSIC_VOID;
4987 Info.memVT = MVT::v8i32;
4988 Info.ptrVal = I.getArgOperand(0);
4989 Info.offset = 0;
4990 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
4991 Info.align = Align(16);
4992 return true;
4993 }
4994 }
4995 return false;
4996}
4997
4998/// getFunctionParamOptimizedAlign - since function arguments are passed via
4999/// .param space, we may want to increase their alignment in a way that
5000/// ensures that we can effectively vectorize their loads & stores. We can
5001 /// increase alignment only if the function has internal or private
5002 /// linkage, as for other linkage types callers may already rely on the default
5003/// alignment. To allow using 128-bit vectorized loads/stores, this function
5004/// ensures that alignment is 16 or greater.
5005 Align NVPTXTargetLowering::getFunctionParamOptimizedAlign(
5006 const Function *F, Type *ArgTy, const DataLayout &DL) const {
5007 // Capping the alignment to 128 bytes as that is the maximum alignment
5008 // supported by PTX.
5009 const Align ABITypeAlign = std::min(Align(128), DL.getABITypeAlign(ArgTy));
5010
5011 // If a function has linkage different from internal or private, we
5012 // must use default ABI alignment as external users rely on it. Same
5013 // for a function that may be called from a function pointer.
5014 if (!F || !F->hasLocalLinkage() ||
5015 F->hasAddressTaken(/*Users=*/nullptr,
5016 /*IgnoreCallbackUses=*/false,
5017 /*IgnoreAssumeLikeCalls=*/true,
5018 /*IgnoreLLVMUsed=*/true))
5019 return ABITypeAlign;
5020
5021 assert(!isKernelFunction(*F) && "Expect kernels to have non-local linkage");
5022 return std::max(Align(16), ABITypeAlign);
5023}
5024
5025/// Helper for computing alignment of a device function byval parameter.
5026 Align NVPTXTargetLowering::getFunctionByValParamAlign(
5027 const Function *F, Type *ArgTy, Align InitialAlign,
5028 const DataLayout &DL) const {
5029 Align ArgAlign = InitialAlign;
5030 // Try to increase alignment to enhance vectorization options.
5031 if (F)
5032 ArgAlign = std::max(ArgAlign, getFunctionParamOptimizedAlign(F, ArgTy, DL));
5033
5034 // Old ptx versions have a bug. When PTX code takes the address of a
5035 // byval parameter with alignment < 4, ptxas generates code to
5036 // spill the argument into memory. Alas, on sm_50+ ptxas generates
5037 // SASS code that fails with a misaligned access. To work around
5038 // the problem, make sure that we align byval parameters to at
5039 // least 4 bytes. This bug seems to be fixed starting from
5040 // ptxas > 9.0.
5041 // TODO: remove this after verifying the bug is not reproduced
5042 // on non-deprecated ptxas versions.
5043 if (ForceMinByValParamAlign)
5044 ArgAlign = std::max(ArgAlign, Align(4));
5045
5046 return ArgAlign;
5047}
5048
5049 // Helper for getting a function parameter name. The name is composed from
5050 // its index and the function name. A negative index corresponds to the
5051 // special parameter (unsized array) used for passing variable arguments.
5052 std::string NVPTXTargetLowering::getParamName(const Function *F,
5053 int Idx) const {
5054 std::string ParamName;
5055 raw_string_ostream ParamStr(ParamName);
5056
5057 ParamStr << getTargetMachine().getSymbol(F)->getName();
5058 if (Idx < 0)
5059 ParamStr << "_vararg";
5060 else
5061 ParamStr << "_param_" << Idx;
5062
5063 return ParamName;
5064}
5065
5066/// isLegalAddressingMode - Return true if the addressing mode represented
5067/// by AM is legal for this target, for a load/store of the specified type.
5068/// Used to guide target specific optimizations, like loop strength reduction
5069/// (LoopStrengthReduce.cpp) and memory optimization for address mode
5070/// (CodeGenPrepare.cpp)
5071 bool NVPTXTargetLowering::isLegalAddressingMode(const DataLayout &DL,
5072 const AddrMode &AM, Type *Ty,
5073 unsigned AS, Instruction *I) const {
5074 // AddrMode - This represents an addressing mode of:
5075 // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
5076 //
5077 // The legal address modes are
5078 // - [avar]
5079 // - [areg]
5080 // - [areg+immoff]
5081 // - [immAddr]
5082
5083 // immoff must fit in a signed 32-bit int
5084 if (!APInt(64, AM.BaseOffs).isSignedIntN(32))
5085 return false;
5086
5087 if (AM.BaseGV)
5088 return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
5089
5090 switch (AM.Scale) {
5091 case 0: // "r", "r+i" or "i" is allowed
5092 break;
5093 case 1:
5094 if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
5095 return false;
5096 // Otherwise we have r+i.
5097 break;
5098 default:
5099 // No scale > 1 is allowed
5100 return false;
5101 }
5102 return true;
5103}
5104
5105//===----------------------------------------------------------------------===//
5106// NVPTX Inline Assembly Support
5107//===----------------------------------------------------------------------===//
5108
5109/// getConstraintType - Given a constraint letter, return the type of
5110/// constraint it is for this target.
5111 NVPTXTargetLowering::ConstraintType
5112 NVPTXTargetLowering::getConstraintType(StringRef Constraint) const {
5113 if (Constraint.size() == 1) {
5114 switch (Constraint[0]) {
5115 default:
5116 break;
5117 case 'b':
5118 case 'r':
5119 case 'h':
5120 case 'c':
5121 case 'l':
5122 case 'f':
5123 case 'd':
5124 case 'q':
5125 case '0':
5126 case 'N':
5127 return C_RegisterClass;
5128 }
5129 }
5130 return TargetLowering::getConstraintType(Constraint);
5131}
5132
5133std::pair<unsigned, const TargetRegisterClass *>
5134 NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
5135 StringRef Constraint,
5136 MVT VT) const {
5137 if (Constraint.size() == 1) {
5138 switch (Constraint[0]) {
5139 case 'b':
5140 return std::make_pair(0U, &NVPTX::B1RegClass);
5141 case 'c':
5142 case 'h':
5143 return std::make_pair(0U, &NVPTX::B16RegClass);
5144 case 'r':
5145 case 'f':
5146 return std::make_pair(0U, &NVPTX::B32RegClass);
5147 case 'l':
5148 case 'N':
5149 case 'd':
5150 return std::make_pair(0U, &NVPTX::B64RegClass);
5151 case 'q': {
5152 if (STI.getSmVersion() < 70)
5153 report_fatal_error("Inline asm with 128 bit operands is only "
5154 "supported for sm_70 and higher!");
5155 return std::make_pair(0U, &NVPTX::B128RegClass);
5156 }
5157 }
5158 }
5159 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
5160}
5161
5162//===----------------------------------------------------------------------===//
5163// NVPTX DAG Combining
5164//===----------------------------------------------------------------------===//
5165
5166 bool NVPTXTargetLowering::allowFMA(MachineFunction &MF,
5167 CodeGenOptLevel OptLevel) const {
5168 // Always honor command-line argument
5169 if (FMAContractLevelOpt.getNumOccurrences() > 0)
5170 return FMAContractLevelOpt > 0;
5171
5172 // Do not contract if we're not optimizing the code.
5173 if (OptLevel == CodeGenOptLevel::None)
5174 return false;
5175
5176 // Honor TargetOptions flags that explicitly say fusion is okay.
5177 if (MF.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast)
5178 return true;
5179
5180 return false;
5181}
5182
5183static bool isConstZero(const SDValue &Operand) {
5184 const auto *Const = dyn_cast<ConstantSDNode>(Operand);
5185 return Const && Const->getZExtValue() == 0;
5186}
5187
5188/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
5189/// operands N0 and N1. This is a helper for PerformADDCombine that is
5190/// called with the default operands, and if that fails, with commuted
5191/// operands.
5192static SDValue
5193 PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
5194 TargetLowering::DAGCombinerInfo &DCI) {
5195 EVT VT = N0.getValueType();
5196
5197 // Since integer multiply-add costs the same as integer multiply
5198 // but is more costly than integer add, do the fusion only when
5199 // the mul is only used in the add.
5200 // TODO: this may not be true for later architectures, consider relaxing this
5201 if (!N0.getNode()->hasOneUse())
5202 return SDValue();
5203
5204 // fold (add (select cond, 0, (mul a, b)), c)
5205 // -> (select cond, c, (add (mul a, b), c))
5206 //
5207 if (N0.getOpcode() == ISD::SELECT) {
5208 unsigned ZeroOpNum;
5209 if (isConstZero(N0->getOperand(1)))
5210 ZeroOpNum = 1;
5211 else if (isConstZero(N0->getOperand(2)))
5212 ZeroOpNum = 2;
5213 else
5214 return SDValue();
5215
5216 SDValue M = N0->getOperand((ZeroOpNum == 1) ? 2 : 1);
5217 if (M->getOpcode() != ISD::MUL || !M.getNode()->hasOneUse())
5218 return SDValue();
5219
5220 SDLoc DL(N);
5221 SDValue Mul =
5222 DCI.DAG.getNode(ISD::MUL, DL, VT, M->getOperand(0), M->getOperand(1));
5223 SDValue MAD = DCI.DAG.getNode(ISD::ADD, DL, VT, Mul, N1);
5224 return DCI.DAG.getSelect(SDLoc(N), VT, N0->getOperand(0),
5225 ((ZeroOpNum == 1) ? N1 : MAD),
5226 ((ZeroOpNum == 1) ? MAD : N1));
5227 }
5228
5229 return SDValue();
5230}
5231
5232static SDValue
5233 PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
5234 TargetLowering::DAGCombinerInfo &DCI,
5235 CodeGenOptLevel OptLevel) {
5236 EVT VT = N0.getValueType();
5237 if (N0.getOpcode() == ISD::FMUL) {
5238 const auto *TLI = static_cast<const NVPTXTargetLowering *>(
5239 &DCI.DAG.getTargetLoweringInfo());
5240 if (!(TLI->allowFMA(DCI.DAG.getMachineFunction(), OptLevel) ||
5241 (N->getFlags().hasAllowContract() &&
5242 N0->getFlags().hasAllowContract())))
5243 return SDValue();
5244
5245 // For floating point:
5246 // Do the fusion only when the mul has less than 5 uses and all
5247 // are add.
5248 // The heuristic is that if a use is not an add, then that use
5249 // cannot be fused into fma, therefore mul is still needed anyway.
5250 // If there are more than 4 uses, even if they are all add, fusing
5251 // them will increase register pressure.
5252 //
5253 int numUses = 0;
5254 int nonAddCount = 0;
5255 for (const SDNode *User : N0.getNode()->users()) {
5256 numUses++;
5257 if (User->getOpcode() != ISD::FADD)
5258 ++nonAddCount;
5259 if (numUses >= 5)
5260 return SDValue();
5261 }
5262 if (nonAddCount) {
5263 int orderNo = N->getIROrder();
5264 int orderNo2 = N0.getNode()->getIROrder();
5265 // Simple heuristic here for considering potential register
5266 // pressure: the difference in IR order is used to measure the distance
5267 // between the def and the use; the longer the distance, the more likely
5268 // it is to cause register pressure.
5269 if (orderNo - orderNo2 < 500)
5270 return SDValue();
5271
5272 // Now, check if at least one of the FMUL's operands is live beyond the
5273 // node N, which guarantees that the FMA will not increase register
5274 // pressure at node N.
5275 bool opIsLive = false;
5276 const SDNode *left = N0.getOperand(0).getNode();
5277 const SDNode *right = N0.getOperand(1).getNode();
5278
5279 if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
5280 opIsLive = true;
5281
5282 if (!opIsLive)
5283 for (const SDNode *User : left->users()) {
5284 int orderNo3 = User->getIROrder();
5285 if (orderNo3 > orderNo) {
5286 opIsLive = true;
5287 break;
5288 }
5289 }
5290
5291 if (!opIsLive)
5292 for (const SDNode *User : right->users()) {
5293 int orderNo3 = User->getIROrder();
5294 if (orderNo3 > orderNo) {
5295 opIsLive = true;
5296 break;
5297 }
5298 }
5299
5300 if (!opIsLive)
5301 return SDValue();
5302 }
5303
5304 return DCI.DAG.getNode(ISD::FMA, SDLoc(N), VT, N0.getOperand(0),
5305 N0.getOperand(1), N1);
5306 }
5307
5308 return SDValue();
5309}
5310
5311/// Fold unpacking movs into a load by increasing the number of return values.
5312///
5313/// ex:
5314/// L: v2f16,ch = load <p>
5315/// a: f16 = extractelt L:0, 0
5316/// b: f16 = extractelt L:0, 1
5317/// use(a, b)
5318///
5319/// ...is turned into...
5320///
5321/// L: f16,f16,ch = LoadV2 <p>
5322/// use(L:0, L:1)
5323 static SDValue
5324 combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
5325 // Don't run this optimization before the legalizer
5326 if (!DCI.isAfterLegalizeDAG())
5327 return SDValue();
5328
5329 EVT ElementVT = N->getValueType(0);
5330 // Avoid non-packed types and v4i8
5331 if (!NVPTX::isPackedVectorTy(ElementVT) || ElementVT == MVT::v4i8)
5332 return SDValue();
5333
5334 // Check whether all outputs are either used by an extractelt or are
5335 // glue/chain nodes
5336 if (!all_of(N->uses(), [&](SDUse &U) {
5337 // Skip glue, chain nodes
5338 if (U.getValueType() == MVT::Glue || U.getValueType() == MVT::Other)
5339 return true;
5340 if (U.getUser()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
5341 if (N->getOpcode() != ISD::LOAD)
5342 return true;
5343 // Since this is an ISD::LOAD, check all extractelts are used. If
5344 // any are not used, we don't want to defeat another optimization that
5345 // will narrow the load.
5346 //
5347 // For example:
5348 //
5349 // L: v2f16,ch = load <p>
5350 // e0: f16 = extractelt L:0, 0
5351 // e1: f16 = extractelt L:0, 1 <-- unused
5352 // store e0
5353 //
5354 // Can be optimized by DAGCombiner to:
5355 //
5356 // L: f16,ch = load <p>
5357 // store L:0
5358 return !U.getUser()->use_empty();
5359 }
5360
5361 // Otherwise, this use prevents us from splitting a value.
5362 return false;
5363 }))
5364 return SDValue();
5365
5366 auto *LD = cast<MemSDNode>(N);
5367 SDLoc DL(LD);
5368
5369 // The new opcode after we double the number of outputs.
5370 unsigned Opcode;
5371 SmallVector<SDValue> Operands(LD->ops());
5372 unsigned OldNumOutputs; // non-glue, non-chain outputs
5373 switch (LD->getOpcode()) {
5374 case ISD::LOAD:
5375 OldNumOutputs = 1;
5376 // Any packed type is legal, so the legalizer will not have lowered
5377 // ISD::LOAD -> NVPTXISD::Load (unless it's under-aligned). We have to do it
5378 // here.
5379 Opcode = NVPTXISD::LoadV2;
5380 Operands.push_back(DCI.DAG.getIntPtrConstant(
5381 cast<LoadSDNode>(LD)->getExtensionType(), DL));
5382 break;
5383 case NVPTXISD::LoadV2:
5384 OldNumOutputs = 2;
5385 Opcode = NVPTXISD::LoadV4;
5386 break;
5387 case NVPTXISD::LoadV4:
5388 // V8 is only supported for f32. Don't forget, we're not changing the load
5389 // size here. This is already a 256-bit load.
5390 if (ElementVT != MVT::v2f32)
5391 return SDValue();
5392 OldNumOutputs = 4;
5393 Opcode = NVPTXISD::LoadV8;
5394 break;
5395 case NVPTXISD::LoadV8:
5396 // PTX doesn't support the next doubling of outputs
5397 return SDValue();
5398 }
5399
5400 // the non-glue, non-chain outputs in the new load
5401 const unsigned NewNumOutputs = OldNumOutputs * 2;
5402 SmallVector<EVT> NewVTs(NewNumOutputs, ElementVT.getVectorElementType());
5403 // add remaining chain and glue values
5404 NewVTs.append(LD->value_begin() + OldNumOutputs, LD->value_end());
5405
5406 // Create the new load
5407 SDValue NewLoad = DCI.DAG.getMemIntrinsicNode(
5408 Opcode, DL, DCI.DAG.getVTList(NewVTs), Operands, LD->getMemoryVT(),
5409 LD->getMemOperand());
5410
5411 // Now we use a combination of BUILD_VECTORs and a MERGE_VALUES node to keep
5412 // the outputs the same. These nodes will be optimized away in later
5413 // DAGCombiner iterations.
5414 SmallVector<SDValue> Results;
5415 for (unsigned I : seq(OldNumOutputs))
5416 Results.push_back(DCI.DAG.getBuildVector(
5417 ElementVT, DL, {NewLoad.getValue(I * 2), NewLoad.getValue(I * 2 + 1)}));
5418 // Add remaining chain and glue nodes
5419 for (unsigned I : seq(NewLoad->getNumValues() - NewNumOutputs))
5420 Results.push_back(NewLoad.getValue(NewNumOutputs + I));
5421
5422 return DCI.DAG.getMergeValues(Results, DL);
5423}
5424
5425/// Fold packing movs into a store.
5426///
5427/// ex:
5428/// v1: v2f16 = BUILD_VECTOR a:f16, b:f16
5429/// v2: v2f16 = BUILD_VECTOR c:f16, d:f16
5430/// StoreV2 v1, v2
5431///
5432/// ...is turned into...
5433///
5434/// StoreV4 a, b, c, d
5435 static SDValue combinePackingMovIntoStore(SDNode *N,
5436 TargetLowering::DAGCombinerInfo &DCI,
5437 unsigned Front, unsigned Back) {
5438 // We want to run this as late as possible since other optimizations may
5439 // eliminate the BUILD_VECTORs.
5440 if (!DCI.isAfterLegalizeDAG())
5441 return SDValue();
5442
5443 // Get the type of the operands being stored.
5444 EVT ElementVT = N->getOperand(Front).getValueType();
5445
5446 // Avoid non-packed types and v4i8
5447 if (!NVPTX::isPackedVectorTy(ElementVT) || ElementVT == MVT::v4i8)
5448 return SDValue();
5449
5450 auto *ST = cast<MemSDNode>(N);
5451
5452 // The new opcode after we double the number of operands.
5453 unsigned Opcode;
5454 switch (N->getOpcode()) {
5455 case ISD::STORE:
5456 // Any packed type is legal, so the legalizer will not have lowered
5457 // ISD::STORE -> NVPTXISD::Store (unless it's under-aligned). We have to do
5458 // it here.
5459 Opcode = NVPTXISD::StoreV2;
5460 break;
5461 case NVPTXISD::StoreV2:
5462 Opcode = NVPTXISD::StoreV4;
5463 break;
5464 case NVPTXISD::StoreV4:
5465 // V8 is only supported for f32. Don't forget, we're not changing the store
5466 // size here. This is already a 256-bit store.
5467 if (ElementVT != MVT::v2f32)
5468 return SDValue();
5469 Opcode = NVPTXISD::StoreV8;
5470 break;
5471 case NVPTXISD::StoreV8:
5472 // PTX doesn't support the next doubling of operands
5473 return SDValue();
5474 default:
5475 llvm_unreachable("Unhandled store opcode");
5476 }
5477
5478 // Scan the operands and if they're all BUILD_VECTORs, we'll have gathered
5479 // their elements.
5480 SmallVector<SDValue, 4> Operands(N->ops().take_front(Front));
5481 for (SDValue BV : N->ops().drop_front(Front).drop_back(Back)) {
5482 if (BV.getOpcode() != ISD::BUILD_VECTOR)
5483 return SDValue();
5484
5485 // If the operand has multiple uses, this optimization can increase register
5486 // pressure.
5487 if (!BV.hasOneUse())
5488 return SDValue();
5489
5490 // DAGCombiner visits nodes bottom-up. Check the BUILD_VECTOR operands for
5491 // any signs they may be folded by some other pattern or rule.
5492 for (SDValue Op : BV->ops()) {
5493 // Peek through bitcasts
5494 if (Op.getOpcode() == ISD::BITCAST)
5495 Op = Op.getOperand(0);
5496
5497 // This may be folded into a PRMT.
5498 if (Op.getValueType() == MVT::i16 && Op.getOpcode() == ISD::TRUNCATE &&
5499 Op->getOperand(0).getValueType() == MVT::i32)
5500 return SDValue();
5501
5502 // This may be folded into cvt.bf16x2
5503 if (Op.getOpcode() == ISD::FP_ROUND)
5504 return SDValue();
5505 }
5506 Operands.append({BV.getOperand(0), BV.getOperand(1)});
5507 }
5508 Operands.append(N->op_end() - Back, N->op_end());
5509
5510 // Now we replace the store
5511 return DCI.DAG.getMemIntrinsicNode(Opcode, SDLoc(N), N->getVTList(), Operands,
5512 ST->getMemoryVT(), ST->getMemOperand());
5513}
5514
5515 static SDValue combineSTORE(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
5516 const NVPTXSubtarget &STI) {
5517
5518 if (DCI.isBeforeLegalize() && N->getOpcode() == ISD::STORE) {
5519 // Here is our chance to custom lower a store with a non-simple type.
5520 // Unfortunately, we can't do this in the legalizer because there is no
5521 // way to setOperationAction for a non-simple type.
5522 StoreSDNode *ST = cast<StoreSDNode>(N);
5523 if (!ST->getValue().getValueType().isSimple())
5524 return lowerSTOREVector(SDValue(ST, 0), DCI.DAG, STI);
5525 }
5526
5527 return combinePackingMovIntoStore(N, DCI, 1, 2);
5528}
5529
5530 static SDValue combineLOAD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
5531 const NVPTXSubtarget &STI) {
5532 if (DCI.isBeforeLegalize() && N->getOpcode() == ISD::LOAD) {
5533 // Here is our chance to custom lower a load with a non-simple type.
5534 // Unfortunately, we can't do this in the legalizer because there is no
5535 // way to setOperationAction for a non-simple type.
5536 if (!N->getValueType(0).isSimple())
5537 return lowerLoadVector(N, DCI.DAG, STI);
5538 }
5539
5540 return combineUnpackingMovIntoLoad(N, DCI);
5541}
5542
5543/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
5544///
5545 static SDValue PerformADDCombine(SDNode *N,
5546 TargetLowering::DAGCombinerInfo &DCI,
5547 CodeGenOptLevel OptLevel) {
5548 if (OptLevel == CodeGenOptLevel::None)
5549 return SDValue();
5550
5551 SDValue N0 = N->getOperand(0);
5552 SDValue N1 = N->getOperand(1);
5553
5554 // Skip non-integer, non-scalar case
5555 EVT VT = N0.getValueType();
5556 if (VT.isVector() || VT != MVT::i32)
5557 return SDValue();
5558
5559 // First try with the default operand order.
5560 if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI))
5561 return Result;
5562
5563 // If that didn't work, try again with the operands commuted.
5564 return PerformADDCombineWithOperands(N, N1, N0, DCI);
5565}
5566
5567/// PerformFADDCombine - Target-specific dag combine xforms for ISD::FADD.
5568///
5569 static SDValue PerformFADDCombine(SDNode *N,
5570 TargetLowering::DAGCombinerInfo &DCI,
5571 CodeGenOptLevel OptLevel) {
5572 SDValue N0 = N->getOperand(0);
5573 SDValue N1 = N->getOperand(1);
5574
5575 EVT VT = N0.getValueType();
5576 if (VT.isVector() || !(VT == MVT::f32 || VT == MVT::f64))
5577 return SDValue();
5578
5579 // First try with the default operand order.
5580 if (SDValue Result = PerformFADDCombineWithOperands(N, N0, N1, DCI, OptLevel))
5581 return Result;
5582
5583 // If that didn't work, try again with the operands commuted.
5584 return PerformFADDCombineWithOperands(N, N1, N0, DCI, OptLevel);
5585}
5586
5587/// Get 3-input version of a 2-input min/max opcode
5588static unsigned getMinMax3Opcode(unsigned MinMax2Opcode) {
5589 switch (MinMax2Opcode) {
5590 case ISD::FMAXNUM:
5591 case ISD::FMAXIMUMNUM:
5592 return NVPTXISD::FMAXNUM3;
5593 case ISD::FMINNUM:
5594 case ISD::FMINIMUMNUM:
5595 return NVPTXISD::FMINNUM3;
5596 case ISD::FMAXIMUM:
5597 return NVPTXISD::FMAXIMUM3;
5598 case ISD::FMINIMUM:
5599 return NVPTXISD::FMINIMUM3;
5600 default:
5601 llvm_unreachable("Invalid 2-input min/max opcode");
5602 }
5603}
5604
5605/// PerformFMinMaxCombine - Combine (fmaxnum (fmaxnum a, b), c) into
5606/// (fmaxnum3 a, b, c). Also covers other llvm min/max intrinsics.
5607 static SDValue PerformFMinMaxCombine(SDNode *N,
5608 TargetLowering::DAGCombinerInfo &DCI,
5609 unsigned PTXVersion, unsigned SmVersion) {
5610
5611 // 3-input min/max requires PTX 8.8+ and SM_100+, and only supports f32s
5612 EVT VT = N->getValueType(0);
5613 if (VT != MVT::f32 || PTXVersion < 88 || SmVersion < 100)
5614 return SDValue();
5615
5616 SDValue Op0 = N->getOperand(0);
5617 SDValue Op1 = N->getOperand(1);
5618 unsigned MinMaxOp2 = N->getOpcode();
5619 unsigned MinMaxOp3 = getMinMax3Opcode(MinMaxOp2);
5620
5621 if (Op0.getOpcode() == MinMaxOp2 && Op0.hasOneUse()) {
5622 // (maxnum (maxnum a, b), c) -> (maxnum3 a, b, c)
5623 SDValue A = Op0.getOperand(0);
5624 SDValue B = Op0.getOperand(1);
5625 SDValue C = Op1;
5626 return DCI.DAG.getNode(MinMaxOp3, SDLoc(N), VT, A, B, C, N->getFlags());
5627 } else if (Op1.getOpcode() == MinMaxOp2 && Op1.hasOneUse()) {
5628 // (maxnum a, (maxnum b, c)) -> (maxnum3 a, b, c)
5629 SDValue A = Op0;
5630 SDValue B = Op1.getOperand(0);
5631 SDValue C = Op1.getOperand(1);
5632 return DCI.DAG.getNode(MinMaxOp3, SDLoc(N), VT, A, B, C, N->getFlags());
5633 }
5634 return SDValue();
5635}
5636
5637 static SDValue PerformREMCombine(SDNode *N,
5638 TargetLowering::DAGCombinerInfo &DCI,
5639 CodeGenOptLevel OptLevel) {
5640 assert(N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM);
5641
5642 // Don't do anything at less than -O2.
5643 if (OptLevel < CodeGenOptLevel::Default)
5644 return SDValue();
5645
5646 SelectionDAG &DAG = DCI.DAG;
5647 SDLoc DL(N);
5648 EVT VT = N->getValueType(0);
5649 bool IsSigned = N->getOpcode() == ISD::SREM;
5650 unsigned DivOpc = IsSigned ? ISD::SDIV : ISD::UDIV;
5651
5652 const SDValue &Num = N->getOperand(0);
5653 const SDValue &Den = N->getOperand(1);
5654
5655 for (const SDNode *U : Num->users()) {
5656 if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
5657 U->getOperand(1) == Den) {
5658 // Num % Den -> Num - (Num / Den) * Den
5659 return DAG.getNode(ISD::SUB, DL, VT, Num,
5660 DAG.getNode(ISD::MUL, DL, VT,
5661 DAG.getNode(DivOpc, DL, VT, Num, Den),
5662 Den));
5663 }
5664 }
5665 return SDValue();
5666}
5667
5668// (sign_extend|zero_extend (mul|shl) x, y) -> (mul.wide x, y)
5669 static SDValue combineMulWide(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
5670 CodeGenOptLevel OptLevel) {
5671 if (OptLevel == CodeGenOptLevel::None)
5672 return SDValue();
5673
5674 SDValue Op = N->getOperand(0);
5675 if (!Op.hasOneUse())
5676 return SDValue();
5677 EVT ToVT = N->getValueType(0);
5678 EVT FromVT = Op.getValueType();
5679 if (!((ToVT == MVT::i32 && FromVT == MVT::i16) ||
5680 (ToVT == MVT::i64 && FromVT == MVT::i32)))
5681 return SDValue();
5682 if (!(Op.getOpcode() == ISD::MUL ||
5683 (Op.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Op.getOperand(1)))))
5684 return SDValue();
5685
5686 SDLoc DL(N);
5687 unsigned ExtOpcode = N->getOpcode();
5688 unsigned Opcode = 0;
5689 if (ExtOpcode == ISD::SIGN_EXTEND && Op->getFlags().hasNoSignedWrap())
5690 Opcode = NVPTXISD::MUL_WIDE_SIGNED;
5691 else if (ExtOpcode == ISD::ZERO_EXTEND && Op->getFlags().hasNoUnsignedWrap())
5692 Opcode = NVPTXISD::MUL_WIDE_UNSIGNED;
5693 else
5694 return SDValue();
5695 SDValue RHS = Op.getOperand(1);
5696 if (Op.getOpcode() == ISD::SHL) {
5697 const auto ShiftAmt = Op.getConstantOperandVal(1);
5698 const auto MulVal = APInt(ToVT.getSizeInBits(), 1) << ShiftAmt;
5699 RHS = DCI.DAG.getConstant(MulVal, DL, ToVT);
5700 }
5701 return DCI.DAG.getNode(Opcode, DL, ToVT, Op.getOperand(0), RHS);
5702}
5703
5704 enum OperandSignedness {
5705 Signed = 0,
5706 Unsigned,
5707 Unknown
5708 };
5709
5710/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
5711/// that can be demoted to \p OptSize bits without loss of information. The
5712/// signedness of the operand, if determinable, is placed in \p S.
5713 static bool IsMulWideOperandDemotable(SDValue Op,
5714 unsigned OptSize,
5715 OperandSignedness &S) {
5716 S = Unknown;
5717
5718 if (Op.getOpcode() == ISD::SIGN_EXTEND ||
5719 Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
5720 EVT OrigVT = Op.getOperand(0).getValueType();
5721 if (OrigVT.getFixedSizeInBits() <= OptSize) {
5722 S = Signed;
5723 return true;
5724 }
5725 } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
5726 EVT OrigVT = Op.getOperand(0).getValueType();
5727 if (OrigVT.getFixedSizeInBits() <= OptSize) {
5728 S = Unsigned;
5729 return true;
5730 }
5731 }
5732
5733 return false;
5734}
5735
5736/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
5737/// be demoted to \p OptSize bits without loss of information. If the operands
5738/// contain a constant, it should appear as the RHS operand. The signedness of
5739/// the operands is placed in \p IsSigned.
5740 static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
5741 unsigned OptSize,
5742 bool &IsSigned) {
5743 OperandSignedness LHSSign;
5744
5745 // The LHS operand must be a demotable op
5746 if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))
5747 return false;
5748
5749 // We should have been able to determine the signedness from the LHS
5750 if (LHSSign == Unknown)
5751 return false;
5752
5753 IsSigned = (LHSSign == Signed);
5754
5755 // The RHS can be a demotable op or a constant
5756 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
5757 const APInt &Val = CI->getAPIntValue();
5758 if (LHSSign == Unsigned) {
5759 return Val.isIntN(OptSize);
5760 } else {
5761 return Val.isSignedIntN(OptSize);
5762 }
5763 } else {
5764 OperandSignedness RHSSign;
5765 if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))
5766 return false;
5767
5768 return LHSSign == RHSSign;
5769 }
5770}
5771
5772/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
5773/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
5774/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
5775/// amount.
5776 static SDValue TryMULWIDECombine(SDNode *N,
5777 TargetLowering::DAGCombinerInfo &DCI) {
5778 EVT MulType = N->getValueType(0);
5779 if (MulType != MVT::i32 && MulType != MVT::i64) {
5780 return SDValue();
5781 }
5782
5783 SDLoc DL(N);
5784 unsigned OptSize = MulType.getSizeInBits() >> 1;
5785 SDValue LHS = N->getOperand(0);
5786 SDValue RHS = N->getOperand(1);
5787
5788 // Canonicalize the multiply so the constant (if any) is on the right
5789 if (N->getOpcode() == ISD::MUL) {
5790 if (isa<ConstantSDNode>(LHS)) {
5791 std::swap(LHS, RHS);
5792 }
5793 }
5794
5795 // If we have a SHL, determine the actual multiply amount
5796 if (N->getOpcode() == ISD::SHL) {
5797 ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
5798 if (!ShlRHS) {
5799 return SDValue();
5800 }
5801
5802 APInt ShiftAmt = ShlRHS->getAPIntValue();
5803 unsigned BitWidth = MulType.getSizeInBits();
5804 if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
5805 APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
5806 RHS = DCI.DAG.getConstant(MulVal, DL, MulType);
5807 } else {
5808 return SDValue();
5809 }
5810 }
5811
5812 bool Signed;
5813 // Verify that our operands are demotable
5814 if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {
5815 return SDValue();
5816 }
5817
5818 EVT DemotedVT;
5819 if (MulType == MVT::i32) {
5820 DemotedVT = MVT::i16;
5821 } else {
5822 DemotedVT = MVT::i32;
5823 }
5824
5825 // Truncate the operands to the correct size. Note that these are just for
5826 // type consistency and will (likely) be eliminated in later phases.
5827 SDValue TruncLHS =
5828 DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, LHS);
5829 SDValue TruncRHS =
5830 DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, RHS);
5831
5832 unsigned Opc;
5833 if (Signed) {
5834 Opc = NVPTXISD::MUL_WIDE_SIGNED;
5835 } else {
5836 Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
5837 }
5838
5839 return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
5840}
5841
5842static bool isConstOne(const SDValue &Operand) {
5843 const auto *Const = dyn_cast<ConstantSDNode>(Operand);
5844 return Const && Const->getZExtValue() == 1;
5845}
5846
5847 static SDValue matchMADConstOnePattern(SDValue Add) {
5848 if (Add->getOpcode() != ISD::ADD)
5849 return SDValue();
5850
5851 if (isConstOne(Add->getOperand(0)))
5852 return Add->getOperand(1);
5853
5854 if (isConstOne(Add->getOperand(1)))
5855 return Add->getOperand(0);
5856
5857 return SDValue();
5858}
5859
5860 static SDValue combineMADConstOne(SDValue X, SDValue Add, EVT VT, SDLoc DL,
5861 TargetLowering::DAGCombinerInfo &DCI) {
5862
5863 if (SDValue Y = matchMADConstOnePattern(Add)) {
5864 SDValue Mul = DCI.DAG.getNode(ISD::MUL, DL, VT, X, Y);
5865 return DCI.DAG.getNode(ISD::ADD, DL, VT, Mul, X);
5866 }
5867
5868 return SDValue();
5869}
5870
5871 static SDValue combineMulSelectConstOne(SDValue X, SDValue Select, EVT VT,
5872 SDLoc DL,
5873 TargetLowering::DAGCombinerInfo &DCI) {
5874 if (Select->getOpcode() != ISD::SELECT)
5875 return SDValue();
5876
5877 SDValue Cond = Select->getOperand(0);
5878
5879 unsigned ConstOpNo;
5880 if (isConstOne(Select->getOperand(1)))
5881 ConstOpNo = 1;
5882 else if (isConstOne(Select->getOperand(2)))
5883 ConstOpNo = 2;
5884 else
5885 return SDValue();
5886
5887 SDValue Y = Select->getOperand((ConstOpNo == 1) ? 2 : 1);
5888
5889 // Do not combine if the resulting sequence is not obviously profitable.
5890 if (!matchMADConstOnePattern(Y))
5891 return SDValue();
5892
5893 SDValue NewMul = DCI.DAG.getNode(ISD::MUL, DL, VT, X, Y);
5894
5895 return DCI.DAG.getNode(ISD::SELECT, DL, VT, Cond,
5896 (ConstOpNo == 1) ? X : NewMul,
5897 (ConstOpNo == 1) ? NewMul : X);
5898}
5899
5900static SDValue
5901 PerformMULCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
5902 TargetLowering::DAGCombinerInfo &DCI) {
5903
5904 EVT VT = N0.getValueType();
5905 if (VT.isVector())
5906 return SDValue();
5907
5908 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
5909 return SDValue();
5910
5911 SDLoc DL(N);
5912
5913 // (mul x, (add y, 1)) -> (add (mul x, y), x)
5914 if (SDValue Res = combineMADConstOne(N0, N1, VT, DL, DCI))
5915 return Res;
5916 if (SDValue Res = combineMADConstOne(N1, N0, VT, DL, DCI))
5917 return Res;
5918
5919 // (mul x, (select y, 1)) -> (select (mul x, y), x)
5920 if (SDValue Res = combineMulSelectConstOne(N0, N1, VT, DL, DCI))
5921 return Res;
5922 if (SDValue Res = combineMulSelectConstOne(N1, N0, VT, DL, DCI))
5923 return Res;
5924
5925 return SDValue();
5926}
5927
5928/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
5929 static SDValue PerformMULCombine(SDNode *N,
5930 TargetLowering::DAGCombinerInfo &DCI,
5931 CodeGenOptLevel OptLevel) {
5932 if (OptLevel == CodeGenOptLevel::None)
5933 return SDValue();
5934
5935 if (SDValue Ret = TryMULWIDECombine(N, DCI))
5936 return Ret;
5937
5938 SDValue N0 = N->getOperand(0);
5939 SDValue N1 = N->getOperand(1);
5940 return PerformMULCombineWithOperands(N, N0, N1, DCI);
5941}
5942
5943/// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
5944 static SDValue PerformSHLCombine(SDNode *N,
5945 TargetLowering::DAGCombinerInfo &DCI,
5946 CodeGenOptLevel OptLevel) {
5947 if (OptLevel > CodeGenOptLevel::None) {
5948 // Try mul.wide combining at OptLevel > 0
5949 if (SDValue Ret = TryMULWIDECombine(N, DCI))
5950 return Ret;
5951 }
5952
5953 return SDValue();
5954}
5955
5956 static SDValue PerformSETCCCombine(SDNode *N,
5957 TargetLowering::DAGCombinerInfo &DCI,
5958 unsigned int SmVersion) {
5959 EVT CCType = N->getValueType(0);
5960 SDValue A = N->getOperand(0);
5961 SDValue B = N->getOperand(1);
5962
5963 EVT AType = A.getValueType();
5964 if (!(CCType == MVT::v2i1 && (AType == MVT::v2f16 || AType == MVT::v2bf16)))
5965 return SDValue();
5966
5967 if (A.getValueType() == MVT::v2bf16 && SmVersion < 90)
5968 return SDValue();
5969
5970 SDLoc DL(N);
5971 // setp.f16x2 returns two scalar predicates, which we need to
5972 // convert back to v2i1. The returned result will be scalarized by
5973 // the legalizer, but the comparison will remain a single vector
5974 // instruction.
5975 SDValue CCNode = DCI.DAG.getNode(
5976 A.getValueType() == MVT::v2f16 ? NVPTXISD::SETP_F16X2
5977 : NVPTXISD::SETP_BF16X2,
5978 DL, DCI.DAG.getVTList(MVT::i1, MVT::i1), {A, B, N->getOperand(2)});
5979 return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, CCType, CCNode.getValue(0),
5980 CCNode.getValue(1));
5981}
5982
5983 static SDValue PerformEXTRACTCombine(SDNode *N,
5984 TargetLowering::DAGCombinerInfo &DCI) {
5985 SDValue Vector = N->getOperand(0);
5986 if (Vector->getOpcode() == ISD::FREEZE)
5987 Vector = Vector->getOperand(0);
5988 SDLoc DL(N);
5989 EVT VectorVT = Vector.getValueType();
5990 if (Vector->getOpcode() == ISD::LOAD && VectorVT.isSimple() &&
5991 IsPTXVectorType(VectorVT.getSimpleVT()))
5992 return SDValue(); // Native vector loads already combine nicely w/
5993 // extract_vector_elt.
5994 // Don't mess with singletons or packed types (v2*32, v2*16, v4i8 and v8i8),
5995 // we already handle them OK.
5996 if (VectorVT.getVectorNumElements() == 1 ||
5997 NVPTX::isPackedVectorTy(VectorVT) || VectorVT == MVT::v8i8)
5998 return SDValue();
5999
6000 // Don't mess with undef values as sra may be simplified to 0, not undef.
6001 if (Vector->isUndef() || ISD::allOperandsUndef(Vector.getNode()))
6002 return SDValue();
6003
6004 uint64_t VectorBits = VectorVT.getSizeInBits();
6005 // We only handle the types we can extract in-register.
6006 if (!(VectorBits == 16 || VectorBits == 32 || VectorBits == 64))
6007 return SDValue();
6008
6009 ConstantSDNode *Index = dyn_cast<ConstantSDNode>(N->getOperand(1));
6010 // Index == 0 is handled by generic DAG combiner.
6011 if (!Index || Index->getZExtValue() == 0)
6012 return SDValue();
6013
6014 MVT IVT = MVT::getIntegerVT(VectorBits);
6015 EVT EltVT = VectorVT.getVectorElementType();
6016 EVT EltIVT = EltVT.changeTypeToInteger();
6017 uint64_t EltBits = EltVT.getScalarSizeInBits();
6018
6019 SDValue Result = DCI.DAG.getNode(
6020 ISD::TRUNCATE, DL, EltIVT,
6021 DCI.DAG.getNode(
6022 ISD::SRA, DL, IVT, DCI.DAG.getNode(ISD::BITCAST, DL, IVT, Vector),
6023 DCI.DAG.getConstant(Index->getZExtValue() * EltBits, DL, IVT)));
6024
6025 // If element has non-integer type, bitcast it back to the expected type.
6026 if (EltVT != EltIVT)
6027 Result = DCI.DAG.getNode(ISD::BITCAST, DL, EltVT, Result);
6028 // Past the legalizer, we may need to extend i8 -> i16 to match the register type.
6029 if (EltVT != N->getValueType(0))
6030 Result = DCI.DAG.getNode(ISD::ANY_EXTEND, DL, N->getValueType(0), Result);
6031
6032 return Result;
6033}
6034
6035 static SDValue PerformVSELECTCombine(SDNode *N,
6036 TargetLowering::DAGCombinerInfo &DCI) {
6037 SDValue VA = N->getOperand(1);
6038 EVT VectorVT = VA.getValueType();
6039 if (VectorVT != MVT::v4i8)
6040 return SDValue();
6041
6042 // We need to split the vselect into individual per-element operations. Because
6043 // we use BFE/BFI instructions for byte extraction/insertion, we end up with
6044 // 32-bit values, so we may as well do the comparison as i32 to avoid
6045 // conversions to/from i16 normally used for i8 values.
6046 SmallVector<SDValue, 4> E;
6047 SDLoc DL(N);
6048 SDValue VCond = N->getOperand(0);
6049 SDValue VB = N->getOperand(2);
6050 for (int I = 0; I < 4; ++I) {
6051 SDValue C = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i1, VCond,
6052 DCI.DAG.getConstant(I, DL, MVT::i32));
6053 SDValue EA = DCI.DAG.getAnyExtOrTrunc(
6054 DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8, VA,
6055 DCI.DAG.getConstant(I, DL, MVT::i32)),
6056 DL, MVT::i32);
6057 SDValue EB = DCI.DAG.getAnyExtOrTrunc(
6058 DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8, VB,
6059 DCI.DAG.getConstant(I, DL, MVT::i32)),
6060 DL, MVT::i32);
6061 E.push_back(DCI.DAG.getAnyExtOrTrunc(
6062 DCI.DAG.getNode(ISD::SELECT, DL, MVT::i32, C, EA, EB), DL, MVT::i8));
6063 }
6064 return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i8, E);
6065}
6066
6067static SDValue
6068 PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
6069 auto VT = N->getValueType(0);
6070 if (!DCI.isAfterLegalizeDAG() ||
6071 // only process v2*16 types
6072 !(NVPTX::isPackedVectorTy(VT) && VT.is32BitVector() &&
6073 VT.getVectorNumElements() == 2))
6074 return SDValue();
6075
6076 auto Op0 = N->getOperand(0);
6077 auto Op1 = N->getOperand(1);
6078
6079 // Start out by assuming we want to take the lower 2 bytes of each i32
6080 // operand.
6081 uint64_t Op0Bytes = 0x10;
6082 uint64_t Op1Bytes = 0x54;
6083
6084 std::pair<SDValue *, uint64_t *> OpData[2] = {{&Op0, &Op0Bytes},
6085 {&Op1, &Op1Bytes}};
6086
6087 // Check that each operand is an i16, truncated from an i32 operand. We'll
6088 // select individual bytes from those original operands. Optionally, fold in a
6089 // shift right of that original operand.
6090 for (auto &[Op, OpBytes] : OpData) {
6091 // Eat up any bitcast
6092 if (Op->getOpcode() == ISD::BITCAST)
6093 *Op = Op->getOperand(0);
6094
6095 if (!(Op->getValueType() == MVT::i16 && Op->getOpcode() == ISD::TRUNCATE &&
6096 Op->getOperand(0).getValueType() == MVT::i32))
6097 return SDValue();
6098
6099 // If the truncate has multiple uses, this optimization can increase
6100 // register pressure
6101 if (!Op->hasOneUse())
6102 return SDValue();
6103
6104 *Op = Op->getOperand(0);
6105
6106 // Optionally, fold in a shift-right of the original operand and let permute
6107 // pick the two higher bytes of the original value directly.
6108 if (Op->getOpcode() == ISD::SRL && isa<ConstantSDNode>(Op->getOperand(1))) {
6109 if (cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue() == 16) {
6110 // Shift the PRMT byte selector to pick upper bytes from each respective
6111 // value, instead of the lower ones: 0x10 -> 0x32, 0x54 -> 0x76
6112 assert((*OpBytes == 0x10 || *OpBytes == 0x54) &&
6113 "PRMT selector values out of range");
6114 *OpBytes += 0x22;
6115 *Op = Op->getOperand(0);
6116 }
6117 }
6118 }
6119
6120 SDLoc DL(N);
6121 auto &DAG = DCI.DAG;
6122
6123 auto PRMT =
6124 getPRMT(DAG.getBitcast(MVT::i32, Op0), DAG.getBitcast(MVT::i32, Op1),
6125 (Op1Bytes << 8) | Op0Bytes, DL, DAG);
6126 return DAG.getBitcast(VT, PRMT);
6127}
6128
6129 static SDValue combineADDRSPACECAST(SDNode *N,
6130 TargetLowering::DAGCombinerInfo &DCI) {
6131 auto *ASCN1 = cast<AddrSpaceCastSDNode>(N);
6132
6133 if (auto *ASCN2 = dyn_cast<AddrSpaceCastSDNode>(ASCN1->getOperand(0))) {
6134 assert(ASCN2->getDestAddressSpace() == ASCN1->getSrcAddressSpace());
6135
6136 // Fold asc[B -> A](asc[A -> B](x)) -> x
6137 if (ASCN1->getDestAddressSpace() == ASCN2->getSrcAddressSpace())
6138 return ASCN2->getOperand(0);
6139 }
6140
6141 return SDValue();
6142}
6143
6144// Given a constant selector value and a prmt mode, return the selector value
6145// normalized to the generic prmt mode. See the PTX ISA documentation for more
6146// details:
6147// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-prmt
6148static APInt getPRMTSelector(const APInt &Selector, unsigned Mode) {
6149 assert(Selector.getBitWidth() == 32 && "PRMT must have i32 operands");
6150
6151 if (Mode == NVPTX::PTXPrmtMode::NONE)
6152 return Selector;
6153
6154 const unsigned V = Selector.trunc(2).getZExtValue();
6155
6156 const auto GetSelector = [](unsigned S0, unsigned S1, unsigned S2,
6157 unsigned S3) {
6158 return APInt(32, S0 | (S1 << 4) | (S2 << 8) | (S3 << 12));
6159 };
6160
6161 switch (Mode) {
6162 case NVPTX::PTXPrmtMode::F4E:
6163 return GetSelector(V, V + 1, V + 2, V + 3);
6164 case NVPTX::PTXPrmtMode::B4E:
6165 return GetSelector(V, (V - 1) & 7, (V - 2) & 7, (V - 3) & 7);
6166 case NVPTX::PTXPrmtMode::RC8:
6167 return GetSelector(V, V, V, V);
6168 case NVPTX::PTXPrmtMode::ECL:
6169 return GetSelector(V, std::max(V, 1U), std::max(V, 2U), 3U);
6170 case NVPTX::PTXPrmtMode::ECR:
6171 return GetSelector(0, std::min(V, 1U), std::min(V, 2U), V);
6172 case NVPTX::PTXPrmtMode::RC16: {
6173 unsigned V1 = (V & 1) << 1;
6174 return GetSelector(V1, V1 + 1, V1, V1 + 1);
6175 }
6176 default:
6177 llvm_unreachable("Invalid PRMT mode");
6178 }
6179}
6180
6181static APInt computePRMT(APInt A, APInt B, APInt Selector, unsigned Mode) {
6182 assert(A.getBitWidth() == 32 && B.getBitWidth() == 32 &&
6183 Selector.getBitWidth() == 32 && "PRMT must have i32 operands");
6184 // {b, a} = {{b7, b6, b5, b4}, {b3, b2, b1, b0}}
6185 APInt BitField = B.concat(A);
6186 APInt SelectorVal = getPRMTSelector(Selector, Mode);
6187 APInt Result(32, 0);
6188 for (unsigned I : llvm::seq(4U)) {
6189 APInt Sel = SelectorVal.extractBits(4, I * 4);
6190 unsigned Idx = Sel.getLoBits(3).getZExtValue();
6191 unsigned Sign = Sel.getHiBits(1).getZExtValue();
6192 APInt Byte = BitField.extractBits(8, Idx * 8);
6193 if (Sign)
6194 Byte = Byte.ashr(8);
6195 Result.insertBits(Byte, I * 8);
6196 }
6197 return Result;
6198}
6199
6200 static SDValue combinePRMT(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
6201 CodeGenOptLevel OptLevel) {
6202 if (OptLevel == CodeGenOptLevel::None)
6203 return SDValue();
6204
6205 // Constant fold PRMT
6206 if (isa<ConstantSDNode>(N->getOperand(0)) &&
6207 isa<ConstantSDNode>(N->getOperand(1)) &&
6208 isa<ConstantSDNode>(N->getOperand(2)))
6209 return DCI.DAG.getConstant(computePRMT(N->getConstantOperandAPInt(0),
6210 N->getConstantOperandAPInt(1),
6211 N->getConstantOperandAPInt(2),
6212 N->getConstantOperandVal(3)),
6213 SDLoc(N), N->getValueType(0));
6214 return SDValue();
6215}
6216
6217// During call lowering we wrap the return values in a ProxyReg node which
6218 // depends on the chain value produced by the completed call. This ensures that
6219// the full call is emitted in cases where libcalls are used to legalize
6220// operations. To improve the functioning of other DAG combines we pull all
6221// operations we can through one of these nodes, ensuring that the ProxyReg
6222// directly wraps a load. That is:
6223//
6224// (ProxyReg (zext (load retval0))) => (zext (ProxyReg (load retval0)))
6225//
6226 static SDValue sinkProxyReg(SDValue R, SDValue Chain,
6227 TargetLowering::DAGCombinerInfo &DCI) {
6228 switch (R.getOpcode()) {
6229 case ISD::TRUNCATE:
6230 case ISD::ANY_EXTEND:
6231 case ISD::SIGN_EXTEND:
6232 case ISD::ZERO_EXTEND:
6233 case ISD::BITCAST: {
6234 if (SDValue V = sinkProxyReg(R.getOperand(0), Chain, DCI))
6235 return DCI.DAG.getNode(R.getOpcode(), SDLoc(R), R.getValueType(), V);
6236 return SDValue();
6237 }
6238 case ISD::SHL:
6239 case ISD::SRL:
6240 case ISD::SRA:
6241 case ISD::OR: {
6242 if (SDValue A = sinkProxyReg(R.getOperand(0), Chain, DCI))
6243 if (SDValue B = sinkProxyReg(R.getOperand(1), Chain, DCI))
6244 return DCI.DAG.getNode(R.getOpcode(), SDLoc(R), R.getValueType(), A, B);
6245 return SDValue();
6246 }
6247 case ISD::Constant:
6248 return R;
6249 case ISD::LOAD:
6250 case NVPTXISD::LoadV2:
6251 case NVPTXISD::LoadV4: {
6252 return DCI.DAG.getNode(NVPTXISD::ProxyReg, SDLoc(R), R.getValueType(),
6253 {Chain, R});
6254 }
6255 case ISD::BUILD_VECTOR: {
6256 if (DCI.isBeforeLegalize())
6257 return SDValue();
6258
6259 SmallVector<SDValue, 8> Ops;
6260 for (auto &Op : R->ops()) {
6261 SDValue V = sinkProxyReg(Op, Chain, DCI);
6262 if (!V)
6263 return SDValue();
6264 Ops.push_back(V);
6265 }
6266 return DCI.DAG.getNode(ISD::BUILD_VECTOR, SDLoc(R), R.getValueType(), Ops);
6267 }
6268 case ISD::EXTRACT_VECTOR_ELT: {
6269 if (DCI.isBeforeLegalize())
6270 return SDValue();
6271
6272 if (SDValue V = sinkProxyReg(R.getOperand(0), Chain, DCI))
6273 return DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(R),
6274 R.getValueType(), V, R.getOperand(1));
6275 return SDValue();
6276 }
6277 default:
6278 return SDValue();
6279 }
6280}
6281
6282static SDValue combineProxyReg(SDNode *N,
6283 TargetLowering::DAGCombinerInfo &DCI) {
6284
6285 SDValue Chain = N->getOperand(0);
6286 SDValue Reg = N->getOperand(1);
6287
6288 // If the ProxyReg is not wrapping a load, try to pull the operations through
6289 // the ProxyReg.
6290 if (Reg.getOpcode() != ISD::LOAD) {
6291 if (SDValue V = sinkProxyReg(Reg, Chain, DCI))
6292 return V;
6293 }
6294
6295 return SDValue();
6296}
6297
6298SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
6299 DAGCombinerInfo &DCI) const {
6300 CodeGenOptLevel OptLevel = getTargetMachine().getOptLevel();
6301 switch (N->getOpcode()) {
6302 default:
6303 break;
6304 case ISD::ADD:
6305 return PerformADDCombine(N, DCI, OptLevel);
6306 case ISD::ADDRSPACECAST:
6307 return combineADDRSPACECAST(N, DCI);
6308 case ISD::SIGN_EXTEND:
6309 case ISD::ZERO_EXTEND:
6310 return combineMulWide(N, DCI, OptLevel);
6311 case ISD::BUILD_VECTOR:
6312 return PerformBUILD_VECTORCombine(N, DCI);
6313 case ISD::EXTRACT_VECTOR_ELT:
6314 return PerformEXTRACTCombine(N, DCI);
6315 case ISD::FADD:
6316 return PerformFADDCombine(N, DCI, OptLevel);
6317 case ISD::FMAXNUM:
6318 case ISD::FMINNUM:
6319 case ISD::FMAXIMUM:
6320 case ISD::FMINIMUM:
6321 case ISD::FMAXIMUMNUM:
6322 case ISD::FMINIMUMNUM:
6323 return PerformFMinMaxCombine(N, DCI, STI.getPTXVersion(),
6324 STI.getSmVersion());
6325 case ISD::LOAD:
6326 case NVPTXISD::LoadV2:
6327 case NVPTXISD::LoadV4:
6328 return combineLOAD(N, DCI, STI);
6329 case ISD::MUL:
6330 return PerformMULCombine(N, DCI, OptLevel);
6331 case NVPTXISD::PRMT:
6332 return combinePRMT(N, DCI, OptLevel);
6333 case NVPTXISD::ProxyReg:
6334 return combineProxyReg(N, DCI);
6335 case ISD::SETCC:
6336 return PerformSETCCCombine(N, DCI, STI.getSmVersion());
6337 case ISD::SHL:
6338 return PerformSHLCombine(N, DCI, OptLevel);
6339 case ISD::SREM:
6340 case ISD::UREM:
6341 return PerformREMCombine(N, DCI, OptLevel);
6342 case ISD::STORE:
6343 case NVPTXISD::StoreV2:
6344 case NVPTXISD::StoreV4:
6345 return combineSTORE(N, DCI, STI);
6346 case ISD::VSELECT:
6347 return PerformVSELECTCombine(N, DCI);
6348 }
6349 return SDValue();
6350}
6351
6352static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG,
6353 SmallVectorImpl<SDValue> &Results) {
6354 // Handle bitcasting to v2i8 without hitting the default promotion
6355 // strategy which goes through stack memory.
6356 SDValue Op(Node, 0);
6357 EVT ToVT = Op->getValueType(0);
6358 if (ToVT != MVT::v2i8) {
6359 return;
6360 }
6361
6362 // Bitcast to i16 and unpack elements into a vector
6363 SDLoc DL(Node);
6364 SDValue AsInt = DAG.getBitcast(MVT::i16, Op->getOperand(0));
6365 SDValue Vec0 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, AsInt);
6366 SDValue Const8 = DAG.getConstant(8, DL, MVT::i16);
6367 SDValue Vec1 =
6368 DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
6369 DAG.getNode(ISD::SRL, DL, MVT::i16, {AsInt, Const8}));
6370 Results.push_back(
6371 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i8, {Vec0, Vec1}));
6372}
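// A standalone sketch (not from this file) of the scalar arithmetic behind the
// v2i8 expansion above: the packed i16 is split with a truncate for element 0
// and a shift-right-by-8 plus truncate for element 1, mirroring the TRUNCATE
// and SRL nodes that ReplaceBITCAST builds. The helper name unpackV2I8 is made
// up for this illustration.
#include <cstdint>
#include <utility>
static std::pair<uint8_t, uint8_t> unpackV2I8(uint16_t AsInt) {
  const uint8_t Elt0 = static_cast<uint8_t>(AsInt);      // TRUNCATE
  const uint8_t Elt1 = static_cast<uint8_t>(AsInt >> 8); // SRL by 8 + TRUNCATE
  return {Elt0, Elt1};
}
// unpackV2I8(0xBBAA) yields {0xAA, 0xBB}: the low byte becomes element 0.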
6373
6374static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
6375 SmallVectorImpl<SDValue> &Results) {
6376 SDValue Chain = N->getOperand(0);
6377 SDValue Intrin = N->getOperand(1);
6378 SDLoc DL(N);
6379
6380 // Get the intrinsic ID
6381 unsigned IntrinNo = Intrin.getNode()->getAsZExtVal();
6382 switch (IntrinNo) {
6383 default:
6384 return;
6385 case Intrinsic::nvvm_ldu_global_i:
6386 case Intrinsic::nvvm_ldu_global_f:
6387 case Intrinsic::nvvm_ldu_global_p: {
6388 EVT ResVT = N->getValueType(0);
6389
6390 if (ResVT.isVector()) {
6391 // Vector LDG/LDU
6392
6393 unsigned NumElts = ResVT.getVectorNumElements();
6394 EVT EltVT = ResVT.getVectorElementType();
6395
6396 // Since LDU/LDG are target nodes, we cannot rely on DAG type
6397 // legalization.
6398 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
6399 // loaded type to i16 and propagate the "real" type as the memory type.
6400 bool NeedTrunc = false;
6401 if (EltVT.getSizeInBits() < 16) {
6402 EltVT = MVT::i16;
6403 NeedTrunc = true;
6404 }
6405
6406 unsigned Opcode = 0;
6407 SDVTList LdResVTs;
6408
6409 switch (NumElts) {
6410 default:
6411 return;
6412 case 2:
6413 Opcode = NVPTXISD::LDUV2;
6414 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
6415 break;
6416 case 4: {
6417 Opcode = NVPTXISD::LDUV4;
6418 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
6419 LdResVTs = DAG.getVTList(ListVTs);
6420 break;
6421 }
6422 }
6423
6424 SmallVector<SDValue, 8> OtherOps;
6425
6426 // Copy regular operands
6427
6428 OtherOps.push_back(Chain); // Chain
6429 // Skip operand 1 (intrinsic ID)
6430 // Others
6431 OtherOps.append(N->op_begin() + 2, N->op_end());
6432
6432
6433 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
6434
6435 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
6436 MemSD->getMemoryVT(),
6437 MemSD->getMemOperand());
6438
6439 SmallVector<SDValue, 4> ScalarRes;
6440
6441 for (unsigned i = 0; i < NumElts; ++i) {
6442 SDValue Res = NewLD.getValue(i);
6443 if (NeedTrunc)
6444 Res =
6445 DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
6446 ScalarRes.push_back(Res);
6447 }
6448
6449 SDValue LoadChain = NewLD.getValue(NumElts);
6450
6451 SDValue BuildVec =
6452 DAG.getBuildVector(ResVT, DL, ScalarRes);
6453
6454 Results.push_back(BuildVec);
6455 Results.push_back(LoadChain);
6456 } else {
6457 // i8 LDG/LDU
6458 assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
6459 "Custom handling of non-i8 ldu/ldg?");
6460
6461 // Just copy all operands as-is
6462 SmallVector<SDValue, 4> Ops(N->ops());
6463
6464 // Force output to i16
6465 SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
6466
6467 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
6468
6469 // We make sure the memory type is i8, which will be used during isel
6470 // to select the proper instruction.
6471 SDValue NewLD =
6472 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,
6473 MVT::i8, MemSD->getMemOperand());
6474
6475 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
6476 NewLD.getValue(0)));
6477 Results.push_back(NewLD.getValue(1));
6478 }
6479 return;
6480 }
6481
6482 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
6483 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
6484 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
6485 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
6486 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
6487 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
6488 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
6489 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
6490 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
6491 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
6492 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
6493 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
6494 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
6495 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
6496 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
6497 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
6498 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
6499 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
6500 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
6501 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
6502 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
6503 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
6504 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
6505 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
6506 if (auto Res = lowerTcgen05Ld(N, DAG)) {
6507 Results.push_back(Res->first);
6508 Results.push_back(Res->second);
6509 }
6510 return;
6511
6512 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4:
6513 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8:
6514 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16:
6515 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32:
6516 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64:
6517 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128:
6518 if (auto Res = lowerTcgen05Ld(N, DAG, /*HasOffset=*/true)) {
6519 Results.push_back(Res->first);
6520 Results.push_back(Res->second);
6521 }
6522 return;
6523 }
6524}
6525
6526static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG,
6527 SmallVectorImpl<SDValue> &Results) {
6528 // Change the CopyFromReg to output 2 64-bit results instead of a 128-bit
6529 // result so that it can pass the legalization
6530 SDLoc DL(N);
6531 SDValue Chain = N->getOperand(0);
6532 SDValue Reg = N->getOperand(1);
6533 SDValue Glue = N->getOperand(2);
6534
6535 assert(Reg.getValueType() == MVT::i128 &&
6536 "Custom lowering for CopyFromReg with 128-bit reg only");
6537 SmallVector<EVT, 4> ResultsType = {MVT::i64, MVT::i64, N->getValueType(1),
6538 N->getValueType(2)};
6539 SmallVector<SDValue, 3> NewOps = {Chain, Reg, Glue};
6540
6541 SDValue NewValue = DAG.getNode(ISD::CopyFromReg, DL, ResultsType, NewOps);
6542 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128,
6543 {NewValue.getValue(0), NewValue.getValue(1)});
6544
6545 Results.push_back(Pair);
6546 Results.push_back(NewValue.getValue(2));
6547 Results.push_back(NewValue.getValue(3));
6548}
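// A standalone sketch (not from this file) of the value split performed above:
// the i128 register value is exposed as two i64 halves and later re-formed, as
// with the BUILD_PAIR emitted by ReplaceCopyFromReg_128. The helpers use the
// compiler's unsigned __int128 extension and made-up names purely for
// illustration.
#include <cstdint>
#include <utility>
static std::pair<uint64_t, uint64_t> splitI128(unsigned __int128 V) {
  return {uint64_t(V), uint64_t(V >> 64)}; // {low half, high half}
}
static unsigned __int128 buildPairI128(uint64_t Lo, uint64_t Hi) {
  return (static_cast<unsigned __int128>(Hi) << 64) | Lo;
}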
6549
6550static void replaceProxyReg(SDNode *N, SelectionDAG &DAG,
6551 const TargetLowering &TLI,
6552 SmallVectorImpl<SDValue> &Results) {
6553 SDValue Chain = N->getOperand(0);
6554 SDValue Reg = N->getOperand(1);
6555
6556 MVT VT = TLI.getRegisterType(*DAG.getContext(), Reg.getValueType());
6557
6558 SDValue NewReg = DAG.getAnyExtOrTrunc(Reg, SDLoc(N), VT);
6559 SDValue NewProxy =
6560 DAG.getNode(NVPTXISD::ProxyReg, SDLoc(N), VT, {Chain, NewReg});
6561 SDValue Res = DAG.getAnyExtOrTrunc(NewProxy, SDLoc(N), N->getValueType(0));
6562
6563 Results.push_back(Res);
6564}
6565
6566static void replaceAtomicSwap128(SDNode *N, SelectionDAG &DAG,
6567 const NVPTXSubtarget &STI,
6568 SmallVectorImpl<SDValue> &Results) {
6569 assert(N->getValueType(0) == MVT::i128 &&
6570 "Custom lowering for atomic128 only supports i128");
6571
6572 AtomicSDNode *AN = cast<AtomicSDNode>(N);
6573 SDLoc dl(N);
6574
6575 if (!STI.hasAtomSwap128()) {
6576 DAG.getContext()->diagnose(DiagnosticInfoUnsupported(
6577 DAG.getMachineFunction().getFunction(),
6578 "Support for b128 atomics introduced in PTX ISA version 8.3 and "
6579 "requires target sm_90.",
6580 dl.getDebugLoc()));
6581
6582 Results.push_back(DAG.getUNDEF(MVT::i128));
6583 Results.push_back(AN->getOperand(0)); // Chain
6584 return;
6585 }
6586
6587 SmallVector<SDValue, 6> Ops;
6588 Ops.push_back(AN->getOperand(0)); // Chain
6589 Ops.push_back(AN->getOperand(1)); // Ptr
6590 for (const auto &Op : AN->ops().drop_front(2)) {
6591 // Low part
6592 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i64, Op,
6593 DAG.getIntPtrConstant(0, dl)));
6594 // High part
6595 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i64, Op,
6596 DAG.getIntPtrConstant(1, dl)));
6597 }
6598 unsigned Opcode = N->getOpcode() == ISD::ATOMIC_SWAP
6601 SDVTList Tys = DAG.getVTList(MVT::i64, MVT::i64, MVT::Other);
6602 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, MVT::i128,
6603 AN->getMemOperand());
6604 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i128,
6605 {Result.getValue(0), Result.getValue(1)}));
6606 Results.push_back(Result.getValue(2));
6607}
6608
6609void NVPTXTargetLowering::ReplaceNodeResults(
6610 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
6611 switch (N->getOpcode()) {
6612 default:
6613 report_fatal_error("Unhandled custom legalization");
6614 case ISD::BITCAST:
6615 ReplaceBITCAST(N, DAG, Results);
6616 return;
6617 case ISD::LOAD:
6618 replaceLoadVector(N, DAG, Results, STI);
6619 return;
6620 case ISD::INTRINSIC_W_CHAIN:
6621 ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
6622 return;
6623 case ISD::CopyFromReg:
6624 ReplaceCopyFromReg_128(N, DAG, Results);
6625 return;
6626 case NVPTXISD::ProxyReg:
6627 replaceProxyReg(N, DAG, *this, Results);
6628 return;
6629 case ISD::ATOMIC_CMP_SWAP:
6630 case ISD::ATOMIC_SWAP:
6631 replaceAtomicSwap128(N, DAG, STI, Results);
6632 return;
6633 }
6634}
6635TargetLowering::AtomicExpansionKind
6636NVPTXTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
6638 Type *Ty = AI->getValOperand()->getType();
6639
6640 if (AI->isFloatingPointOperation()) {
6641 if (AI->getOperation() == AtomicRMWInst::BinOp::FAdd) {
6642 if (Ty->isHalfTy() && STI.getSmVersion() >= 70 &&
6643 STI.getPTXVersion() >= 63)
6644 return AtomicExpansionKind::None;
6645 if (Ty->isBFloatTy() && STI.getSmVersion() >= 90 &&
6646 STI.getPTXVersion() >= 78)
6647 return AtomicExpansionKind::None;
6648 if (Ty->isFloatTy())
6649 return AtomicExpansionKind::None;
6650 if (Ty->isDoubleTy() && STI.hasAtomAddF64())
6651 return AtomicExpansionKind::None;
6652 }
6653 return AtomicExpansionKind::CmpXChg;
6654 }
6655
6656 assert(Ty->isIntegerTy() && "Ty should be integer at this point");
6657 const unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
6658
6659 switch (AI->getOperation()) {
6660 default:
6663 if (BitWidth == 128)
6665 [[fallthrough]];
6669 switch (BitWidth) {
6670 case 8:
6671 case 16:
6673 case 32:
6675 case 64:
6676 if (STI.hasAtomBitwise64())
6679 case 128:
6681 default:
6682 llvm_unreachable("unsupported width encountered");
6683 }
6690 switch (BitWidth) {
6691 case 8:
6692 case 16:
6694 case 32:
6696 case 64:
6697 if (STI.hasAtomMinMax64())
6700 case 128:
6702 default:
6703 llvm_unreachable("unsupported width encountered");
6704 }
6707 switch (BitWidth) {
6708 case 32:
6710 case 8:
6711 case 16:
6712 case 64:
6713 case 128:
6715 default:
6716 llvm_unreachable("unsupported width encountered");
6717 }
6718 }
6719
6721}
6722
6723bool NVPTXTargetLowering::shouldInsertFencesForAtomic(
6724 const Instruction *I) const {
6725 auto *CI = dyn_cast<AtomicCmpXchgInst>(I);
6726 // When CAS bitwidth is not supported on the hardware, the CAS is emulated
6727 // using a retry loop that uses a higher-bitwidth monotonic CAS. We enforce
6728 // the memory order using explicit fences around the retry loop.
6729 // The memory order of natively supported CAS operations can be enforced
6730 // by lowering to an atom.cas with the right memory synchronizing effect.
6731 // However, atom.cas only supports relaxed, acquire, release and acq_rel.
6732 // So we also use explicit fences for enforcing memory order for
6733 // seq_cst CAS with natively-supported bitwidths.
6734 return CI &&
6735 (cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth() <
6736 STI.getMinCmpXchgSizeInBits() ||
6737 CI->getMergedOrdering() == AtomicOrdering::SequentiallyConsistent);
6738}
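// A standalone sketch (not from this file) of the emulation strategy described
// in the comment above: when the hardware lacks a CAS of the requested width,
// AtomicExpandPass builds a retry loop around a wider monotonic (relaxed)
// compare-and-swap, and the required ordering is then supplied by the explicit
// fences this hook asks for. The function name, the 32-bit container word, and
// the byte-lane layout are simplifying assumptions for the example.
#include <atomic>
#include <cstdint>
static uint8_t emulatedXchg8(std::atomic<uint32_t> &Word, unsigned ByteIdx,
                             uint8_t NewVal) {
  const unsigned Shift = ByteIdx * 8;
  const uint32_t Mask = 0xFFu << Shift;
  uint32_t Old = Word.load(std::memory_order_relaxed);
  uint32_t Desired;
  do {
    // Splice the new byte into the containing word and retry until the
    // relaxed CAS succeeds; ordering comes from the surrounding fences.
    Desired = (Old & ~Mask) | (uint32_t(NewVal) << Shift);
  } while (!Word.compare_exchange_weak(Old, Desired,
                                       std::memory_order_relaxed));
  return uint8_t(Old >> Shift);
}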
6739
6740AtomicOrdering NVPTXTargetLowering::atomicOperationOrderAfterFenceSplit(
6741 const Instruction *I) const {
6742 auto *CI = dyn_cast<AtomicCmpXchgInst>(I);
6743 bool BitwidthSupportedAndIsSeqCst =
6744 CI && CI->getMergedOrdering() == AtomicOrdering::SequentiallyConsistent &&
6745 cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth() >=
6746 STI.getMinCmpXchgSizeInBits();
6747 return BitwidthSupportedAndIsSeqCst ? AtomicOrdering::Acquire
6749}
6750
6751Instruction *NVPTXTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
6752 Instruction *Inst,
6753 AtomicOrdering Ord) const {
6754 if (!isa<AtomicCmpXchgInst>(Inst))
6755 return TargetLoweringBase::emitLeadingFence(Builder, Inst, Ord);
6756
6757 // Specialize for cmpxchg
6758 // Emit a fence.sc leading fence for cmpxchg seq_cst which are not emulated
6759 SyncScope::ID SSID = cast<AtomicCmpXchgInst>(Inst)->getSyncScopeID();
6760 if (isReleaseOrStronger(Ord))
6761 return Builder.CreateFence(Ord == AtomicOrdering::SequentiallyConsistent
6762 ? Ord
6763 : AtomicOrdering::Release,
6764 SSID);
6765
6766 return nullptr;
6767}
6768
6769Instruction *NVPTXTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
6770 Instruction *Inst,
6771 AtomicOrdering Ord) const {
6772 // Specialize for cmpxchg
6773 if (!isa<AtomicCmpXchgInst>(Inst))
6774 return TargetLoweringBase::emitTrailingFence(Builder, Inst, Ord);
6775
6776 auto *CI = cast<AtomicCmpXchgInst>(Inst);
6777 auto CASWidth =
6778 cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth();
6779 SyncScope::ID SSID = CI->getSyncScopeID();
6780 // Do not emit a trailing fence for cmpxchg seq_cst which are not emulated
6781 if (isAcquireOrStronger(Ord) &&
6782 (Ord != AtomicOrdering::SequentiallyConsistent ||
6783 CASWidth < STI.getMinCmpXchgSizeInBits()))
6784 return Builder.CreateFence(AtomicOrdering::Acquire, SSID);
6785
6786 return nullptr;
6787}
6788
6789// Rather than default to SINT when both UINT and SINT are custom, we only
6790// change the opcode when UINT is not legal and SINT is. UINT is preferred when
6791// both are custom since unsigned CVT instructions can lead to slightly better
6792// SASS code with fewer instructions.
6793unsigned NVPTXTargetLowering::getPreferredFPToIntOpcode(unsigned Op, EVT FromVT,
6794 EVT ToVT) const {
6795 if (isOperationLegal(Op, ToVT))
6796 return Op;
6797 switch (Op) {
6798 case ISD::FP_TO_UINT:
6799 if (isOperationLegal(ISD::FP_TO_SINT, ToVT))
6800 return ISD::FP_TO_SINT;
6801 break;
6802 case ISD::STRICT_FP_TO_UINT:
6803 if (isOperationLegal(ISD::STRICT_FP_TO_SINT, ToVT))
6804 return ISD::STRICT_FP_TO_SINT;
6805 break;
6806 case ISD::VP_FP_TO_UINT:
6807 if (isOperationLegal(ISD::VP_FP_TO_SINT, ToVT))
6808 return ISD::VP_FP_TO_SINT;
6809 break;
6810 default:
6811 break;
6812 }
6813 return Op;
6814}
6815
6816// Pin NVPTXTargetObjectFile's vtables to this file.
6817NVPTXTargetObjectFile::~NVPTXTargetObjectFile() = default;
6818
6819MCSection *NVPTXTargetObjectFile::SelectSectionForGlobal(
6820 const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
6821 return getDataSection();
6822}
6823
6824static void computeKnownBitsForPRMT(const SDValue Op, KnownBits &Known,
6825 const SelectionDAG &DAG, unsigned Depth) {
6826 SDValue A = Op.getOperand(0);
6827 SDValue B = Op.getOperand(1);
6828 ConstantSDNode *Selector = dyn_cast<ConstantSDNode>(Op.getOperand(2));
6829 unsigned Mode = Op.getConstantOperandVal(3);
6830
6831 if (!Selector)
6832 return;
6833
6834 KnownBits AKnown = DAG.computeKnownBits(A, Depth);
6835 KnownBits BKnown = DAG.computeKnownBits(B, Depth);
6836
6837 // {b, a} = {{b7, b6, b5, b4}, {b3, b2, b1, b0}}
6838 assert(AKnown.getBitWidth() == 32 && BKnown.getBitWidth() == 32 &&
6839 "PRMT must have i32 operands");
6840 assert(Known.getBitWidth() == 32 && "PRMT must have i32 result");
6841 KnownBits BitField = BKnown.concat(AKnown);
6842
6843 APInt SelectorVal = getPRMTSelector(Selector->getAPIntValue(), Mode);
6844 for (unsigned I : llvm::seq(4)) {
6845 APInt Sel = SelectorVal.extractBits(4, I * 4);
6846 unsigned Idx = Sel.getLoBits(3).getZExtValue();
6847 unsigned Sign = Sel.getHiBits(1).getZExtValue();
6848 KnownBits Byte = BitField.extractBits(8, Idx * 8);
6849 if (Sign)
6850 Byte = KnownBits::ashr(Byte, 8);
6851 Known.insertBits(Byte, I * 8);
6852 }
6853}
6854
6855static void computeKnownBitsForLoadV(const SDValue Op, KnownBits &Known) {
6856 auto *LD = cast<MemSDNode>(Op.getNode());
6857
6858 // We can't do anything without knowing the sign bit.
6859 auto ExtType = LD->getConstantOperandVal(LD->getNumOperands() - 1);
6860 if (ExtType == ISD::SEXTLOAD)
6861 return;
6862
6863 // ExtLoading to vector types is weird and may not work well with known bits.
6864 auto DestVT = LD->getValueType(0);
6865 if (DestVT.isVector())
6866 return;
6867
6868 assert(Known.getBitWidth() == DestVT.getSizeInBits());
6869 auto ElementBitWidth = NVPTXDAGToDAGISel::getFromTypeWidthForLoad(LD);
6870 Known.Zero.setHighBits(Known.getBitWidth() - ElementBitWidth);
6871}
6872
6873void NVPTXTargetLowering::computeKnownBitsForTargetNode(
6874 const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
6875 const SelectionDAG &DAG, unsigned Depth) const {
6876 Known.resetAll();
6877
6878 switch (Op.getOpcode()) {
6879 case NVPTXISD::PRMT:
6880 computeKnownBitsForPRMT(Op, Known, DAG, Depth);
6881 break;
6882 case NVPTXISD::LoadV2:
6883 case NVPTXISD::LoadV4:
6884 case NVPTXISD::LoadV8:
6885 computeKnownBitsForLoadV(Op, Known);
6886 break;
6887 default:
6888 break;
6889 }
6890}
6891
6892static std::pair<APInt, APInt> getPRMTDemandedBits(const APInt &SelectorVal,
6893 const APInt &DemandedBits) {
6894 APInt DemandedLHS = APInt(32, 0);
6895 APInt DemandedRHS = APInt(32, 0);
6896
6897 for (unsigned I : llvm::seq(4)) {
6898 if (DemandedBits.extractBits(8, I * 8).isZero())
6899 continue;
6900
6901 APInt Sel = SelectorVal.extractBits(4, I * 4);
6902 unsigned Idx = Sel.getLoBits(3).getZExtValue();
6903 unsigned Sign = Sel.getHiBits(1).getZExtValue();
6904
6905 APInt &Src = Idx < 4 ? DemandedLHS : DemandedRHS;
6906 unsigned ByteStart = (Idx % 4) * 8;
6907 if (Sign)
6908 Src.setBit(ByteStart + 7);
6909 else
6910 Src.setBits(ByteStart, ByteStart + 8);
6911 }
6912
6913 return {DemandedLHS, DemandedRHS};
6914}
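// A standalone sketch (not from this file) of the same demanded-bits
// bookkeeping with plain 32-bit masks: for each demanded result byte, the
// selector nibble names the source byte, and in sign mode only that byte's top
// bit is actually needed. The helper name prmtDemandedBytes is made up for
// this illustration.
#include <cstdint>
#include <utility>
static std::pair<uint32_t, uint32_t>
prmtDemandedBytes(uint32_t Selector, uint32_t DemandedBits) {
  uint32_t DemandedA = 0, DemandedB = 0;
  for (unsigned I = 0; I < 4; ++I) {
    if (((DemandedBits >> (I * 8)) & 0xFF) == 0)
      continue; // this result byte is never used
    const uint32_t Sel = (Selector >> (I * 4)) & 0xF;
    const unsigned Idx = Sel & 0x7;
    uint32_t &Src = Idx < 4 ? DemandedA : DemandedB;
    const unsigned ByteStart = (Idx % 4) * 8;
    if (Sel & 0x8)
      Src |= 1u << (ByteStart + 7); // sign mode: only the sign bit matters
    else
      Src |= 0xFFu << ByteStart;    // copy mode: the whole byte is needed
  }
  return {DemandedA, DemandedB};
}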
6915
6916// Replace undef with 0 as this is easier for other optimizations such as
6917// known bits.
6918static SDValue canonicalizePRMTInput(SDValue Op, SelectionDAG &DAG) {
6919 if (!Op)
6920 return SDValue();
6921 if (Op.isUndef())
6922 return DAG.getConstant(0, SDLoc(), MVT::i32);
6923 return Op;
6924}
6925
6927 const APInt &DemandedBits,
6928 SelectionDAG &DAG,
6929 const TargetLowering &TLI,
6930 unsigned Depth) {
6931 assert(PRMT.getOpcode() == NVPTXISD::PRMT);
6932 SDValue Op0 = PRMT.getOperand(0);
6933 SDValue Op1 = PRMT.getOperand(1);
6934 auto *SelectorConst = dyn_cast<ConstantSDNode>(PRMT.getOperand(2));
6935 if (!SelectorConst)
6936 return SDValue();
6937
6938 unsigned Mode = PRMT.getConstantOperandVal(3);
6939 const APInt Selector = getPRMTSelector(SelectorConst->getAPIntValue(), Mode);
6940
6941 // Try to simplify the PRMT to one of the inputs if the used bytes are all
6942 // from the same input in the correct order.
6943 const unsigned LeadingBytes = DemandedBits.countLeadingZeros() / 8;
6944 const unsigned SelBits = (4 - LeadingBytes) * 4;
6945 if (Selector.getLoBits(SelBits) == APInt(32, 0x3210).getLoBits(SelBits))
6946 return Op0;
6947 if (Selector.getLoBits(SelBits) == APInt(32, 0x7654).getLoBits(SelBits))
6948 return Op1;
6949
6950 auto [DemandedLHS, DemandedRHS] = getPRMTDemandedBits(Selector, DemandedBits);
6951
6952 // Attempt to avoid multi-use ops if we don't need anything from them.
6953 SDValue DemandedOp0 =
6954 TLI.SimplifyMultipleUseDemandedBits(Op0, DemandedLHS, DAG, Depth + 1);
6955 SDValue DemandedOp1 =
6956 TLI.SimplifyMultipleUseDemandedBits(Op1, DemandedRHS, DAG, Depth + 1);
6957
6958 DemandedOp0 = canonicalizePRMTInput(DemandedOp0, DAG);
6959 DemandedOp1 = canonicalizePRMTInput(DemandedOp1, DAG);
6960 if ((DemandedOp0 && DemandedOp0 != Op0) ||
6961 (DemandedOp1 && DemandedOp1 != Op1)) {
6962 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
6963 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
6964 return getPRMT(Op0, Op1, Selector.getZExtValue(), SDLoc(PRMT), DAG);
6965 }
6966
6967 return SDValue();
6968}
6969
6970bool NVPTXTargetLowering::SimplifyDemandedBitsForTargetNode(
6971 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
6972 KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const {
6973 Known.resetAll();
6974
6975 switch (Op.getOpcode()) {
6976 case NVPTXISD::PRMT:
6977 if (SDValue Result = simplifyDemandedBitsForPRMT(Op, DemandedBits, TLO.DAG,
6978 *this, Depth)) {
6979 TLO.CombineTo(Op, Result);
6980 return true;
6981 }
6982 break;
6983 default:
6984 break;
6985 }
6986
6987 computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth);
6988 return false;
6989 return false;
6990}
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
constexpr LLT F32
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1.
static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
static SDValue PerformVSELECTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformBUILD_VECTORCombine - Target-specific dag combine xforms for ISD::BUILD_VECTOR.
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file contains the declarations of entities that describe floating point environment and related ...
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first DebugLoc that has line number information, given a range of instructions.
Register Reg
Register const TargetRegisterInfo * TRI
#define T
NVPTX address space definition.
static bool shouldConvertToIndirectCall(const CallBase *CB, const GlobalAddressSDNode *Func)
static SDValue combineADDRSPACECAST(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< bool > sched4reg("nvptx-sched4reg", cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false))
static SDValue lowerTcgen05St(SDValue Op, SelectionDAG &DAG)
static SDValue PerformEXTRACTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< NVPTX::DivPrecisionLevel > UsePrecDivF32("nvptx-prec-divf32", cl::Hidden, cl::desc("NVPTX Specific: Override the precision of the lowering for f32 fdiv"), cl::values(clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"), clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754, "2", "Use IEEE Compliant F32 div.rnd if available (default)"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754_NoFTZ, "3", "Use IEEE Compliant F32 div.rnd if available, no FTZ")), cl::init(NVPTX::DivPrecisionLevel::IEEE754))
static bool isConstOne(const SDValue &Operand)
static cl::opt< unsigned > FMAContractLevelOpt("nvptx-fma-level", cl::Hidden, cl::desc("NVPTX Specific: FMA contraction (0: don't do it" " 1: do it 2: do it aggressively"), cl::init(2))
static bool IsPTXVectorType(MVT VT)
static SDValue lowerLOADi1(LoadSDNode *LD, SelectionDAG &DAG)
static SDValue lowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG)
static MachinePointerInfo refinePtrAS(SDValue &Ptr, SelectionDAG &DAG, const DataLayout &DL, const TargetLowering &TL)
static SDValue lowerROT(SDValue Op, SelectionDAG &DAG)
static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL, LLVMContext &Ctx, CallingConv::ID CallConv, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > &Offsets, uint64_t StartingOffset=0)
ComputePTXValueVTs - For the given Type Ty, returns the set of primitive legal-ish MVTs that compose ...
static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static void replaceAtomicSwap128(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI, SmallVectorImpl< SDValue > &Results)
static unsigned getMinMax3Opcode(unsigned MinMax2Opcode)
Get 3-input version of a 2-input min/max opcode.
static SDValue lowerSTOREVector(SDValue Op, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static SDValue lowerLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static void replaceProxyReg(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &Results)
static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static SDValue lowerCTLZCTPOP(SDValue Op, SelectionDAG &DAG)
static SDValue combineMADConstOne(SDValue X, SDValue Add, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static SDValue combinePRMT(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue combinePackingMovIntoStore(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned Front, unsigned Back)
Fold packing movs into a store.
static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static SDValue getBuildVectorizedValue(unsigned N, const SDLoc &dl, SelectionDAG &DAG, T GetElement)
static SDValue getExtractVectorizedValue(SDValue V, unsigned I, EVT VT, const SDLoc &dl, SelectionDAG &DAG)
static unsigned canMergeParamLoadStoresStartingAt(unsigned Idx, uint32_t AccessSize, const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment)
static EVT getVectorizedVT(EVT VT, unsigned N, LLVMContext &C)
static SDValue lowerIntrinsicWOChain(SDValue Op, SelectionDAG &DAG)
static SDValue PerformFMinMaxCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned PTXVersion, unsigned SmVersion)
PerformFMinMaxCombine - Combine (fmaxnum (fmaxnum a, b), c) into (fmaxnum3 a, b, c).
static SDValue combineMulWide(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static std::optional< unsigned > getScalar3OpcodeForReduction(unsigned ReductionOpcode)
Get 3-input scalar reduction opcode.
static SDValue lowerIntrinsicWChain(SDValue Op, SelectionDAG &DAG)
static bool isConstZero(const SDValue &Operand)
static SDValue LowerVectorArith(SDValue Op, SelectionDAG &DAG)
static SDValue LowerTcgen05MMADisableOutputLane(SDValue Op, SelectionDAG &DAG)
static bool IsMulWideOperandDemotable(SDValue Op, unsigned OptSize, OperandSignedness &S)
IsMulWideOperandDemotable - Checks if the provided DAG node is an operand that can be demoted to OptS...
static unsigned getTcgen05MMADisableOutputLane(unsigned IID)
static std::pair< APInt, APInt > getPRMTDemandedBits(const APInt &SelectorVal, const APInt &DemandedBits)
static APInt computePRMT(APInt A, APInt B, APInt Selector, unsigned Mode)
static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode)
static SDValue PerformREMCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue PerformMULCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI)
static void computeKnownBitsForPRMT(const SDValue Op, KnownBits &Known, const SelectionDAG &DAG, unsigned Depth)
static SDValue combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Fold unpacking movs into a load by increasing the number of return values.
static SDValue LowerClusterLaunchControlQueryCancel(SDValue Op, SelectionDAG &DAG)
static std::optional< std::pair< SDValue, SDValue > > lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset=false)
static SDValue lowerCvtRSIntrinsics(SDValue Op, SelectionDAG &DAG)
static std::optional< std::pair< SDValue, SDValue > > replaceLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
replaceLoadVector - Convert vector loads into multi-output scalar loads.
static SDValue expandFSH64(SDValue A, SDValue B, SDValue ShiftAmount, SDLoc DL, unsigned Opcode, SelectionDAG &DAG)
static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS, unsigned OptSize, bool &IsSigned)
AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can be demoted to OptSize bits...
static SDValue TryMULWIDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply of M/2 bits that produces...
static SDValue lowerPrmtIntrinsic(SDValue Op, SelectionDAG &DAG)
static SDValue combineMulSelectConstOne(SDValue X, SDValue Select, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static SDValue buildTreeReduction(const SmallVector< SDValue > &Elements, EVT EltTy, ArrayRef< std::pair< unsigned, unsigned > > Ops, const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG)
Reduces the elements using the scalar operations provided.
static SDValue combineProxyReg(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SmallVector< unsigned, 16 > VectorizePTXValueVTs(const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment, bool IsVAArg=false)
static SDValue getPRMT(SDValue A, SDValue B, SDValue Selector, SDLoc DL, SelectionDAG &DAG, unsigned Mode=NVPTX::PTXPrmtMode::NONE)
static SDValue matchMADConstOnePattern(SDValue Add)
static SDValue correctParamType(SDValue V, EVT ExpectedVT, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, SDLoc dl)
static ISD::NodeType getExtOpcode(const ISD::ArgFlagsTy &Flags)
static cl::opt< bool > UsePrecSqrtF32("nvptx-prec-sqrtf32", cl::Hidden, cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."), cl::init(true))
static void computeKnownBitsForLoadV(const SDValue Op, KnownBits &Known)
static APInt getPRMTSelector(const APInt &Selector, unsigned Mode)
static EVT promoteScalarIntegerPTX(const EVT VT)
PromoteScalarIntegerPTX Used to make sure the arguments/returns are suitable for passing and promote ...
static SDValue simplifyDemandedBitsForPRMT(SDValue PRMT, const APInt &DemandedBits, SelectionDAG &DAG, const TargetLowering &TLI, unsigned Depth)
static SDValue lowerFREM(SDValue Op, SelectionDAG &DAG)
static SDValue canonicalizePRMTInput(SDValue Op, SelectionDAG &DAG)
static SDValue sinkProxyReg(SDValue R, SDValue Chain, TargetLowering::DAGCombinerInfo &DCI)
static SDValue lowerFSH(SDValue Op, SelectionDAG &DAG)
static SDValue PromoteBinOpToF32(SDNode *N, SelectionDAG &DAG)
static SDValue PerformSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned int SmVersion)
static std::optional< std::pair< unsigned int, MVT > > getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI, unsigned AddressSpace)
static cl::opt< bool > ForceMinByValParamAlign("nvptx-force-min-byval-param-align", cl::Hidden, cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval" " params of device functions."), cl::init(false))
static cl::opt< bool > UseApproxLog2F32("nvptx-approx-log2f32", cl::desc("NVPTX Specific: whether to use lg2.approx for log2"), cl::init(false))
Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it does NOT use lg2....
static SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG)
static SDValue combineLOAD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue combineSTORE(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue PerformSHLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
MachineInstr unsigned OpIdx
uint64_t High
#define P(N)
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This file describes how to lower LLVM code to machine code.
Value * RHS
Value * LHS
BinaryOperator * Mul
static const fltSemantics & IEEEsingle()
Definition APFloat.h:296
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Definition APFloat.h:1080
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt getLoBits(unsigned numBits) const
Compute an APInt containing numBits lowbits from this APInt.
Definition APInt.cpp:644
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1541
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
Definition APInt.h:1392
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
Definition APInt.cpp:639
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1331
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1489
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition APInt.h:436
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition APInt.h:1131
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
Definition APInt.cpp:482
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition APInt.h:433
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
Definition APInt.h:1238
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
const T & back() const
back - Get the last element.
Definition ArrayRef.h:151
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition ArrayRef.h:201
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:186
an instruction that atomically reads a memory location, combines it with another value,...
@ Add
*p = old + v
@ FAdd
*p = old + v
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
bool isFloatingPointOperation() const
BinOp getOperation() const
This is an SDNode representing atomic operations.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
FunctionType * getFunctionType() const
This class represents a function call, abstracting a target machine's calling convention.
const APInt & getAPIntValue() const
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Diagnostic information for unsupported feature in backend.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Definition Function.cpp:640
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
MCSection * getDataSection() const
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Definition MCSection.h:517
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
Machine Value Type.
static auto integer_fixedlen_vector_valuetypes()
SimpleValueType SimpleTy
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
static auto fp_fixedlen_vector_valuetypes()
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
const std::vector< MachineJumpTableEntry > & getJumpTables() const
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
EVT getMemoryVT() const
Return the type of the in-memory value.
static unsigned getFromTypeWidthForLoad(const MemSDNode *Mem)
bool hasAtomSwap128() const
bool hasF32x2Instructions() const
bool has256BitVectorLoadStore(unsigned AS) const
AtomicOrdering atomicOperationOrderAfterFenceSplit(const Instruction *I) const override
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
const NVPTXTargetMachine * nvTM
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
NVPTXTargetLowering(const NVPTXTargetMachine &TM, const NVPTXSubtarget &STI)
std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &, const SmallVectorImpl< ISD::OutputArg > &, std::optional< unsigned > FirstVAArg, const CallBase &CB, unsigned UniqueCallSite) const
unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT, EVT ToVT) const override
bool useF32FTZ(const MachineFunction &MF) const
SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const
Align getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &ExtraSteps, bool &UseOneConst, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
std::string getParamName(const Function *F, int Idx) const
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
NVPTX::DivPrecisionLevel getDivF32Level(const MachineFunction &MF, const SDNode &N) const
bool shouldInsertFencesForAtomic(const Instruction *) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
Align getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy, const DataLayout &DL) const
getFunctionParamOptimizedAlign - since function arguments are passed via .param space,...
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, EVT VT) const override
Return the ValueType of the result of SETCC operations.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Align getFunctionByValParamAlign(const Function *F, Type *ArgTy, Align InitialAlign, const DataLayout &DL) const
Helper for computing alignment of a device function byval parameter.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
bool allowFMA(MachineFunction &MF, CodeGenOptLevel OptLevel) const
bool usePrecSqrtF32(const SDNode *N=nullptr) const
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
MCSection * SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const override
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
unsigned getIROrder() const
Return the node ordering.
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
iterator_range< user_iterator > users()
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
SectionKind - This is a simple POD value that classifies the properties of a section.
Definition SectionKind.h:22
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:140
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
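Continuing the hypothetical constructor sketched earlier: register classes are associated with value types first, and computeRegisterProperties() is called afterwards to derive the remaining tables. The MyTarget register-class names and the STI accessor are assumptions, not code from this file.

// Hedged sketch (constructor body continued); regclass names are placeholders.
addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);
addRegisterClass(MVT::f32, &MyTarget::FPR32RegClass);
// Must run after every addRegisterClass() call so that derived properties
// (register type and count per EVT, legal value types, ...) stay consistent.
computeRegisterProperties(STI.getRegisterInfo());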
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to do about it.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
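A hedged sketch of setLoadExtAction(): another fragment of the same hypothetical constructor, for a target with no native i1 extending loads. This is a generic pattern, not code from this file.

// Hedged sketch: promote all extending loads from i1 memory types.
for (MVT VT : MVT::integer_valuetypes()) {
  setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
}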
std::vector< ArgListEntry > ArgListTy
virtual Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
virtual Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
Inserts in the IR a target-specific intrinsic specifying a fence.
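A hedged sketch of emitLeadingFence(): an override that inserts an explicit IR fence only for sequentially consistent accesses. MyTargetLowering is the same placeholder class as above; CreateFence() is the standard IRBuilder call.

// Hedged sketch: conservative leading-fence emission in a hypothetical target.
#include "llvm/IR/IRBuilder.h"

llvm::Instruction *
MyTargetLowering::emitLeadingFence(llvm::IRBuilderBase &Builder,
                                   llvm::Instruction *Inst,
                                   llvm::AtomicOrdering Ord) const {
  if (Ord == llvm::AtomicOrdering::SequentiallyConsistent)
    return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
  return nullptr; // assume weaker orderings need no extra fence on this target
}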
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequences that increase the amount of flow control.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
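A hedged sketch of getRegForInlineAsmConstraint(): map the single-letter 'r' constraint to a general-purpose register class and defer everything else to the base class. The constraint letter and GPR32RegClass are illustrative choices, not part of this file.

// Hedged sketch for a hypothetical target's inline-asm register constraints.
std::pair<unsigned, const llvm::TargetRegisterClass *>
MyTargetLowering::getRegForInlineAsmConstraint(
    const llvm::TargetRegisterInfo *TRI, llvm::StringRef Constraint,
    llvm::MVT VT) const {
  if (Constraint.size() == 1 && Constraint[0] == 'r')
    return std::make_pair(0U, &MyTarget::GPR32RegClass); // placeholder class
  // Anything unrecognized falls back to the target-independent handling.
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}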
TargetLowering(const TargetLowering &)=delete
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
TargetOptions Options
MCSymbol * getSymbol(const GlobalValue *GV) const
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
virtual const TargetFrameLowering * getFrameLowering() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
A raw_ostream that writes to an std::string.
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt pow(const APInt &X, int64_t N)
Compute X^N for N>=0.
Definition APInt.cpp:3155
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition ISDOpcodes.h:41
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:807
@ CTLZ_ZERO_UNDEF
Definition ISDOpcodes.h:780
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
Definition ISDOpcodes.h:270
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:771
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:289
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:259
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:841
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition ISDOpcodes.h:511
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition ISDOpcodes.h:215
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
Definition ISDOpcodes.h:868
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition ISDOpcodes.h:577
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:410
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition ISDOpcodes.h:744
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition ISDOpcodes.h:275
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition ISDOpcodes.h:249
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:832
@ SSUBO
Same for subtraction.
Definition ISDOpcodes.h:347
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition ISDOpcodes.h:369
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:784
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
Definition ISDOpcodes.h:242
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defined outside of the scope of this SelectionDAG.
Definition ISDOpcodes.h:225
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition ISDOpcodes.h:343
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:701
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:762
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition ISDOpcodes.h:642
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition ISDOpcodes.h:607
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition ISDOpcodes.h:569
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
Definition ISDOpcodes.h:219
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:838
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition ISDOpcodes.h:799
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
Definition ISDOpcodes.h:379
@ SMULO
Same for multiplication.
Definition ISDOpcodes.h:351
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:876
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition ISDOpcodes.h:724
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition ISDOpcodes.h:793
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:323
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition ISDOpcodes.h:110
@ STRICT_FP_TO_UINT
Definition ISDOpcodes.h:471
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:470
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:914
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:736
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition ISDOpcodes.h:200
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:299
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
Definition ISDOpcodes.h:236
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition ISDOpcodes.h:558
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition ISDOpcodes.h:947
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:844
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition ISDOpcodes.h:821
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition ISDOpcodes.h:527
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition ISDOpcodes.h:360
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:333
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition ISDOpcodes.h:208
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition ISDOpcodes.h:549
LLVM_ABI bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
@ Bitcast
Perform the operation on a different, but equivalently sized type.
@ ATOMIC_CMP_SWAP_B128
These nodes are used to lower atomic instructions with i128 type.
bool isPackedVectorTy(EVT VT)
DivPrecisionLevel
Definition NVPTX.h:251
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:829
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool shouldEmitPTXNoReturn(const Value *V, const TargetMachine &TM)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
MaybeAlign getAlign(const CallInst &I, unsigned Index)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1655
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition Analysis.cpp:119
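A hedged sketch of ComputeValueVTs(): flatten an aggregate return type into scalar EVTs and their byte offsets before deciding how each piece is passed. TLI, DAG and RetTy stand for values available inside a lowering routine and are assumed here.

// Hedged sketch: split an IR aggregate into per-element EVTs plus offsets.
#include "llvm/CodeGen/Analysis.h"

llvm::SmallVector<llvm::EVT, 8> ValueVTs;
llvm::SmallVector<llvm::TypeSize, 8> Offsets;
llvm::ComputeValueVTs(TLI, DAG.getDataLayout(), RetTy, ValueVTs,
                      /*MemVTs=*/nullptr, &Offsets);
for (auto [VT, Off] : llvm::zip(ValueVTs, Offsets)) {
  // Each (VT, Off) pair is one scalar piece of RetTy at byte offset Off.
  (void)VT;
  (void)Off;
}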
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2472
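A small, self-contained hedged example of enumerate(), since several loops in this file pair values with their position; the function name is illustrative only.

// Hedged example of enumerate(): iterate over index/value pairs.
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

static int sumOfIndexWeightedValues() {
  llvm::SmallVector<int, 4> Items = {10, 20, 30};
  int Sum = 0;
  for (auto [Idx, Val] : llvm::enumerate(Items))
    Sum += static_cast<int>(Idx) * Val; // Idx runs 0, 1, 2
  return Sum; // 0*10 + 1*20 + 2*30 = 80
}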
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
Definition MathExtras.h:385
bool isReleaseOrStronger(AtomicOrdering AO)
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition STLExtras.h:1968
unsigned promoteScalarArgumentSize(unsigned size)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
bool shouldPassAsArray(Type *Ty)
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
@ Default
-O2, -Os, -Oz
Definition CodeGen.h:85
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAcquireOrStronger(AtomicOrdering AO)
constexpr unsigned BitWidth
bool isKernelFunction(const Function &F)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
Function * getMaybeBitcastedCallee(const CallBase *CB)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
Extended Value Type.
Definition ValueTypes.h:35
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition ValueTypes.h:395
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:137
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition ValueTypes.h:74
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
Definition ValueTypes.h:121
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition ValueTypes.h:284
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition ValueTypes.h:300
ElementCount getVectorElementCount() const
Definition ValueTypes.h:350
bool is32BitVector() const
Return true if this is a 32-bit vector type.
Definition ValueTypes.h:197
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:373
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:385
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:316
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition ValueTypes.h:381
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:168
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition ValueTypes.h:323
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Definition ValueTypes.h:256
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition ValueTypes.h:328
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition ValueTypes.h:157
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
Definition ValueTypes.h:102
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition ValueTypes.h:336
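A hedged helper built only from the EVT accessors listed above: derive the half-length vector type of an even-length vector, a common step when splitting oversized vectors. The helper name is hypothetical.

// Hedged sketch: half-length vector type of an even-length vector EVT.
#include "llvm/CodeGen/ValueTypes.h"
#include <cassert>

static llvm::EVT getHalfNumElementsVT(llvm::EVT VT, llvm::LLVMContext &Ctx) {
  assert(VT.isVector() && VT.getVectorNumElements() % 2 == 0 &&
         "expected an even-length vector");
  return llvm::EVT::getVectorVT(Ctx, VT.getVectorElementType(),
                                VT.getVectorNumElements() / 2);
}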
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:152
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
Definition KnownBits.h:233
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
void resetAll()
Resets the known state of all bits.
Definition KnownBits.h:74
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
Definition KnownBits.h:219
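A hedged example of concat(): rebuild the known bits of a wide value from its two halves; concat() places the argument in the low bits and *this in the high bits. The helper name is illustrative only.

// Hedged sketch: combine known bits of 32-bit halves into a 64-bit result.
#include "llvm/Support/KnownBits.h"
#include <cassert>

static llvm::KnownBits combineKnownHalves(const llvm::KnownBits &Hi,
                                          const llvm::KnownBits &Lo) {
  assert(Hi.getBitWidth() == 32 && Lo.getBitWidth() == 32);
  // Result is 64 bits wide: Hi occupies [63:32], Lo occupies [31:0].
  return Hi.concat(Lo);
}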
This class contains a discriminated union of information about pointers in memory operands,...
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasAllowContract() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...