LLVM 23.0.0git
GCNSubtarget.cpp
Go to the documentation of this file.
1//===-- GCNSubtarget.cpp - GCN Subtarget Information ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// Implements the GCN specific subclass of TargetSubtarget.
11//
12//===----------------------------------------------------------------------===//
13
14#include "GCNSubtarget.h"
15#include "AMDGPUCallLowering.h"
17#include "AMDGPULegalizerInfo.h"
20#include "AMDGPUTargetMachine.h"
28#include "llvm/IR/MDBuilder.h"
29#include <algorithm>
30
31using namespace llvm;
32
33#define DEBUG_TYPE "gcn-subtarget"
34
35#define GET_SUBTARGETINFO_TARGET_DESC
36#define GET_SUBTARGETINFO_CTOR
37#define AMDGPUSubtarget GCNSubtarget
38#include "AMDGPUGenSubtargetInfo.inc"
39#undef AMDGPUSubtarget
40
42 "amdgpu-vgpr-index-mode",
43 cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
44 cl::init(false));
45
46static cl::opt<bool> UseAA("amdgpu-use-aa-in-codegen",
47 cl::desc("Enable the use of AA during codegen."),
48 cl::init(true));
49
51 NSAThreshold("amdgpu-nsa-threshold",
52 cl::desc("Number of addresses from which to enable MIMG NSA."),
54
// NOTE(review): the documentation extraction dropped several original lines of
// this function (the signature on orig. line 57, the generic-processor branch
// head on 96-98, the wave-size default assignments on 101/108, the FP64
// assert on 112, and the default assignments on 135, 140-141, 144, 146,
// 148-149, 155, 157). Restore them from the LLVM tree before compiling.
//
// Parses the feature string (with ABI-required defaults prepended), resolves
// mutually-exclusive wavefront-size bits, fills in unset per-target defaults,
// and initializes the TargetID from the feature string. Returns *this so it
// can be used in the constructor's initializer list.
56
58 StringRef GPU,
59 StringRef FS) {
60 // Determine default and user-specified characteristics
61 //
62 // We want to be able to turn these off, but making this a subtarget feature
63 // for SI has the unhelpful behavior that it unsets everything else if you
64 // disable it.
65 //
66 // Similarly we want enable-prt-strict-null to be on by default and not to
67 // unset everything else if it is disabled
68
69 SmallString<256> FullFS("+load-store-opt,+enable-ds128,");
70
71 // Turn on features that HSA ABI requires. Also turn on FlatForGlobal by
72 // default
73 if (isAmdHsaOS())
74 FullFS += "+flat-for-global,+unaligned-access-mode,+trap-handler,";
75
76 FullFS += "+enable-prt-strict-null,"; // This is overridden by a disable in FS
77
78 // Disable mutually exclusive bits.
79 if (FS.contains_insensitive("+wavefrontsize")) {
80 if (!FS.contains_insensitive("wavefrontsize16"))
81 FullFS += "-wavefrontsize16,";
82 if (!FS.contains_insensitive("wavefrontsize32"))
83 FullFS += "-wavefrontsize32,";
84 if (!FS.contains_insensitive("wavefrontsize64"))
85 FullFS += "-wavefrontsize64,";
86 }
87
88 FullFS += FS;
89
90 ParseSubtargetFeatures(GPU, /*TuneCPU*/ GPU, FullFS);
91
92 // Implement the "generic" processors, which acts as the default when no
93 // generation features are enabled (e.g for -mcpu=''). HSA OS defaults to
94 // the first amdgcn target that supports flat addressing. Other OSes defaults
95 // to the first amdgcn target.
// (elided: branch selecting the default generation for an unknown GPU)
99 // Assume wave64 for the unknown target, if not explicitly set.
100 if (getWavefrontSizeLog2() == 0)
// (elided: wave64 default assignment)
102 } else if (!hasFeature(AMDGPU::FeatureWavefrontSize32) &&
103 !hasFeature(AMDGPU::FeatureWavefrontSize64)) {
104 // If there is no default wave size it must be a generation before gfx10,
105 // these have FeatureWavefrontSize64 in their definition already. For gfx10+
106 // set wave32 as a default.
107 ToggleFeature(AMDGPU::FeatureWavefrontSize32);
// (elided: wavefront-size-log2 assignment)
109 }
110
111 // We don't support FP64 for EG/NI atm.
113
114 // Targets must either support 64-bit offsets for MUBUF instructions, and/or
115 // support flat operations, otherwise they cannot access a 64-bit global
116 // address space
117 assert(hasAddr64() || hasFlat());
118 // Unless +-flat-for-global is specified, turn on FlatForGlobal for targets
119 // that do not support ADDR64 variants of MUBUF instructions. Such targets
120 // cannot use a 64 bit offset with a MUBUF instruction to access the global
121 // address space
122 if (!hasAddr64() && !FS.contains("flat-for-global") && !UseFlatForGlobal) {
123 ToggleFeature(AMDGPU::FeatureUseFlatForGlobal);
124 UseFlatForGlobal = true;
125 }
126 // Unless +-flat-for-global is specified, use MUBUF instructions for global
127 // address space access if flat operations are not available.
128 if (!hasFlat() && !FS.contains("flat-for-global") && UseFlatForGlobal) {
129 ToggleFeature(AMDGPU::FeatureUseFlatForGlobal);
130 UseFlatForGlobal = false;
131 }
132
133 // Set defaults if needed.
134 if (MaxPrivateElementSize == 0)
136
137 if (LDSBankCount == 0)
138 LDSBankCount = 32;
139
142
143 if (FlatOffsetBitWidth == 0)
145
147
150
151 // InstCacheLineSize is set from TableGen subtarget features
152 // (FeatureInstCacheLineSize64 / FeatureInstCacheLineSize128).
153 // Fall back to 64 if no feature was specified (e.g. generic targets).
154 if (InstCacheLineSize == 0)
156
158 "InstCacheLineSize must be a power of 2");
159
160 TargetID.setTargetIDFromFeaturesString(FS);
161
162 LLVM_DEBUG(dbgs() << "xnack setting for subtarget: "
163 << TargetID.getXnackSetting() << '\n');
164 LLVM_DEBUG(dbgs() << "sramecc setting for subtarget: "
165 << TargetID.getSramEccSetting() << '\n');
166
167 return *this;
168}
169
171 LLVMContext &Ctx = F.getContext();
172 if (hasFeature(AMDGPU::FeatureWavefrontSize32) &&
173 hasFeature(AMDGPU::FeatureWavefrontSize64)) {
174 Ctx.diagnose(DiagnosticInfoUnsupported(
175 F, "must specify exactly one of wavefrontsize32 and wavefrontsize64"));
176 }
177}
178
// NOTE(review): the extraction dropped the constructor's opening signature
// line (orig. line 179, "GCNSubtarget::GCNSubtarget(const Triple &TT,
// StringRef GPU, StringRef FS, const GCNTargetMachine &TM)" per the trailing
// index) and orig. lines 192-193 of the body — restore them from the LLVM
// tree before compiling.
//
// Constructs the subtarget: feature parsing happens via
// initializeSubtargetDependencies() inside the InstrInfo initializer, then
// the GlobalISel/SelectionDAG lowering helpers are created.
180 const GCNTargetMachine &TM)
181 : // clang-format off
182 AMDGPUGenSubtargetInfo(TT, GPU, /*TuneCPU*/ GPU, FS),
183 AMDGPUSubtarget(TT),
184 TargetID(*this),
185 InstrItins(getInstrItineraryForCPU(GPU)),
// Feature/default initialization must run before SIInstrInfo is built.
186 InstrInfo(initializeSubtargetDependencies(TT, GPU, FS)),
187 TLInfo(TM, *this),
188 // Frame index expansion sometimes assumes the low bit of SP is 0
189 FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0,
190 /*TransAl=*/Align(4)) {
191 // clang-format on
194
195 TSInfo = std::make_unique<AMDGPUSelectionDAGInfo>();
196
// GlobalISel support objects; InstSelector depends on RegBankInfo.
197 CallLoweringInfo = std::make_unique<AMDGPUCallLowering>(*getTargetLowering());
198 InlineAsmLoweringInfo =
199 std::make_unique<InlineAsmLowering>(getTargetLowering());
200 Legalizer = std::make_unique<AMDGPULegalizerInfo>(*this, TM);
201 RegBankInfo = std::make_unique<AMDGPURegisterBankInfo>(*this);
202 InstSelector =
203 std::make_unique<AMDGPUInstructionSelector>(*this, *RegBankInfo, TM);
204}
205
207 return TSInfo.get();
208}
209
210unsigned GCNSubtarget::getConstantBusLimit(unsigned Opcode) const {
211 if (getGeneration() < GFX10)
212 return 1;
213
214 switch (Opcode) {
215 case AMDGPU::V_LSHLREV_B64_e64:
216 case AMDGPU::V_LSHLREV_B64_gfx10:
217 case AMDGPU::V_LSHLREV_B64_e64_gfx11:
218 case AMDGPU::V_LSHLREV_B64_e32_gfx12:
219 case AMDGPU::V_LSHLREV_B64_e64_gfx12:
220 case AMDGPU::V_LSHL_B64_e64:
221 case AMDGPU::V_LSHRREV_B64_e64:
222 case AMDGPU::V_LSHRREV_B64_gfx10:
223 case AMDGPU::V_LSHRREV_B64_e64_gfx11:
224 case AMDGPU::V_LSHRREV_B64_e64_gfx12:
225 case AMDGPU::V_LSHR_B64_e64:
226 case AMDGPU::V_ASHRREV_I64_e64:
227 case AMDGPU::V_ASHRREV_I64_gfx10:
228 case AMDGPU::V_ASHRREV_I64_e64_gfx11:
229 case AMDGPU::V_ASHRREV_I64_e64_gfx12:
230 case AMDGPU::V_ASHR_I64_e64:
231 return 1;
232 }
233
234 return 2;
235}
236
237/// This list was mostly derived from experimentation.
238bool GCNSubtarget::zeroesHigh16BitsOfDest(unsigned Opcode) const {
239 switch (Opcode) {
240 case AMDGPU::V_CVT_F16_F32_e32:
241 case AMDGPU::V_CVT_F16_F32_e64:
242 case AMDGPU::V_CVT_F16_U16_e32:
243 case AMDGPU::V_CVT_F16_U16_e64:
244 case AMDGPU::V_CVT_F16_I16_e32:
245 case AMDGPU::V_CVT_F16_I16_e64:
246 case AMDGPU::V_RCP_F16_e64:
247 case AMDGPU::V_RCP_F16_e32:
248 case AMDGPU::V_RSQ_F16_e64:
249 case AMDGPU::V_RSQ_F16_e32:
250 case AMDGPU::V_SQRT_F16_e64:
251 case AMDGPU::V_SQRT_F16_e32:
252 case AMDGPU::V_LOG_F16_e64:
253 case AMDGPU::V_LOG_F16_e32:
254 case AMDGPU::V_EXP_F16_e64:
255 case AMDGPU::V_EXP_F16_e32:
256 case AMDGPU::V_SIN_F16_e64:
257 case AMDGPU::V_SIN_F16_e32:
258 case AMDGPU::V_COS_F16_e64:
259 case AMDGPU::V_COS_F16_e32:
260 case AMDGPU::V_FLOOR_F16_e64:
261 case AMDGPU::V_FLOOR_F16_e32:
262 case AMDGPU::V_CEIL_F16_e64:
263 case AMDGPU::V_CEIL_F16_e32:
264 case AMDGPU::V_TRUNC_F16_e64:
265 case AMDGPU::V_TRUNC_F16_e32:
266 case AMDGPU::V_RNDNE_F16_e64:
267 case AMDGPU::V_RNDNE_F16_e32:
268 case AMDGPU::V_FRACT_F16_e64:
269 case AMDGPU::V_FRACT_F16_e32:
270 case AMDGPU::V_FREXP_MANT_F16_e64:
271 case AMDGPU::V_FREXP_MANT_F16_e32:
272 case AMDGPU::V_FREXP_EXP_I16_F16_e64:
273 case AMDGPU::V_FREXP_EXP_I16_F16_e32:
274 case AMDGPU::V_LDEXP_F16_e64:
275 case AMDGPU::V_LDEXP_F16_e32:
276 case AMDGPU::V_LSHLREV_B16_e64:
277 case AMDGPU::V_LSHLREV_B16_e32:
278 case AMDGPU::V_LSHRREV_B16_e64:
279 case AMDGPU::V_LSHRREV_B16_e32:
280 case AMDGPU::V_ASHRREV_I16_e64:
281 case AMDGPU::V_ASHRREV_I16_e32:
282 case AMDGPU::V_ADD_U16_e64:
283 case AMDGPU::V_ADD_U16_e32:
284 case AMDGPU::V_SUB_U16_e64:
285 case AMDGPU::V_SUB_U16_e32:
286 case AMDGPU::V_SUBREV_U16_e64:
287 case AMDGPU::V_SUBREV_U16_e32:
288 case AMDGPU::V_MUL_LO_U16_e64:
289 case AMDGPU::V_MUL_LO_U16_e32:
290 case AMDGPU::V_ADD_F16_e64:
291 case AMDGPU::V_ADD_F16_e32:
292 case AMDGPU::V_SUB_F16_e64:
293 case AMDGPU::V_SUB_F16_e32:
294 case AMDGPU::V_SUBREV_F16_e64:
295 case AMDGPU::V_SUBREV_F16_e32:
296 case AMDGPU::V_MUL_F16_e64:
297 case AMDGPU::V_MUL_F16_e32:
298 case AMDGPU::V_MAX_F16_e64:
299 case AMDGPU::V_MAX_F16_e32:
300 case AMDGPU::V_MIN_F16_e64:
301 case AMDGPU::V_MIN_F16_e32:
302 case AMDGPU::V_MAX_U16_e64:
303 case AMDGPU::V_MAX_U16_e32:
304 case AMDGPU::V_MIN_U16_e64:
305 case AMDGPU::V_MIN_U16_e32:
306 case AMDGPU::V_MAX_I16_e64:
307 case AMDGPU::V_MAX_I16_e32:
308 case AMDGPU::V_MIN_I16_e64:
309 case AMDGPU::V_MIN_I16_e32:
310 case AMDGPU::V_MAD_F16_e64:
311 case AMDGPU::V_MAD_U16_e64:
312 case AMDGPU::V_MAD_I16_e64:
313 case AMDGPU::V_FMA_F16_e64:
314 case AMDGPU::V_DIV_FIXUP_F16_e64:
315 // On gfx10, all 16-bit instructions preserve the high bits.
317 case AMDGPU::V_MADAK_F16:
318 case AMDGPU::V_MADMK_F16:
319 case AMDGPU::V_MAC_F16_e64:
320 case AMDGPU::V_MAC_F16_e32:
321 case AMDGPU::V_FMAMK_F16:
322 case AMDGPU::V_FMAAK_F16:
323 case AMDGPU::V_FMAC_F16_e64:
324 case AMDGPU::V_FMAC_F16_e32:
325 // In gfx9, the preferred handling of the unused high 16-bits changed. Most
326 // instructions maintain the legacy behavior of 0ing. Some instructions
327 // changed to preserving the high bits.
329 case AMDGPU::V_MAD_MIXLO_F16:
330 case AMDGPU::V_MAD_MIXHI_F16:
331 default:
332 return false;
333 }
334}
335
337 const SchedRegion &Region) const {
338 // Track register pressure so the scheduler can try to decrease
339 // pressure once register usage is above the threshold defined by
340 // SIRegisterInfo::getRegPressureSetLimit()
341 Policy.ShouldTrackPressure = true;
342
343 const Function &F = Region.RegionBegin->getMF()->getFunction();
344 if (AMDGPU::getSchedStrategy(F) == "coexec") {
345 Policy.OnlyTopDown = true;
346 Policy.OnlyBottomUp = false;
347 return;
348 }
349
350 // Enabling both top down and bottom up scheduling seems to give us less
351 // register spills than just using one of these approaches on its own.
352 Policy.OnlyTopDown = false;
353 Policy.OnlyBottomUp = false;
354
355 // Enabling ShouldTrackLaneMasks crashes the SI Machine Scheduler.
356 if (!enableSIScheduler())
357 Policy.ShouldTrackLaneMasks = true;
358}
359
361 const SchedRegion &Region) const {
362 const Function &F = Region.RegionBegin->getMF()->getFunction();
363 Attribute PostRADirectionAttr = F.getFnAttribute("amdgpu-post-ra-direction");
364 if (!PostRADirectionAttr.isValid())
365 return;
366
367 StringRef PostRADirectionStr = PostRADirectionAttr.getValueAsString();
368 if (PostRADirectionStr == "topdown") {
369 Policy.OnlyTopDown = true;
370 Policy.OnlyBottomUp = false;
371 } else if (PostRADirectionStr == "bottomup") {
372 Policy.OnlyTopDown = false;
373 Policy.OnlyBottomUp = true;
374 } else if (PostRADirectionStr == "bidirectional") {
375 Policy.OnlyTopDown = false;
376 Policy.OnlyBottomUp = false;
377 } else {
379 F, F.getSubprogram(), "invalid value for postRA direction attribute");
380 F.getContext().diagnose(Diag);
381 }
382
383 LLVM_DEBUG({
384 const char *DirStr = "default";
385 if (Policy.OnlyTopDown && !Policy.OnlyBottomUp)
386 DirStr = "topdown";
387 else if (!Policy.OnlyTopDown && Policy.OnlyBottomUp)
388 DirStr = "bottomup";
389 else if (!Policy.OnlyTopDown && !Policy.OnlyBottomUp)
390 DirStr = "bidirectional";
391
392 dbgs() << "Post-MI-sched direction (" << F.getName() << "): " << DirStr
393 << '\n';
394 });
395}
396
398 if (isWave32()) {
399 // Fix implicit $vcc operands after MIParser has verified that they match
400 // the instruction definitions.
401 for (auto &MBB : MF) {
402 for (auto &MI : MBB)
403 InstrInfo.fixImplicitOperands(MI);
404 }
405 }
406}
407
409 return InstrInfo.pseudoToMCOpcode(AMDGPU::V_MAD_F16_e64) != -1;
410}
411
413 return hasVGPRIndexMode() && (!hasMovrel() || EnableVGPRIndexMode);
414}
415
416bool GCNSubtarget::useAA() const { return UseAA; }
417
422
423unsigned
425 unsigned DynamicVGPRBlockSize) const {
427 DynamicVGPRBlockSize);
428}
429
430unsigned
431GCNSubtarget::getBaseReservedNumSGPRs(const bool HasFlatScratch) const {
433 return 2; // VCC. FLAT_SCRATCH and XNACK are no longer in SGPRs.
434
435 if (HasFlatScratch || HasArchitectedFlatScratch) {
437 return 6; // FLAT_SCRATCH, XNACK, VCC (in that order).
439 return 4; // FLAT_SCRATCH, VCC (in that order).
440 }
441
442 if (isXNACKEnabled())
443 return 4; // XNACK, VCC (in that order).
444 return 2; // VCC.
445}
446
451
453 // In principle we do not need to reserve SGPR pair used for flat_scratch if
454 // we know flat instructions do not access the stack anywhere in the
455 // program. For now assume it's needed if we have flat instructions.
456 const bool KernelUsesFlatScratch = hasFlatAddressSpace();
457 return getBaseReservedNumSGPRs(KernelUsesFlatScratch);
458}
459
460std::pair<unsigned, unsigned>
462 unsigned NumSGPRs, unsigned NumVGPRs) const {
463 unsigned DynamicVGPRBlockSize = AMDGPU::getDynamicVGPRBlockSize(F);
464 // Temporarily check both the attribute and the subtarget feature until the
465 // latter is removed.
466 if (DynamicVGPRBlockSize == 0 && isDynamicVGPREnabled())
467 DynamicVGPRBlockSize = getDynamicVGPRBlockSize();
468
469 auto [MinOcc, MaxOcc] = getOccupancyWithWorkGroupSizes(LDSSize, F);
470 unsigned SGPROcc = getOccupancyWithNumSGPRs(NumSGPRs);
471 unsigned VGPROcc = getOccupancyWithNumVGPRs(NumVGPRs, DynamicVGPRBlockSize);
472
473 // Maximum occupancy may be further limited by high SGPR/VGPR usage.
474 MaxOcc = std::min(MaxOcc, std::min(SGPROcc, VGPROcc));
475 return {std::min(MinOcc, MaxOcc), MaxOcc};
476}
477
479 const Function &F, std::pair<unsigned, unsigned> WavesPerEU,
480 unsigned PreloadedSGPRs, unsigned ReservedNumSGPRs) const {
481 // Compute maximum number of SGPRs function can use using default/requested
482 // minimum number of waves per execution unit.
483 unsigned MaxNumSGPRs = getMaxNumSGPRs(WavesPerEU.first, false);
484 unsigned MaxAddressableNumSGPRs = getMaxNumSGPRs(WavesPerEU.first, true);
485
486 // Check if maximum number of SGPRs was explicitly requested using
487 // "amdgpu-num-sgpr" attribute.
488 unsigned Requested =
489 F.getFnAttributeAsParsedInteger("amdgpu-num-sgpr", MaxNumSGPRs);
490
491 if (Requested != MaxNumSGPRs) {
492 // Make sure requested value does not violate subtarget's specifications.
493 if (Requested && (Requested <= ReservedNumSGPRs))
494 Requested = 0;
495
496 // If more SGPRs are required to support the input user/system SGPRs,
497 // increase to accommodate them.
498 //
499 // FIXME: This really ends up using the requested number of SGPRs + number
500 // of reserved special registers in total. Theoretically you could re-use
501 // the last input registers for these special registers, but this would
502 // require a lot of complexity to deal with the weird aliasing.
503 unsigned InputNumSGPRs = PreloadedSGPRs;
504 if (Requested && Requested < InputNumSGPRs)
505 Requested = InputNumSGPRs;
506
507 // Make sure requested value is compatible with values implied by
508 // default/requested minimum/maximum number of waves per execution unit.
509 if (Requested && Requested > getMaxNumSGPRs(WavesPerEU.first, false))
510 Requested = 0;
511 if (WavesPerEU.second && Requested &&
512 Requested < getMinNumSGPRs(WavesPerEU.second))
513 Requested = 0;
514
515 if (Requested)
516 MaxNumSGPRs = Requested;
517 }
518
519 if (hasSGPRInitBug())
521
522 return std::min(MaxNumSGPRs - ReservedNumSGPRs, MaxAddressableNumSGPRs);
523}
524
// NOTE(review): the extraction dropped this function's signature (orig. line
// 525) and its return statement (orig. lines 527-529). Per the surrounding
// code this is presumably GCNSubtarget::getMaxNumSGPRs(const MachineFunction
// &MF), delegating to getBaseMaxNumSGPRs() — restore from the LLVM tree.
526 const Function &F = MF.getFunction();
530}
531
533 using USI = GCNUserSGPRUsageInfo;
534 // Max number of user SGPRs
535 const unsigned MaxUserSGPRs =
536 USI::getNumUserSGPRForField(USI::PrivateSegmentBufferID) +
537 USI::getNumUserSGPRForField(USI::DispatchPtrID) +
538 USI::getNumUserSGPRForField(USI::QueuePtrID) +
539 USI::getNumUserSGPRForField(USI::KernargSegmentPtrID) +
540 USI::getNumUserSGPRForField(USI::DispatchIdID) +
541 USI::getNumUserSGPRForField(USI::FlatScratchInitID) +
542 USI::getNumUserSGPRForField(USI::ImplicitBufferPtrID);
543
544 // Max number of system SGPRs
545 const unsigned MaxSystemSGPRs = 1 + // WorkGroupIDX
546 1 + // WorkGroupIDY
547 1 + // WorkGroupIDZ
548 1 + // WorkGroupInfo
549 1; // private segment wave byte offset
550
551 // Max number of synthetic SGPRs
552 const unsigned SyntheticSGPRs = 1; // LDSKernelId
553
554 return MaxUserSGPRs + MaxSystemSGPRs + SyntheticSGPRs;
555}
556
561
563 const Function &F, std::pair<unsigned, unsigned> NumVGPRBounds) const {
564 const auto [Min, Max] = NumVGPRBounds;
565
566 // Check if maximum number of VGPRs was explicitly requested using
567 // "amdgpu-num-vgpr" attribute.
568
569 unsigned Requested = F.getFnAttributeAsParsedInteger("amdgpu-num-vgpr", Max);
570 if (Requested != Max && hasGFX90AInsts())
571 Requested *= 2;
572
573 // Make sure requested value is inside the range of possible VGPR usage.
574 return std::clamp(Requested, Min, Max);
575}
576
578 // Temporarily check both the attribute and the subtarget feature, until the
579 // latter is removed.
580 unsigned DynamicVGPRBlockSize = AMDGPU::getDynamicVGPRBlockSize(F);
581 if (DynamicVGPRBlockSize == 0 && isDynamicVGPREnabled())
582 DynamicVGPRBlockSize = getDynamicVGPRBlockSize();
583
584 std::pair<unsigned, unsigned> Waves = getWavesPerEU(F);
585 return getBaseMaxNumVGPRs(
586 F, {getMinNumVGPRs(Waves.second, DynamicVGPRBlockSize),
587 getMaxNumVGPRs(Waves.first, DynamicVGPRBlockSize)});
588}
589
591 return getMaxNumVGPRs(MF.getFunction());
592}
593
594std::pair<unsigned, unsigned>
596 const unsigned MaxVectorRegs = getMaxNumVGPRs(F);
597
598 unsigned MaxNumVGPRs = MaxVectorRegs;
599 unsigned MaxNumAGPRs = 0;
600 unsigned NumArchVGPRs = getAddressableNumArchVGPRs();
601
602 // On GFX90A, the number of VGPRs and AGPRs need not be equal. Theoretically,
603 // a wave may have up to 512 total vector registers combining together both
604 // VGPRs and AGPRs. Hence, in an entry function without calls and without
605 // AGPRs used within it, it is possible to use the whole vector register
606 // budget for VGPRs.
607 //
608 // TODO: it shall be possible to estimate maximum AGPR/VGPR pressure and split
609 // register file accordingly.
610 if (hasGFX90AInsts()) {
611 unsigned MinNumAGPRs = 0;
612 const unsigned TotalNumAGPRs = AMDGPU::AGPR_32RegClass.getNumRegs();
613
614 const std::pair<unsigned, unsigned> DefaultNumAGPR = {~0u, ~0u};
615
616 // TODO: The lower bound should probably force the number of required
617 // registers up, overriding amdgpu-waves-per-eu.
618 std::tie(MinNumAGPRs, MaxNumAGPRs) =
619 AMDGPU::getIntegerPairAttribute(F, "amdgpu-agpr-alloc", DefaultNumAGPR,
620 /*OnlyFirstRequired=*/true);
621
622 if (MinNumAGPRs == DefaultNumAGPR.first) {
623 // Default to splitting half the registers if AGPRs are required.
624 MinNumAGPRs = MaxNumAGPRs = MaxVectorRegs / 2;
625 } else {
626 // Align to accum_offset's allocation granularity.
627 MinNumAGPRs = alignTo(MinNumAGPRs, 4);
628
629 MinNumAGPRs = std::min(MinNumAGPRs, TotalNumAGPRs);
630 }
631
632 // Clamp values to be inbounds of our limits, and ensure min <= max.
633
634 MaxNumAGPRs = std::min(std::max(MinNumAGPRs, MaxNumAGPRs), MaxVectorRegs);
635 MinNumAGPRs = std::min(std::min(MinNumAGPRs, TotalNumAGPRs), MaxNumAGPRs);
636
637 MaxNumVGPRs = std::min(MaxVectorRegs - MinNumAGPRs, NumArchVGPRs);
638 MaxNumAGPRs = std::min(MaxVectorRegs - MaxNumVGPRs, MaxNumAGPRs);
639
640 assert(MaxNumVGPRs + MaxNumAGPRs <= MaxVectorRegs &&
641 MaxNumAGPRs <= TotalNumAGPRs && MaxNumVGPRs <= NumArchVGPRs &&
642 "invalid register counts");
643 } else if (hasMAIInsts()) {
644 // On gfx908 the number of AGPRs always equals the number of VGPRs.
645 MaxNumAGPRs = MaxNumVGPRs = MaxVectorRegs;
646 }
647
648 return std::pair(MaxNumVGPRs, MaxNumAGPRs);
649}
650
651// Check to which source operand UseOpIdx points to and return a pointer to the
652// operand of the corresponding source modifier.
653// Return nullptr if UseOpIdx either doesn't point to src0/1/2 or if there is no
654// operand for the corresponding source modifier.
655static const MachineOperand *
657 const SIInstrInfo &InstrInfo) {
658 AMDGPU::OpName UseName =
659 AMDGPU::getOperandIdxName(UseI.getOpcode(), UseOpIdx);
660 switch (UseName) {
661 case AMDGPU::OpName::src0:
662 return InstrInfo.getNamedOperand(UseI, AMDGPU::OpName::src0_modifiers);
663 case AMDGPU::OpName::src1:
664 return InstrInfo.getNamedOperand(UseI, AMDGPU::OpName::src1_modifiers);
665 case AMDGPU::OpName::src2:
666 return InstrInfo.getNamedOperand(UseI, AMDGPU::OpName::src2_modifiers);
667 default:
668 return nullptr;
669 }
670}
671
672// Get the subreg idx of the subreg that is used by the given instruction
673// operand, considering the given op_sel modifier.
674// Return 0 if the whole register is used or as a conservative fallback.
676 const SIInstrInfo &InstrInfo,
677 const MachineInstr &I,
678 const MachineOperand &Op) {
679 if (!InstrInfo.isVOP3P(I) || InstrInfo.isWMMA(I) || InstrInfo.isSWMMAC(I))
680 return AMDGPU::NoSubRegister;
681
682 const MachineOperand *OpMod =
683 getVOP3PSourceModifierFromOpIdx(I, Op.getOperandNo(), InstrInfo);
684 if (!OpMod)
685 return AMDGPU::NoSubRegister;
686
687 // Note: the FMA_MIX* and MAD_MIX* instructions have different semantics for
688 // the op_sel and op_sel_hi source modifiers:
689 // - op_sel: selects low/high operand bits as input to the operation;
690 // has only meaning for 16-bit source operands
691 // - op_sel_hi: specifies the size of the source operands (16 or 32 bits);
692 // a value of 0 indicates 32 bit, 1 indicates 16 bit
693 // For the other VOP3P instructions, the semantics are:
694 // - op_sel: selects low/high operand bits as input to the operation which
695 // results in the lower-half of the destination
696 // - op_sel_hi: selects the low/high operand bits as input to the operation
697 // which results in the higher-half of the destination
698 int64_t OpSel = OpMod->getImm() & SISrcMods::OP_SEL_0;
699 int64_t OpSelHi = OpMod->getImm() & SISrcMods::OP_SEL_1;
700
701 // Check if all parts of the register are being used (= op_sel and op_sel_hi
702 // differ for VOP3P or op_sel_hi=0 for VOP3PMix). In that case we can return
703 // early.
704 if ((!InstrInfo.isVOP3PMix(I) && (!OpSel || !OpSelHi) &&
705 (OpSel || OpSelHi)) ||
706 (InstrInfo.isVOP3PMix(I) && !OpSelHi))
707 return AMDGPU::NoSubRegister;
708
709 const MachineRegisterInfo &MRI = I.getParent()->getParent()->getRegInfo();
710 const TargetRegisterClass *RC = TRI.getRegClassForOperandReg(MRI, Op);
711
712 if (unsigned SubRegIdx = OpSel ? AMDGPU::sub1 : AMDGPU::sub0;
713 TRI.getSubClassWithSubReg(RC, SubRegIdx) == RC)
714 return SubRegIdx;
715 if (unsigned SubRegIdx = OpSel ? AMDGPU::hi16 : AMDGPU::lo16;
716 TRI.getSubClassWithSubReg(RC, SubRegIdx) == RC)
717 return SubRegIdx;
718
719 return AMDGPU::NoSubRegister;
720}
721
722Register GCNSubtarget::getRealSchedDependency(const MachineInstr &DefI,
723 int DefOpIdx,
724 const MachineInstr &UseI,
725 int UseOpIdx) const {
726 const SIRegisterInfo *TRI = getRegisterInfo();
727 const MachineOperand &DefOp = DefI.getOperand(DefOpIdx);
728 const MachineOperand &UseOp = UseI.getOperand(UseOpIdx);
729 Register DefReg = DefOp.getReg();
730 Register UseReg = UseOp.getReg();
731
732 // If the registers aren't restricted to a sub-register, there is no point in
733 // further analysis. This check makes only sense for virtual registers because
734 // physical registers may form a tuple and thus be part of a superregister
735 // although they are not a subregister themselves (vgpr0 is a "subreg" of
736 // vgpr0_vgpr1 without being a subreg in itself).
737 unsigned DefSubRegIdx = DefOp.getSubReg();
738 if (DefReg.isVirtual() && DefSubRegIdx == AMDGPU::NoSubRegister)
739 return DefReg;
740 unsigned UseSubRegIdx = getEffectiveSubRegIdx(*TRI, InstrInfo, UseI, UseOp);
741 if (UseReg.isVirtual() && UseSubRegIdx == AMDGPU::NoSubRegister)
742 return DefReg;
743
744 if (!TRI->checkSubRegInterference(DefReg, DefSubRegIdx, UseReg, UseSubRegIdx))
745 return Register(); // No real dependency
746
747 // UseReg might be smaller or larger than DefReg, depending on the subreg and
748 // on whether DefReg is a subreg, too. -> Find the smaller one. This does not
749 // apply to virtual registers because we cannot construct a subreg for them.
750 if (DefReg.isVirtual())
751 return DefReg;
752 MCRegister DefMCReg =
753 DefSubRegIdx ? TRI->getSubReg(DefReg, DefSubRegIdx) : DefReg.asMCReg();
754 MCRegister UseMCReg =
755 UseSubRegIdx ? TRI->getSubReg(UseReg, UseSubRegIdx) : UseReg.asMCReg();
756 return TRI->isSubRegisterEq(DefMCReg, UseMCReg) ? UseMCReg : DefMCReg;
757}
758
760 SUnit *Def, int DefOpIdx, SUnit *Use, int UseOpIdx, SDep &Dep,
761 const TargetSchedModel *SchedModel) const {
762 if (Dep.getKind() != SDep::Kind::Data || !Dep.getReg() || !Def->isInstr() ||
763 !Use->isInstr())
764 return;
765
766 MachineInstr *DefI = Def->getInstr();
767 MachineInstr *UseI = Use->getInstr();
768
769 if (Register Reg = getRealSchedDependency(*DefI, DefOpIdx, *UseI, UseOpIdx)) {
770 Dep.setReg(Reg);
771 } else {
772 Dep = SDep(Def, SDep::Artificial);
773 return; // This is not a data dependency anymore.
774 }
775
776 if (DefI->isBundle()) {
778 auto Reg = Dep.getReg();
781 unsigned Lat = 0;
782 for (++I; I != E && I->isBundledWithPred(); ++I) {
783 if (I->isMetaInstruction())
784 continue;
785 if (I->modifiesRegister(Reg, TRI))
786 Lat = InstrInfo.getInstrLatency(getInstrItineraryData(), *I);
787 else if (Lat)
788 --Lat;
789 }
790 Dep.setLatency(Lat);
791 } else if (UseI->isBundle()) {
793 auto Reg = Dep.getReg();
796 unsigned Lat = InstrInfo.getInstrLatency(getInstrItineraryData(), *DefI);
797 for (++I; I != E && I->isBundledWithPred() && Lat; ++I) {
798 if (I->isMetaInstruction())
799 continue;
800 if (I->readsRegister(Reg, TRI))
801 break;
802 --Lat;
803 }
804 Dep.setLatency(Lat);
805 } else if (Dep.getLatency() == 0 && Dep.getReg() == AMDGPU::VCC_LO) {
806 // Work around the fact that SIInstrInfo::fixImplicitOperands modifies
807 // implicit operands which come from the MCInstrDesc, which can fool
808 // ScheduleDAGInstrs::addPhysRegDataDeps into treating them as implicit
809 // pseudo operands.
810 Dep.setLatency(InstrInfo.getSchedModel().computeOperandLatency(
811 DefI, DefOpIdx, UseI, UseOpIdx));
812 }
813}
814
// NOTE(review): the extraction dropped this function's signature and leading
// guard (orig. lines 815-816; presumably "unsigned
// GCNSubtarget::getNSAThreshold(const MachineFunction &MF) const" guarding
// generations where MIMG NSA does not apply) and the start of the attribute
// query on orig. line 822 — restore from the LLVM tree.
//
// Resolution order: explicit -amdgpu-nsa-threshold flag (clamped to >= 2),
// then the "amdgpu-nsa-threshold" function attribute (clamped to >= 2), then
// the flag's default value.
817 return 0; // Not MIMG encoding.
818
819 if (NSAThreshold.getNumOccurrences() > 0)
820 return std::max(NSAThreshold.getValue(), 2u);
821
823 "amdgpu-nsa-threshold", -1);
824 if (Value > 0)
825 return std::max(Value, 2);
826
827 return NSAThreshold;
828}
829
831 const GCNSubtarget &ST)
832 : ST(ST) {
833 const CallingConv::ID CC = F.getCallingConv();
834 const bool IsKernel =
836
837 if (IsKernel && (!F.arg_empty() || ST.getImplicitArgNumBytes(F) != 0))
838 KernargSegmentPtr = true;
839
840 bool IsAmdHsaOrMesa = ST.isAmdHsaOrMesa(F);
841 if (IsAmdHsaOrMesa && !ST.hasFlatScratchEnabled())
842 PrivateSegmentBuffer = true;
843 else if (ST.isMesaGfxShader(F))
844 ImplicitBufferPtr = true;
845
846 if (!AMDGPU::isGraphics(CC)) {
847 if (!F.hasFnAttribute("amdgpu-no-dispatch-ptr"))
848 DispatchPtr = true;
849
850 // FIXME: Can this always be disabled with < COv5?
851 if (!F.hasFnAttribute("amdgpu-no-queue-ptr"))
852 QueuePtr = true;
853
854 if (!F.hasFnAttribute("amdgpu-no-dispatch-id"))
855 DispatchID = true;
856 }
857
858 if (ST.hasFlatAddressSpace() && AMDGPU::isEntryFunctionCC(CC) &&
859 (IsAmdHsaOrMesa || ST.hasFlatScratchEnabled()) &&
860 // FlatScratchInit cannot be true for graphics CC if
861 // hasFlatScratchEnabled() is false.
862 (ST.hasFlatScratchEnabled() ||
863 (!AMDGPU::isGraphics(CC) &&
864 !F.hasFnAttribute("amdgpu-no-flat-scratch-init"))) &&
865 !ST.hasArchitectedFlatScratch()) {
866 FlatScratchInit = true;
867 }
868
870 NumUsedUserSGPRs += getNumUserSGPRForField(ImplicitBufferPtrID);
871
874
875 if (hasDispatchPtr())
876 NumUsedUserSGPRs += getNumUserSGPRForField(DispatchPtrID);
877
878 if (hasQueuePtr())
879 NumUsedUserSGPRs += getNumUserSGPRForField(QueuePtrID);
880
882 NumUsedUserSGPRs += getNumUserSGPRForField(KernargSegmentPtrID);
883
884 if (hasDispatchID())
885 NumUsedUserSGPRs += getNumUserSGPRForField(DispatchIdID);
886
887 if (hasFlatScratchInit())
888 NumUsedUserSGPRs += getNumUserSGPRForField(FlatScratchInitID);
889
891 NumUsedUserSGPRs += getNumUserSGPRForField(PrivateSegmentSizeID);
892}
893
895 assert(NumKernargPreloadSGPRs + NumSGPRs <= AMDGPU::getMaxNumUserSGPRs(ST));
896 NumKernargPreloadSGPRs += NumSGPRs;
897 NumUsedUserSGPRs += NumSGPRs;
898}
899
901 return AMDGPU::getMaxNumUserSGPRs(ST) - NumUsedUserSGPRs;
902}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > UseAA("aarch64-use-aa", cl::init(true), cl::desc("Enable the use of AA during codegen."))
This file describes how to lower LLVM calls to machine code calls.
This file declares the targeting of the InstructionSelector class for AMDGPU.
This file declares the targeting of the Machinelegalizer class for AMDGPU.
This file declares the targeting of the RegisterBankInfo class for AMDGPU.
The AMDGPU TargetMachine interface definition for hw codegen targets.
MachineBasicBlock & MBB
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static cl::opt< unsigned > NSAThreshold("amdgpu-nsa-threshold", cl::desc("Number of addresses from which to enable MIMG NSA."), cl::init(2), cl::Hidden)
static cl::opt< bool > EnableVGPRIndexMode("amdgpu-vgpr-index-mode", cl::desc("Use GPR indexing mode instead of movrel for vector indexing"), cl::init(false))
static cl::opt< bool > UseAA("amdgpu-use-aa-in-codegen", cl::desc("Enable the use of AA during codegen."), cl::init(true))
static const MachineOperand * getVOP3PSourceModifierFromOpIdx(const MachineInstr &UseI, int UseOpIdx, const SIInstrInfo &InstrInfo)
static unsigned getEffectiveSubRegIdx(const SIRegisterInfo &TRI, const SIInstrInfo &InstrInfo, const MachineInstr &I, const MachineOperand &Op)
AMD GCN specific subclass of TargetSubtarget.
static Register UseReg(const MachineOperand &MO)
IRTranslator LLVM IR MI
This file describes how to lower LLVM inline asm to machine code INLINEASM.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
if(PassOpts->AAPipeline)
This file defines the SmallString class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
std::pair< unsigned, unsigned > getWavesPerEU(const Function &F) const
std::pair< unsigned, unsigned > getOccupancyWithWorkGroupSizes(uint32_t LDSBytes, const Function &F) const
Subtarget's minimum/maximum occupancy, in number of waves per EU, that can be achieved when the only ...
unsigned getWavefrontSizeLog2() const
AMDGPUSubtarget(const Triple &TT)
unsigned AddressableLocalMemorySize
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
Diagnostic information for optimization failures.
Diagnostic information for unsupported feature in backend.
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
Definition Function.cpp:775
bool hasFlat() const
InstrItineraryData InstrItins
bool useVGPRIndexMode() const
void mirFileLoaded(MachineFunction &MF) const override
unsigned MaxPrivateElementSize
unsigned getAddressableNumArchVGPRs() const
unsigned getMinNumSGPRs(unsigned WavesPerEU) const
void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
unsigned getConstantBusLimit(unsigned Opcode) const
const InstrItineraryData * getInstrItineraryData() const override
void adjustSchedDependency(SUnit *Def, int DefOpIdx, SUnit *Use, int UseOpIdx, SDep &Dep, const TargetSchedModel *SchedModel) const override
void overridePostRASchedPolicy(MachineSchedPolicy &Policy, const SchedRegion &Region) const override
Align getStackAlignment() const
bool hasMadF16() const
unsigned getMinNumVGPRs(unsigned WavesPerEU, unsigned DynamicVGPRBlockSize) const
bool isDynamicVGPREnabled() const
const SIRegisterInfo * getRegisterInfo() const override
unsigned getBaseMaxNumVGPRs(const Function &F, std::pair< unsigned, unsigned > NumVGPRBounds) const
bool zeroesHigh16BitsOfDest(unsigned Opcode) const
Returns if the result of this instruction with a 16-bit result returned in a 32-bit register implicit...
unsigned getBaseMaxNumSGPRs(const Function &F, std::pair< unsigned, unsigned > WavesPerEU, unsigned PreloadedSGPRs, unsigned ReservedNumSGPRs) const
unsigned getMaxNumPreloadedSGPRs() const
GCNSubtarget & initializeSubtargetDependencies(const Triple &TT, StringRef GPU, StringRef FS)
void overrideSchedPolicy(MachineSchedPolicy &Policy, const SchedRegion &Region) const override
std::pair< unsigned, unsigned > computeOccupancy(const Function &F, unsigned LDSSize=0, unsigned NumSGPRs=0, unsigned NumVGPRs=0) const
Subtarget's minimum/maximum occupancy, in number of waves per EU, that can be achieved when the only ...
unsigned getMaxNumVGPRs(unsigned WavesPerEU, unsigned DynamicVGPRBlockSize) const
const SITargetLowering * getTargetLowering() const override
unsigned getNSAThreshold(const MachineFunction &MF) const
unsigned getReservedNumSGPRs(const MachineFunction &MF) const
bool useAA() const override
bool isWave32() const
unsigned getOccupancyWithNumVGPRs(unsigned VGPRs, unsigned DynamicVGPRBlockSize) const
Return the maximum number of waves per SIMD for kernels using VGPRs VGPRs.
unsigned InstCacheLineSize
unsigned getOccupancyWithNumSGPRs(unsigned SGPRs) const
Return the maximum number of waves per SIMD for kernels using SGPRs SGPRs.
unsigned getMaxWavesPerEU() const
Generation getGeneration() const
GCNSubtarget(const Triple &TT, StringRef GPU, StringRef FS, const GCNTargetMachine &TM)
unsigned getMaxNumSGPRs(unsigned WavesPerEU, bool Addressable) const
std::pair< unsigned, unsigned > getMaxNumVectorRegs(const Function &F) const
Return a pair of maximum numbers of VGPRs and AGPRs that meet the number of waves per execution unit ...
bool isXNACKEnabled() const
unsigned getBaseReservedNumSGPRs(const bool HasFlatScratch) const
bool hasAddr64() const
unsigned getDynamicVGPRBlockSize() const
void checkSubtargetFeatures(const Function &F) const
Diagnose inconsistent subtarget features before attempting to codegen function F.
~GCNSubtarget() override
const SelectionDAGTargetInfo * getSelectionDAGInfo() const override
AMDGPU::IsaInfo::AMDGPUTargetID TargetID
static unsigned getNumUserSGPRForField(UserSGPRID ID)
void allocKernargPreloadSGPRs(unsigned NumSGPRs)
bool hasPrivateSegmentBuffer() const
GCNUserSGPRUsageInfo(const Function &F, const GCNSubtarget &ST)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Instructions::const_iterator const_instr_iterator
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
bool isBundle() const
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Wrapper class representing virtual and physical registers.
Definition Register.h:20
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
Definition Register.h:107
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
Scheduling dependency.
Definition ScheduleDAG.h:51
Kind getKind() const
Returns an enum value representing the kind of the dependence.
@ Data
Regular data dependence (aka true-dependence).
Definition ScheduleDAG.h:55
void setLatency(unsigned Lat)
Sets the latency for this edge.
@ Artificial
Arbitrary strong DAG edge (no real dependence).
Definition ScheduleDAG.h:74
unsigned getLatency() const
Returns the latency value for this edge, which roughly means the minimum number of cycles that must e...
Register getReg() const
Returns the register associated with this edge.
void setReg(Register Reg)
Assigns the associated register for this edge.
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
std::pair< unsigned, unsigned > getWavesPerEU() const
GCNUserSGPRUsageInfo & getUserSGPRInfo()
Scheduling unit. This is a node in the scheduling DAG.
Targets can subclass this to parameterize the SelectionDAG lowering and instruction selection process...
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Information about stack frame layout on the target.
Provide an instruction scheduling machine model to CodeGen passes.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
self_iterator getIterator()
Definition ilist_node.h:123
unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI, unsigned NumVGPRs, unsigned DynamicVGPRBlockSize)
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI)
unsigned getLocalMemorySize(const MCSubtargetInfo *STI)
unsigned getEUsPerCU(const MCSubtargetInfo *STI)
unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves, AMDGPUSubtarget::Generation Gen)
StringRef getSchedStrategy(const Function &F)
unsigned getMaxNumUserSGPRs(const MCSubtargetInfo &STI)
LLVM_READNONE constexpr bool isEntryFunctionCC(CallingConv::ID CC)
unsigned getDynamicVGPRBlockSize(const Function &F)
std::pair< unsigned, unsigned > getIntegerPairAttribute(const Function &F, StringRef Name, std::pair< unsigned, unsigned > Default, bool OnlyFirstRequired)
LLVM_READNONE constexpr bool isGraphics(CallingConv::ID CC)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ SPIR_KERNEL
Used for SPIR kernel functions.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Define a generic scheduling policy for targets that don't provide their own MachineSchedStrategy.
bool ShouldTrackLaneMasks
Track LaneMasks to allow reordering of independent subregister writes of the same vreg.
A region of an MBB for scheduling.