LLVM 18.0.0git
AMDGPUInstPrinter.cpp
Go to the documentation of this file.
1//===-- AMDGPUInstPrinter.cpp - AMDGPU MC Inst -> ASM ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7// \file
8//===----------------------------------------------------------------------===//
9
10#include "AMDGPUInstPrinter.h"
12#include "SIDefines.h"
15#include "llvm/MC/MCExpr.h"
16#include "llvm/MC/MCInst.h"
17#include "llvm/MC/MCInstrDesc.h"
18#include "llvm/MC/MCInstrInfo.h"
23
24using namespace llvm;
25using namespace llvm::AMDGPU;
26
  // NOTE(review): the signature of this function is missing from this chunk
  // (presumably printRegName(MCRegister Reg, raw_ostream &OS)) — confirm
  // against the upstream file. The body emits the raw register id only.
  // FIXME: The current implementation of
  // AsmParser::parseRegisterOrRegisterNumber in MC implies we either emit this
  // as an integer or we provide a name which represents a physical register.
  // For CFI instructions we really want to emit a name for the DWARF register
  // instead, because there may be multiple DWARF registers corresponding to a
  // single physical register. One case where this problem manifests is with
  // wave32/wave64 where using the physical register name is ambiguous: if we
  // write e.g. `.cfi_undefined v0` we lose information about the wavefront
  // size which we need to encode the register in the final DWARF. Ideally we
  // would extend MC to support parsing DWARF register names so we could do
  // something like `.cfi_undefined dwarf_wave32_v0`. For now we just live with
  // non-pretty DWARF register names in assembly text.
  OS << Reg.id();
}
42
                                  StringRef Annot, const MCSubtargetInfo &STI,
                                  raw_ostream &OS) {
  // NOTE(review): the first line of this signature and the core
  // printInstruction(MI, Address, STI, OS) call appear to be missing from
  // this chunk — confirm against the upstream file.
  OS.flush();
  // Append any instruction annotation (comment) after the instruction text.
  printAnnotation(OS, Annot);
}
50
51void AMDGPUInstPrinter::printU4ImmOperand(const MCInst *MI, unsigned OpNo,
52 const MCSubtargetInfo &STI,
53 raw_ostream &O) {
54 O << formatHex(MI->getOperand(OpNo).getImm() & 0xf);
55}
56
57void AMDGPUInstPrinter::printU16ImmOperand(const MCInst *MI, unsigned OpNo,
58 const MCSubtargetInfo &STI,
59 raw_ostream &O) {
60 // It's possible to end up with a 32-bit literal used with a 16-bit operand
61 // with ignored high bits. Print as 32-bit anyway in that case.
62 int64_t Imm = MI->getOperand(OpNo).getImm();
63 if (isInt<16>(Imm) || isUInt<16>(Imm))
64 O << formatHex(static_cast<uint64_t>(Imm & 0xffff));
65 else
66 printU32ImmOperand(MI, OpNo, STI, O);
67}
68
69void AMDGPUInstPrinter::printU4ImmDecOperand(const MCInst *MI, unsigned OpNo,
70 raw_ostream &O) {
71 O << formatDec(MI->getOperand(OpNo).getImm() & 0xf);
72}
73
74void AMDGPUInstPrinter::printU8ImmDecOperand(const MCInst *MI, unsigned OpNo,
75 raw_ostream &O) {
76 O << formatDec(MI->getOperand(OpNo).getImm() & 0xff);
77}
78
79void AMDGPUInstPrinter::printU16ImmDecOperand(const MCInst *MI, unsigned OpNo,
80 raw_ostream &O) {
81 O << formatDec(MI->getOperand(OpNo).getImm() & 0xffff);
82}
83
84void AMDGPUInstPrinter::printU32ImmOperand(const MCInst *MI, unsigned OpNo,
85 const MCSubtargetInfo &STI,
86 raw_ostream &O) {
87 O << formatHex(MI->getOperand(OpNo).getImm() & 0xffffffff);
88}
89
90void AMDGPUInstPrinter::printNamedBit(const MCInst *MI, unsigned OpNo,
91 raw_ostream &O, StringRef BitName) {
92 if (MI->getOperand(OpNo).getImm()) {
93 O << ' ' << BitName;
94 }
95}
96
97void AMDGPUInstPrinter::printOffset(const MCInst *MI, unsigned OpNo,
98 const MCSubtargetInfo &STI,
99 raw_ostream &O) {
100 uint16_t Imm = MI->getOperand(OpNo).getImm();
101 if (Imm != 0) {
102 O << " offset:";
103 printU16ImmDecOperand(MI, OpNo, O);
104 }
105}
106
void AMDGPUInstPrinter::printFlatOffset(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  // Prints the " offset:" modifier of FLAT-family instructions; a zero
  // offset is the default and is omitted entirely.
  uint16_t Imm = MI->getOperand(OpNo).getImm();
  if (Imm != 0) {
    O << " offset:";

    const MCInstrDesc &Desc = MII.get(MI->getOpcode());
    // NOTE(review): the TSFlags mask terminating this expression (presumably
    // the FlatGlobal/FlatScratch segment flags) is missing from this chunk —
    // confirm against the upstream file.
    bool IsFlatSeg = !(Desc.TSFlags &

    if (IsFlatSeg) // Unsigned offset
      printU16ImmDecOperand(MI, OpNo, O);
    else // Signed offset
    // NOTE(review): the signed-offset print statement is missing here as
    // well — confirm against the upstream file.
  }
}
124
125void AMDGPUInstPrinter::printOffset0(const MCInst *MI, unsigned OpNo,
126 const MCSubtargetInfo &STI,
127 raw_ostream &O) {
128 if (MI->getOperand(OpNo).getImm()) {
129 O << " offset0:";
130 printU8ImmDecOperand(MI, OpNo, O);
131 }
132}
133
134void AMDGPUInstPrinter::printOffset1(const MCInst *MI, unsigned OpNo,
135 const MCSubtargetInfo &STI,
136 raw_ostream &O) {
137 if (MI->getOperand(OpNo).getImm()) {
138 O << " offset1:";
139 printU8ImmDecOperand(MI, OpNo, O);
140 }
141}
142
143void AMDGPUInstPrinter::printSMRDOffset8(const MCInst *MI, unsigned OpNo,
144 const MCSubtargetInfo &STI,
145 raw_ostream &O) {
146 printU32ImmOperand(MI, OpNo, STI, O);
147}
148
149void AMDGPUInstPrinter::printSMEMOffset(const MCInst *MI, unsigned OpNo,
150 const MCSubtargetInfo &STI,
151 raw_ostream &O) {
152 O << formatHex(MI->getOperand(OpNo).getImm());
153}
154
155void AMDGPUInstPrinter::printSMEMOffsetMod(const MCInst *MI, unsigned OpNo,
156 const MCSubtargetInfo &STI,
157 raw_ostream &O) {
158 O << " offset:";
159 printSMEMOffset(MI, OpNo, STI, O);
160}
161
162void AMDGPUInstPrinter::printSMRDLiteralOffset(const MCInst *MI, unsigned OpNo,
163 const MCSubtargetInfo &STI,
164 raw_ostream &O) {
165 printU32ImmOperand(MI, OpNo, STI, O);
166}
167
168void AMDGPUInstPrinter::printCPol(const MCInst *MI, unsigned OpNo,
169 const MCSubtargetInfo &STI, raw_ostream &O) {
170 auto Imm = MI->getOperand(OpNo).getImm();
171 if (Imm & CPol::GLC)
172 O << ((AMDGPU::isGFX940(STI) &&
173 !(MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::SMRD)) ? " sc0"
174 : " glc");
175 if (Imm & CPol::SLC)
176 O << (AMDGPU::isGFX940(STI) ? " nt" : " slc");
177 if ((Imm & CPol::DLC) && AMDGPU::isGFX10Plus(STI))
178 O << " dlc";
179 if ((Imm & CPol::SCC) && AMDGPU::isGFX90A(STI))
180 O << (AMDGPU::isGFX940(STI) ? " sc1" : " scc");
181 if (Imm & ~CPol::ALL)
182 O << " /* unexpected cache policy bit */";
183}
184
185void AMDGPUInstPrinter::printDMask(const MCInst *MI, unsigned OpNo,
186 const MCSubtargetInfo &STI, raw_ostream &O) {
187 if (MI->getOperand(OpNo).getImm()) {
188 O << " dmask:";
189 printU16ImmOperand(MI, OpNo, STI, O);
190 }
191}
192
void AMDGPUInstPrinter::printDim(const MCInst *MI, unsigned OpNo,
                                 const MCSubtargetInfo &STI, raw_ostream &O) {
  // Print the MIMG dimension as "dim:SQ_RSRC_IMG_<suffix>", falling back to
  // the raw encoded value when no suffix is known.
  unsigned Dim = MI->getOperand(OpNo).getImm();
  O << " dim:SQ_RSRC_IMG_";

  // NOTE(review): the line initializing DimInfo (presumably a
  // getMIMGDimInfoByEncoding(Dim) table lookup) is missing from this chunk —
  // confirm against the upstream file.
  if (DimInfo)
    O << DimInfo->AsmSuffix;
  else
    O << Dim;
}
204
205void AMDGPUInstPrinter::printR128A16(const MCInst *MI, unsigned OpNo,
206 const MCSubtargetInfo &STI, raw_ostream &O) {
207 if (STI.hasFeature(AMDGPU::FeatureR128A16))
208 printNamedBit(MI, OpNo, O, "a16");
209 else
210 printNamedBit(MI, OpNo, O, "r128");
211}
212
void AMDGPUInstPrinter::printFORMAT(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    raw_ostream &O) {
  // Intentionally empty: nothing is printed at the format operand's own
  // position. NOTE(review): the format appears to be emitted later via
  // printSymbolicFormat (see the MTBUF handling in printRegularOperand) —
  // confirm against the upstream file.
}
217
218void AMDGPUInstPrinter::printSymbolicFormat(const MCInst *MI,
219 const MCSubtargetInfo &STI,
220 raw_ostream &O) {
221 using namespace llvm::AMDGPU::MTBUFFormat;
222
223 int OpNo =
224 AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::format);
225 assert(OpNo != -1);
226
227 unsigned Val = MI->getOperand(OpNo).getImm();
228 if (AMDGPU::isGFX10Plus(STI)) {
229 if (Val == UFMT_DEFAULT)
230 return;
231 if (isValidUnifiedFormat(Val, STI)) {
232 O << " format:[" << getUnifiedFormatName(Val, STI) << ']';
233 } else {
234 O << " format:" << Val;
235 }
236 } else {
237 if (Val == DFMT_NFMT_DEFAULT)
238 return;
239 if (isValidDfmtNfmt(Val, STI)) {
240 unsigned Dfmt;
241 unsigned Nfmt;
242 decodeDfmtNfmt(Val, Dfmt, Nfmt);
243 O << " format:[";
244 if (Dfmt != DFMT_DEFAULT) {
245 O << getDfmtName(Dfmt);
246 if (Nfmt != NFMT_DEFAULT) {
247 O << ',';
248 }
249 }
250 if (Nfmt != NFMT_DEFAULT) {
251 O << getNfmtName(Nfmt, STI);
252 }
253 O << ']';
254 } else {
255 O << " format:" << Val;
256 }
257 }
258}
259
                                   const MCRegisterInfo &MRI) {
  // NOTE(review): the first line of this signature (taking the register
  // number and the output stream) is missing from this chunk — confirm
  // against the upstream file.
#if !defined(NDEBUG)
  // Pseudo-registers must have been eliminated before emission; catch any
  // stragglers in debug builds.
  switch (RegNo) {
  case AMDGPU::FP_REG:
  case AMDGPU::SP_REG:
  case AMDGPU::PRIVATE_RSRC_REG:
    llvm_unreachable("pseudo-register should not ever be emitted");
  case AMDGPU::SCC:
    llvm_unreachable("pseudo scc should not ever be emitted");
  default:
    break;
  }
#endif

  O << getRegisterName(RegNo);
}
277
void AMDGPUInstPrinter::printVOPDst(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI, raw_ostream &O) {
  // Prints a VOP destination. When it is the first operand, the mnemonic's
  // encoding suffix (_e64_dpp/_e64/_dpp/_sdwa/_e32) is emitted first, chosen
  // from the instruction's TSFlags.
  auto Opcode = MI->getOpcode();
  auto Flags = MII.get(Opcode).TSFlags;
  if (OpNo == 0) {
    if (Flags & SIInstrFlags::VOP3 && Flags & SIInstrFlags::DPP)
      O << "_e64_dpp";
    else if (Flags & SIInstrFlags::VOP3) {
      // NOTE(review): a guard line (presumably !getVOP3IsSingle(Opcode)) is
      // missing from this chunk before the suffix below — confirm upstream.
      O << "_e64";
    } else if (Flags & SIInstrFlags::DPP)
      O << "_dpp";
    else if (Flags & SIInstrFlags::SDWA)
      O << "_sdwa";
    else if (((Flags & SIInstrFlags::VOP1) && !getVOP1IsSingle(Opcode)) ||
    // NOTE(review): the VOP2 half of this disjunction is missing from this
    // chunk — confirm against the upstream file.
      O << "_e32";
    O << " ";
  }

  printRegularOperand(MI, OpNo, STI, O);

  // Print default vcc/vcc_lo operand.
  switch (Opcode) {
  default: break;

  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:
    printDefaultVccOperand(false, STI, O);
    break;
  }
}
329
330void AMDGPUInstPrinter::printVINTRPDst(const MCInst *MI, unsigned OpNo,
331 const MCSubtargetInfo &STI, raw_ostream &O) {
332 if (AMDGPU::isSI(STI) || AMDGPU::isCI(STI))
333 O << " ";
334 else
335 O << "_e32 ";
336
337 printRegularOperand(MI, OpNo, STI, O);
338}
339
340void AMDGPUInstPrinter::printImmediateInt16(uint32_t Imm,
341 const MCSubtargetInfo &STI,
342 raw_ostream &O) {
343 int16_t SImm = static_cast<int16_t>(Imm);
344 if (isInlinableIntLiteral(SImm)) {
345 O << SImm;
346 } else {
347 uint64_t Imm16 = static_cast<uint16_t>(Imm);
348 O << formatHex(Imm16);
349 }
350}
351
352void AMDGPUInstPrinter::printImmediate16(uint32_t Imm,
353 const MCSubtargetInfo &STI,
354 raw_ostream &O) {
355 int16_t SImm = static_cast<int16_t>(Imm);
356 if (isInlinableIntLiteral(SImm)) {
357 O << SImm;
358 return;
359 }
360
361 if (Imm == 0x3C00)
362 O<< "1.0";
363 else if (Imm == 0xBC00)
364 O<< "-1.0";
365 else if (Imm == 0x3800)
366 O<< "0.5";
367 else if (Imm == 0xB800)
368 O<< "-0.5";
369 else if (Imm == 0x4000)
370 O<< "2.0";
371 else if (Imm == 0xC000)
372 O<< "-2.0";
373 else if (Imm == 0x4400)
374 O<< "4.0";
375 else if (Imm == 0xC400)
376 O<< "-4.0";
377 else if (Imm == 0x3118 &&
378 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm)) {
379 O << "0.15915494";
380 } else {
381 uint64_t Imm16 = static_cast<uint16_t>(Imm);
382 O << formatHex(Imm16);
383 }
384}
385
386void AMDGPUInstPrinter::printImmediateV216(uint32_t Imm,
387 const MCSubtargetInfo &STI,
388 raw_ostream &O) {
389 uint16_t Lo16 = static_cast<uint16_t>(Imm);
390 printImmediate16(Lo16, STI, O);
391}
392
393void AMDGPUInstPrinter::printImmediate32(uint32_t Imm,
394 const MCSubtargetInfo &STI,
395 raw_ostream &O) {
396 int32_t SImm = static_cast<int32_t>(Imm);
397 if (SImm >= -16 && SImm <= 64) {
398 O << SImm;
399 return;
400 }
401
402 if (Imm == llvm::bit_cast<uint32_t>(0.0f))
403 O << "0.0";
404 else if (Imm == llvm::bit_cast<uint32_t>(1.0f))
405 O << "1.0";
406 else if (Imm == llvm::bit_cast<uint32_t>(-1.0f))
407 O << "-1.0";
408 else if (Imm == llvm::bit_cast<uint32_t>(0.5f))
409 O << "0.5";
410 else if (Imm == llvm::bit_cast<uint32_t>(-0.5f))
411 O << "-0.5";
412 else if (Imm == llvm::bit_cast<uint32_t>(2.0f))
413 O << "2.0";
414 else if (Imm == llvm::bit_cast<uint32_t>(-2.0f))
415 O << "-2.0";
416 else if (Imm == llvm::bit_cast<uint32_t>(4.0f))
417 O << "4.0";
418 else if (Imm == llvm::bit_cast<uint32_t>(-4.0f))
419 O << "-4.0";
420 else if (Imm == 0x3e22f983 &&
421 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
422 O << "0.15915494";
423 else
424 O << formatHex(static_cast<uint64_t>(Imm));
425}
426
void AMDGPUInstPrinter::printImmediate64(uint64_t Imm,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O, bool IsFP) {
  // Prints a 64-bit immediate: inline decimal range, inline FP constants,
  // then hex fallback (high half only for FP — see note below).
  int64_t SImm = static_cast<int64_t>(Imm);
  if (SImm >= -16 && SImm <= 64) {
    O << SImm;
    return;
  }

  if (Imm == llvm::bit_cast<uint64_t>(0.0))
    O << "0.0";
  else if (Imm == llvm::bit_cast<uint64_t>(1.0))
    O << "1.0";
  else if (Imm == llvm::bit_cast<uint64_t>(-1.0))
    O << "-1.0";
  else if (Imm == llvm::bit_cast<uint64_t>(0.5))
    O << "0.5";
  else if (Imm == llvm::bit_cast<uint64_t>(-0.5))
    O << "-0.5";
  else if (Imm == llvm::bit_cast<uint64_t>(2.0))
    O << "2.0";
  else if (Imm == llvm::bit_cast<uint64_t>(-2.0))
    O << "-2.0";
  else if (Imm == llvm::bit_cast<uint64_t>(4.0))
    O << "4.0";
  else if (Imm == llvm::bit_cast<uint64_t>(-4.0))
    O << "-4.0";
  else if (Imm == 0x3fc45f306dc9c882 &&
           STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    // Double-precision bit pattern of 1/(2*pi).
    O << "0.15915494309189532";
  else if (IsFP) {
    // NOTE(review): a line is missing from this chunk before the print below
    // (presumably an assert that the value is a valid 32-bit FP literal) —
    // confirm against the upstream file.
    O << formatHex(static_cast<uint64_t>(Hi_32(Imm)));
  } else {
    assert(isUInt<32>(Imm) || isInt<32>(Imm));

    // In rare situations, we will have a 32-bit literal in a 64-bit
    // operand. This is technically allowed for the encoding of s_mov_b64.
    O << formatHex(static_cast<uint64_t>(Imm));
  }
}
468
469void AMDGPUInstPrinter::printBLGP(const MCInst *MI, unsigned OpNo,
470 const MCSubtargetInfo &STI,
471 raw_ostream &O) {
472 unsigned Imm = MI->getOperand(OpNo).getImm();
473 if (!Imm)
474 return;
475
476 if (AMDGPU::isGFX940(STI)) {
477 switch (MI->getOpcode()) {
478 case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_acd:
479 case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_vcd:
480 case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_acd:
481 case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_vcd:
482 O << " neg:[" << (Imm & 1) << ',' << ((Imm >> 1) & 1) << ','
483 << ((Imm >> 2) & 1) << ']';
484 return;
485 }
486 }
487
488 O << " blgp:" << Imm;
489}
490
491void AMDGPUInstPrinter::printCBSZ(const MCInst *MI, unsigned OpNo,
492 const MCSubtargetInfo &STI,
493 raw_ostream &O) {
494 unsigned Imm = MI->getOperand(OpNo).getImm();
495 if (!Imm)
496 return;
497
498 O << " cbsz:" << Imm;
499}
500
501void AMDGPUInstPrinter::printABID(const MCInst *MI, unsigned OpNo,
502 const MCSubtargetInfo &STI,
503 raw_ostream &O) {
504 unsigned Imm = MI->getOperand(OpNo).getImm();
505 if (!Imm)
506 return;
507
508 O << " abid:" << Imm;
509}
510
511void AMDGPUInstPrinter::printDefaultVccOperand(bool FirstOperand,
512 const MCSubtargetInfo &STI,
513 raw_ostream &O) {
514 if (!FirstOperand)
515 O << ", ";
516 printRegOperand(STI.hasFeature(AMDGPU::FeatureWavefrontSize64)
517 ? AMDGPU::VCC
518 : AMDGPU::VCC_LO,
519 O, MRI);
520 if (FirstOperand)
521 O << ", ";
522}
523
524void AMDGPUInstPrinter::printWaitVDST(const MCInst *MI, unsigned OpNo,
525 const MCSubtargetInfo &STI,
526 raw_ostream &O) {
527 O << " wait_vdst:";
528 printU4ImmDecOperand(MI, OpNo, O);
529}
530
531void AMDGPUInstPrinter::printWaitEXP(const MCInst *MI, unsigned OpNo,
532 const MCSubtargetInfo &STI,
533 raw_ostream &O) {
534 O << " wait_exp:";
535 printU4ImmDecOperand(MI, OpNo, O);
536}
537
538bool AMDGPUInstPrinter::needsImpliedVcc(const MCInstrDesc &Desc,
539 unsigned OpNo) const {
540 return OpNo == 0 && (Desc.TSFlags & SIInstrFlags::DPP) &&
541 (Desc.TSFlags & SIInstrFlags::VOPC) &&
542 (Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
543 Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO));
544}
545
546// Print default vcc/vcc_lo operand of VOPC.
547void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
548 const MCSubtargetInfo &STI,
549 raw_ostream &O) {
550 unsigned Opc = MI->getOpcode();
551 const MCInstrDesc &Desc = MII.get(Opc);
552 int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
553 // 0, 1 and 2 are the first printed operands in different cases
554 // If there are printed modifiers, printOperandAndFPInputMods or
555 // printOperandAndIntInputMods will be called instead
556 if ((OpNo == 0 ||
557 (OpNo == 1 && (Desc.TSFlags & SIInstrFlags::DPP) && ModIdx != -1)) &&
558 (Desc.TSFlags & SIInstrFlags::VOPC) &&
559 (Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
560 Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO)))
561 printDefaultVccOperand(true, STI, O);
562
563 printRegularOperand(MI, OpNo, STI, O);
564}
565
// Print operands after vcc or modifier handling.
void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O) {
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());

  // Guard against malformed instructions with fewer operands than expected.
  if (OpNo >= MI->getNumOperands()) {
    O << "/*Missing OP" << OpNo << "*/";
    return;
  }

  const MCOperand &Op = MI->getOperand(OpNo);
  if (Op.isReg()) {
    printRegOperand(Op.getReg(), O, MRI);

    // Check if operand register class contains register used.
    // Intention: print disassembler message when invalid code is decoded,
    // for example sgpr register used in VReg or VISrc(VReg or imm) operand.
    int RCID = Desc.operands()[OpNo].RegClass;
    if (RCID != -1) {
      const MCRegisterClass RC = MRI.getRegClass(RCID);
      auto Reg = mc2PseudoReg(Op.getReg());
      if (!RC.contains(Reg) && !isInlineValue(Reg)) {
        O << "/*Invalid register, operand has \'" << MRI.getRegClassName(&RC)
          << "\' register class*/";
      }
    }
  } else if (Op.isImm()) {
    // Dispatch on the operand-type table entry to choose width and
    // integer/FP formatting for the immediate.
    const uint8_t OpTy = Desc.operands()[OpNo].OperandType;
    switch (OpTy) {
    // NOTE(review): the OPERAND_* case labels of this switch are missing
    // from this chunk (only the handler bodies survived the extraction) —
    // confirm the label groupings against the upstream file.
      printImmediate32(Op.getImm(), STI, O);
      break;
      printImmediate64(Op.getImm(), STI, O, false);
      break;
      printImmediate64(Op.getImm(), STI, O, true);
      break;
      printImmediateInt16(Op.getImm(), STI, O);
      break;
      printImmediate16(Op.getImm(), STI, O);
      break;
      if (!isUInt<16>(Op.getImm()) &&
          STI.hasFeature(AMDGPU::FeatureVOP3Literal)) {
        printImmediate32(Op.getImm(), STI, O);
        break;
      }

      // Deal with 16-bit FP inline immediates not working.
      if (OpTy == AMDGPU::OPERAND_REG_IMM_V2FP16) {
        printImmediate16(static_cast<uint16_t>(Op.getImm()), STI, O);
        break;
      }
      [[fallthrough]];
      printImmediateInt16(static_cast<uint16_t>(Op.getImm()), STI, O);
      break;
      printImmediateV216(Op.getImm(), STI, O);
      break;
      O << formatDec(Op.getImm());
      break;
      // Disassembler does not fail when operand should not allow immediate
      // operands but decodes them into 32bit immediate operand.
      printImmediate32(Op.getImm(), STI, O);
      O << "/*Invalid immediate*/";
      break;
    default:
      // We hit this for the immediate instruction bits that don't yet have a
      // custom printer.
      llvm_unreachable("unexpected immediate operand type");
    }
  } else if (Op.isDFPImm()) {
    double Value = bit_cast<double>(Op.getDFPImm());
    // We special case 0.0 because otherwise it will be printed as an integer.
    if (Value == 0.0)
      O << "0.0";
    else {
      // Pick 32- vs 64-bit FP printing from the operand's register class
      // width.
      const MCInstrDesc &Desc = MII.get(MI->getOpcode());
      int RCID = Desc.operands()[OpNo].RegClass;
      unsigned RCBits = AMDGPU::getRegBitWidth(MRI.getRegClass(RCID));
      if (RCBits == 32)
        printImmediate32(llvm::bit_cast<uint32_t>((float)Value), STI, O);
      else if (RCBits == 64)
        printImmediate64(llvm::bit_cast<uint64_t>(Value), STI, O, true);
      else
        llvm_unreachable("Invalid register class size");
    }
  } else if (Op.isExpr()) {
    const MCExpr *Exp = Op.getExpr();
    Exp->print(O, &MAI);
  } else {
    O << "/*INV_OP*/";
  }

  // Print default vcc/vcc_lo operand of v_cndmask_b32_e32.
  switch (MI->getOpcode()) {
  default: break;

  case AMDGPU::V_CNDMASK_B32_e32_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_CNDMASK_B32_dpp8_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_CNDMASK_B32_e32_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_CNDMASK_B32_dpp8_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:

  case AMDGPU::V_CNDMASK_B32_e32_gfx6_gfx7:
  case AMDGPU::V_CNDMASK_B32_e32_vi:
    if ((int)OpNo == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                AMDGPU::OpName::src1))
      printDefaultVccOperand(OpNo == 0, STI, O);
    break;
  }

  // MTBUF: the symbolic format is printed after the soffset operand.
  if (Desc.TSFlags & SIInstrFlags::MTBUF) {
    int SOffsetIdx =
        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::soffset);
    assert(SOffsetIdx != -1);
    if ((int)OpNo == SOffsetIdx)
      printSymbolicFormat(MI, STI, O);
  }
}
733
734void AMDGPUInstPrinter::printOperandAndFPInputMods(const MCInst *MI,
735 unsigned OpNo,
736 const MCSubtargetInfo &STI,
737 raw_ostream &O) {
738 const MCInstrDesc &Desc = MII.get(MI->getOpcode());
739 if (needsImpliedVcc(Desc, OpNo))
740 printDefaultVccOperand(true, STI, O);
741
742 unsigned InputModifiers = MI->getOperand(OpNo).getImm();
743
744 // Use 'neg(...)' instead of '-' to avoid ambiguity.
745 // This is important for integer literals because
746 // -1 is not the same value as neg(1).
747 bool NegMnemo = false;
748
749 if (InputModifiers & SISrcMods::NEG) {
750 if (OpNo + 1 < MI->getNumOperands() &&
751 (InputModifiers & SISrcMods::ABS) == 0) {
752 const MCOperand &Op = MI->getOperand(OpNo + 1);
753 NegMnemo = Op.isImm() || Op.isDFPImm();
754 }
755 if (NegMnemo) {
756 O << "neg(";
757 } else {
758 O << '-';
759 }
760 }
761
762 if (InputModifiers & SISrcMods::ABS)
763 O << '|';
764 printRegularOperand(MI, OpNo + 1, STI, O);
765 if (InputModifiers & SISrcMods::ABS)
766 O << '|';
767
768 if (NegMnemo) {
769 O << ')';
770 }
771
772 // Print default vcc/vcc_lo operand of VOP2b.
773 switch (MI->getOpcode()) {
774 default:
775 break;
776
777 case AMDGPU::V_CNDMASK_B32_sdwa_gfx10:
778 case AMDGPU::V_CNDMASK_B32_dpp_gfx10:
779 case AMDGPU::V_CNDMASK_B32_dpp_gfx11:
780 if ((int)OpNo + 1 ==
781 AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::src1))
782 printDefaultVccOperand(OpNo == 0, STI, O);
783 break;
784 }
785}
786
787void AMDGPUInstPrinter::printOperandAndIntInputMods(const MCInst *MI,
788 unsigned OpNo,
789 const MCSubtargetInfo &STI,
790 raw_ostream &O) {
791 const MCInstrDesc &Desc = MII.get(MI->getOpcode());
792 if (needsImpliedVcc(Desc, OpNo))
793 printDefaultVccOperand(true, STI, O);
794
795 unsigned InputModifiers = MI->getOperand(OpNo).getImm();
796 if (InputModifiers & SISrcMods::SEXT)
797 O << "sext(";
798 printRegularOperand(MI, OpNo + 1, STI, O);
799 if (InputModifiers & SISrcMods::SEXT)
800 O << ')';
801
802 // Print default vcc/vcc_lo operand of VOP2b.
803 switch (MI->getOpcode()) {
804 default: break;
805
806 case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
807 case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
808 case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
809 if ((int)OpNo + 1 == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
810 AMDGPU::OpName::src1))
811 printDefaultVccOperand(OpNo == 0, STI, O);
812 break;
813 }
814}
815
816void AMDGPUInstPrinter::printDPP8(const MCInst *MI, unsigned OpNo,
817 const MCSubtargetInfo &STI,
818 raw_ostream &O) {
819 if (!AMDGPU::isGFX10Plus(STI))
820 llvm_unreachable("dpp8 is not supported on ASICs earlier than GFX10");
821
822 unsigned Imm = MI->getOperand(OpNo).getImm();
823 O << "dpp8:[" << formatDec(Imm & 0x7);
824 for (size_t i = 1; i < 8; ++i) {
825 O << ',' << formatDec((Imm >> (3 * i)) & 0x7);
826 }
827 O << ']';
828}
829
void AMDGPUInstPrinter::printDPPCtrl(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  using namespace AMDGPU::DPP;

  // Decodes the dpp_ctrl immediate into its assembly modifier, diagnosing
  // controls that are invalid for the current subtarget.
  unsigned Imm = MI->getOperand(OpNo).getImm();
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());

  // NOTE(review): the condition line opening this if-chain is missing from
  // this chunk (presumably a DP-ALU DPP legality check using Imm and Desc) —
  // confirm against the upstream file.
    O << " /* DP ALU dpp only supports row_newbcast */";
    return;
  } else if (Imm <= DppCtrl::QUAD_PERM_LAST) {
    O << "quad_perm:[";
    O << formatDec(Imm & 0x3) << ',';
    O << formatDec((Imm & 0xc) >> 2) << ',';
    O << formatDec((Imm & 0x30) >> 4) << ',';
    O << formatDec((Imm & 0xc0) >> 6) << ']';
  } else if ((Imm >= DppCtrl::ROW_SHL_FIRST) &&
             (Imm <= DppCtrl::ROW_SHL_LAST)) {
    O << "row_shl:";
    printU4ImmDecOperand(MI, OpNo, O);
  } else if ((Imm >= DppCtrl::ROW_SHR_FIRST) &&
             (Imm <= DppCtrl::ROW_SHR_LAST)) {
    O << "row_shr:";
    printU4ImmDecOperand(MI, OpNo, O);
  } else if ((Imm >= DppCtrl::ROW_ROR_FIRST) &&
             (Imm <= DppCtrl::ROW_ROR_LAST)) {
    O << "row_ror:";
    printU4ImmDecOperand(MI, OpNo, O);
  } else if (Imm == DppCtrl::WAVE_SHL1) {
    // Wave-wide shifts/rotates were removed in GFX10.
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_shl is not supported starting from GFX10 */";
      return;
    }
    O << "wave_shl:1";
  } else if (Imm == DppCtrl::WAVE_ROL1) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_rol is not supported starting from GFX10 */";
      return;
    }
    O << "wave_rol:1";
  } else if (Imm == DppCtrl::WAVE_SHR1) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_shr is not supported starting from GFX10 */";
      return;
    }
    O << "wave_shr:1";
  } else if (Imm == DppCtrl::WAVE_ROR1) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_ror is not supported starting from GFX10 */";
      return;
    }
    O << "wave_ror:1";
  } else if (Imm == DppCtrl::ROW_MIRROR) {
    O << "row_mirror";
  } else if (Imm == DppCtrl::ROW_HALF_MIRROR) {
    O << "row_half_mirror";
  } else if (Imm == DppCtrl::BCAST15) {
    // Row broadcasts were removed in GFX10.
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* row_bcast is not supported starting from GFX10 */";
      return;
    }
    O << "row_bcast:15";
  } else if (Imm == DppCtrl::BCAST31) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* row_bcast is not supported starting from GFX10 */";
      return;
    }
    O << "row_bcast:31";
  } else if ((Imm >= DppCtrl::ROW_SHARE_FIRST) &&
             (Imm <= DppCtrl::ROW_SHARE_LAST)) {
    // The same encoding is row_newbcast on GFX90A and row_share on GFX10+.
    if (AMDGPU::isGFX90A(STI)) {
      O << "row_newbcast:";
    } else if (AMDGPU::isGFX10Plus(STI)) {
      O << "row_share:";
    } else {
      O << " /* row_newbcast/row_share is not supported on ASICs earlier "
           "than GFX90A/GFX10 */";
      return;
    }
    printU4ImmDecOperand(MI, OpNo, O);
  } else if ((Imm >= DppCtrl::ROW_XMASK_FIRST) &&
             (Imm <= DppCtrl::ROW_XMASK_LAST)) {
    if (!AMDGPU::isGFX10Plus(STI)) {
      O << "/* row_xmask is not supported on ASICs earlier than GFX10 */";
      return;
    }
    O << "row_xmask:";
    printU4ImmDecOperand(MI, OpNo, O);
  } else {
    O << "/* Invalid dpp_ctrl value */";
  }
}
923
924void AMDGPUInstPrinter::printDppRowMask(const MCInst *MI, unsigned OpNo,
925 const MCSubtargetInfo &STI,
926 raw_ostream &O) {
927 O << " row_mask:";
928 printU4ImmOperand(MI, OpNo, STI, O);
929}
930
931void AMDGPUInstPrinter::printDppBankMask(const MCInst *MI, unsigned OpNo,
932 const MCSubtargetInfo &STI,
933 raw_ostream &O) {
934 O << " bank_mask:";
935 printU4ImmOperand(MI, OpNo, STI, O);
936}
937
938void AMDGPUInstPrinter::printDppBoundCtrl(const MCInst *MI, unsigned OpNo,
939 const MCSubtargetInfo &STI,
940 raw_ostream &O) {
941 unsigned Imm = MI->getOperand(OpNo).getImm();
942 if (Imm) {
943 O << " bound_ctrl:1";
944 }
945}
946
947void AMDGPUInstPrinter::printDppFI(const MCInst *MI, unsigned OpNo,
948 const MCSubtargetInfo &STI, raw_ostream &O) {
949 using namespace llvm::AMDGPU::DPP;
950 unsigned Imm = MI->getOperand(OpNo).getImm();
951 if (Imm == DPP_FI_1 || Imm == DPP8_FI_1) {
952 O << " fi:1";
953 }
954}
955
956void AMDGPUInstPrinter::printSDWASel(const MCInst *MI, unsigned OpNo,
957 raw_ostream &O) {
958 using namespace llvm::AMDGPU::SDWA;
959
960 unsigned Imm = MI->getOperand(OpNo).getImm();
961 switch (Imm) {
962 case SdwaSel::BYTE_0: O << "BYTE_0"; break;
963 case SdwaSel::BYTE_1: O << "BYTE_1"; break;
964 case SdwaSel::BYTE_2: O << "BYTE_2"; break;
965 case SdwaSel::BYTE_3: O << "BYTE_3"; break;
966 case SdwaSel::WORD_0: O << "WORD_0"; break;
967 case SdwaSel::WORD_1: O << "WORD_1"; break;
968 case SdwaSel::DWORD: O << "DWORD"; break;
969 default: llvm_unreachable("Invalid SDWA data select operand");
970 }
971}
972
973void AMDGPUInstPrinter::printSDWADstSel(const MCInst *MI, unsigned OpNo,
974 const MCSubtargetInfo &STI,
975 raw_ostream &O) {
976 O << "dst_sel:";
977 printSDWASel(MI, OpNo, O);
978}
979
980void AMDGPUInstPrinter::printSDWASrc0Sel(const MCInst *MI, unsigned OpNo,
981 const MCSubtargetInfo &STI,
982 raw_ostream &O) {
983 O << "src0_sel:";
984 printSDWASel(MI, OpNo, O);
985}
986
987void AMDGPUInstPrinter::printSDWASrc1Sel(const MCInst *MI, unsigned OpNo,
988 const MCSubtargetInfo &STI,
989 raw_ostream &O) {
990 O << "src1_sel:";
991 printSDWASel(MI, OpNo, O);
992}
993
994void AMDGPUInstPrinter::printSDWADstUnused(const MCInst *MI, unsigned OpNo,
995 const MCSubtargetInfo &STI,
996 raw_ostream &O) {
997 using namespace llvm::AMDGPU::SDWA;
998
999 O << "dst_unused:";
1000 unsigned Imm = MI->getOperand(OpNo).getImm();
1001 switch (Imm) {
1002 case DstUnused::UNUSED_PAD: O << "UNUSED_PAD"; break;
1003 case DstUnused::UNUSED_SEXT: O << "UNUSED_SEXT"; break;
1004 case DstUnused::UNUSED_PRESERVE: O << "UNUSED_PRESERVE"; break;
1005 default: llvm_unreachable("Invalid SDWA dest_unused operand");
1006 }
1007}
1008
1009void AMDGPUInstPrinter::printExpSrcN(const MCInst *MI, unsigned OpNo,
1010 const MCSubtargetInfo &STI, raw_ostream &O,
1011 unsigned N) {
1012 unsigned Opc = MI->getOpcode();
1013 int EnIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::en);
1014 unsigned En = MI->getOperand(EnIdx).getImm();
1015
1016 int ComprIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::compr);
1017
1018 // If compr is set, print as src0, src0, src1, src1
1019 if (MI->getOperand(ComprIdx).getImm())
1020 OpNo = OpNo - N + N / 2;
1021
1022 if (En & (1 << N))
1023 printRegOperand(MI->getOperand(OpNo).getReg(), O, MRI);
1024 else
1025 O << "off";
1026}
1027
1028void AMDGPUInstPrinter::printExpSrc0(const MCInst *MI, unsigned OpNo,
1029 const MCSubtargetInfo &STI,
1030 raw_ostream &O) {
1031 printExpSrcN(MI, OpNo, STI, O, 0);
1032}
1033
1034void AMDGPUInstPrinter::printExpSrc1(const MCInst *MI, unsigned OpNo,
1035 const MCSubtargetInfo &STI,
1036 raw_ostream &O) {
1037 printExpSrcN(MI, OpNo, STI, O, 1);
1038}
1039
1040void AMDGPUInstPrinter::printExpSrc2(const MCInst *MI, unsigned OpNo,
1041 const MCSubtargetInfo &STI,
1042 raw_ostream &O) {
1043 printExpSrcN(MI, OpNo, STI, O, 2);
1044}
1045
1046void AMDGPUInstPrinter::printExpSrc3(const MCInst *MI, unsigned OpNo,
1047 const MCSubtargetInfo &STI,
1048 raw_ostream &O) {
1049 printExpSrcN(MI, OpNo, STI, O, 3);
1050}
1051
1052void AMDGPUInstPrinter::printExpTgt(const MCInst *MI, unsigned OpNo,
1053 const MCSubtargetInfo &STI,
1054 raw_ostream &O) {
1055 using namespace llvm::AMDGPU::Exp;
1056
1057 // This is really a 6 bit field.
1058 unsigned Id = MI->getOperand(OpNo).getImm() & ((1 << 6) - 1);
1059
1060 int Index;
1061 StringRef TgtName;
1062 if (getTgtName(Id, TgtName, Index) && isSupportedTgtId(Id, STI)) {
1063 O << ' ' << TgtName;
1064 if (Index >= 0)
1065 O << Index;
1066 } else {
1067 O << " invalid_target_" << Id;
1068 }
1069}
1070
1071static bool allOpsDefaultValue(const int* Ops, int NumOps, int Mod,
1072 bool IsPacked, bool HasDstSel) {
1073 int DefaultValue = IsPacked && (Mod == SISrcMods::OP_SEL_1);
1074
1075 for (int I = 0; I < NumOps; ++I) {
1076 if (!!(Ops[I] & Mod) != DefaultValue)
1077 return false;
1078 }
1079
1080 if (HasDstSel && (Ops[0] & SISrcMods::DST_OP_SEL) != 0)
1081 return false;
1082
1083 return true;
1084}
1085
1086void AMDGPUInstPrinter::printPackedModifier(const MCInst *MI,
1088 unsigned Mod,
1089 raw_ostream &O) {
1090 unsigned Opc = MI->getOpcode();
1091 int NumOps = 0;
1092 int Ops[3];
1093
1094 for (int OpName : { AMDGPU::OpName::src0_modifiers,
1095 AMDGPU::OpName::src1_modifiers,
1096 AMDGPU::OpName::src2_modifiers }) {
1098 if (Idx == -1)
1099 break;
1100
1101 Ops[NumOps++] = MI->getOperand(Idx).getImm();
1102 }
1103
1104 const bool HasDstSel =
1105 NumOps > 0 &&
1107 MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::VOP3_OPSEL;
1108
1109 const bool IsPacked =
1110 MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::IsPacked;
1111
1112 if (allOpsDefaultValue(Ops, NumOps, Mod, IsPacked, HasDstSel))
1113 return;
1114
1115 O << Name;
1116 for (int I = 0; I < NumOps; ++I) {
1117 if (I != 0)
1118 O << ',';
1119
1120 O << !!(Ops[I] & Mod);
1121 }
1122
1123 if (HasDstSel) {
1124 O << ',' << !!(Ops[0] & SISrcMods::DST_OP_SEL);
1125 }
1126
1127 O << ']';
1128}
1129
1130void AMDGPUInstPrinter::printOpSel(const MCInst *MI, unsigned,
1131 const MCSubtargetInfo &STI,
1132 raw_ostream &O) {
1133 unsigned Opc = MI->getOpcode();
1134 if (isPermlane16(Opc)) {
1135 auto FIN = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
1136 auto BCN = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
1137 unsigned FI = !!(MI->getOperand(FIN).getImm() & SISrcMods::OP_SEL_0);
1138 unsigned BC = !!(MI->getOperand(BCN).getImm() & SISrcMods::OP_SEL_0);
1139 if (FI || BC)
1140 O << " op_sel:[" << FI << ',' << BC << ']';
1141 return;
1142 }
1143
1144 printPackedModifier(MI, " op_sel:[", SISrcMods::OP_SEL_0, O);
1145}
1146
1147void AMDGPUInstPrinter::printOpSelHi(const MCInst *MI, unsigned OpNo,
1148 const MCSubtargetInfo &STI,
1149 raw_ostream &O) {
1150 printPackedModifier(MI, " op_sel_hi:[", SISrcMods::OP_SEL_1, O);
1151}
1152
1153void AMDGPUInstPrinter::printNegLo(const MCInst *MI, unsigned OpNo,
1154 const MCSubtargetInfo &STI,
1155 raw_ostream &O) {
1156 printPackedModifier(MI, " neg_lo:[", SISrcMods::NEG, O);
1157}
1158
1159void AMDGPUInstPrinter::printNegHi(const MCInst *MI, unsigned OpNo,
1160 const MCSubtargetInfo &STI,
1161 raw_ostream &O) {
1162 printPackedModifier(MI, " neg_hi:[", SISrcMods::NEG_HI, O);
1163}
1164
1165void AMDGPUInstPrinter::printInterpSlot(const MCInst *MI, unsigned OpNum,
1166 const MCSubtargetInfo &STI,
1167 raw_ostream &O) {
1168 unsigned Imm = MI->getOperand(OpNum).getImm();
1169 switch (Imm) {
1170 case 0:
1171 O << "p10";
1172 break;
1173 case 1:
1174 O << "p20";
1175 break;
1176 case 2:
1177 O << "p0";
1178 break;
1179 default:
1180 O << "invalid_param_" << Imm;
1181 }
1182}
1183
1184void AMDGPUInstPrinter::printInterpAttr(const MCInst *MI, unsigned OpNum,
1185 const MCSubtargetInfo &STI,
1186 raw_ostream &O) {
1187 unsigned Attr = MI->getOperand(OpNum).getImm();
1188 O << "attr" << Attr;
1189}
1190
1191void AMDGPUInstPrinter::printInterpAttrChan(const MCInst *MI, unsigned OpNum,
1192 const MCSubtargetInfo &STI,
1193 raw_ostream &O) {
1194 unsigned Chan = MI->getOperand(OpNum).getImm();
1195 O << '.' << "xyzw"[Chan & 0x3];
1196}
1197
1198void AMDGPUInstPrinter::printGPRIdxMode(const MCInst *MI, unsigned OpNo,
1199 const MCSubtargetInfo &STI,
1200 raw_ostream &O) {
1201 using namespace llvm::AMDGPU::VGPRIndexMode;
1202 unsigned Val = MI->getOperand(OpNo).getImm();
1203
1204 if ((Val & ~ENABLE_MASK) != 0) {
1205 O << formatHex(static_cast<uint64_t>(Val));
1206 } else {
1207 O << "gpr_idx(";
1208 bool NeedComma = false;
1209 for (unsigned ModeId = ID_MIN; ModeId <= ID_MAX; ++ModeId) {
1210 if (Val & (1 << ModeId)) {
1211 if (NeedComma)
1212 O << ',';
1213 O << IdSymbolic[ModeId];
1214 NeedComma = true;
1215 }
1216 }
1217 O << ')';
1218 }
1219}
1220
1221void AMDGPUInstPrinter::printMemOperand(const MCInst *MI, unsigned OpNo,
1222 const MCSubtargetInfo &STI,
1223 raw_ostream &O) {
1224 printRegularOperand(MI, OpNo, STI, O);
1225 O << ", ";
1226 printRegularOperand(MI, OpNo + 1, STI, O);
1227}
1228
1229void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo,
1230 raw_ostream &O, StringRef Asm,
1232 const MCOperand &Op = MI->getOperand(OpNo);
1233 assert(Op.isImm());
1234 if (Op.getImm() == 1) {
1235 O << Asm;
1236 } else {
1237 O << Default;
1238 }
1239}
1240
1241void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo,
1242 raw_ostream &O, char Asm) {
1243 const MCOperand &Op = MI->getOperand(OpNo);
1244 assert(Op.isImm());
1245 if (Op.getImm() == 1)
1246 O << Asm;
1247}
1248
1249void AMDGPUInstPrinter::printOModSI(const MCInst *MI, unsigned OpNo,
1250 const MCSubtargetInfo &STI,
1251 raw_ostream &O) {
1252 int Imm = MI->getOperand(OpNo).getImm();
1253 if (Imm == SIOutMods::MUL2)
1254 O << " mul:2";
1255 else if (Imm == SIOutMods::MUL4)
1256 O << " mul:4";
1257 else if (Imm == SIOutMods::DIV2)
1258 O << " div:2";
1259}
1260
1261void AMDGPUInstPrinter::printSendMsg(const MCInst *MI, unsigned OpNo,
1262 const MCSubtargetInfo &STI,
1263 raw_ostream &O) {
1264 using namespace llvm::AMDGPU::SendMsg;
1265
1266 const unsigned Imm16 = MI->getOperand(OpNo).getImm();
1267
1268 uint16_t MsgId;
1269 uint16_t OpId;
1271 decodeMsg(Imm16, MsgId, OpId, StreamId, STI);
1272
1273 StringRef MsgName = getMsgName(MsgId, STI);
1274
1275 if (!MsgName.empty() && isValidMsgOp(MsgId, OpId, STI) &&
1276 isValidMsgStream(MsgId, OpId, StreamId, STI)) {
1277 O << "sendmsg(" << MsgName;
1278 if (msgRequiresOp(MsgId, STI)) {
1279 O << ", " << getMsgOpName(MsgId, OpId, STI);
1280 if (msgSupportsStream(MsgId, OpId, STI)) {
1281 O << ", " << StreamId;
1282 }
1283 }
1284 O << ')';
1285 } else if (encodeMsg(MsgId, OpId, StreamId) == Imm16) {
1286 O << "sendmsg(" << MsgId << ", " << OpId << ", " << StreamId << ')';
1287 } else {
1288 O << Imm16; // Unknown imm16 code.
1289 }
1290}
1291
1292static void printSwizzleBitmask(const uint16_t AndMask,
1293 const uint16_t OrMask,
1294 const uint16_t XorMask,
1295 raw_ostream &O) {
1296 using namespace llvm::AMDGPU::Swizzle;
1297
1298 uint16_t Probe0 = ((0 & AndMask) | OrMask) ^ XorMask;
1299 uint16_t Probe1 = ((BITMASK_MASK & AndMask) | OrMask) ^ XorMask;
1300
1301 O << "\"";
1302
1303 for (unsigned Mask = 1 << (BITMASK_WIDTH - 1); Mask > 0; Mask >>= 1) {
1304 uint16_t p0 = Probe0 & Mask;
1305 uint16_t p1 = Probe1 & Mask;
1306
1307 if (p0 == p1) {
1308 if (p0 == 0) {
1309 O << "0";
1310 } else {
1311 O << "1";
1312 }
1313 } else {
1314 if (p0 == 0) {
1315 O << "p";
1316 } else {
1317 O << "i";
1318 }
1319 }
1320 }
1321
1322 O << "\"";
1323}
1324
1325void AMDGPUInstPrinter::printSwizzle(const MCInst *MI, unsigned OpNo,
1326 const MCSubtargetInfo &STI,
1327 raw_ostream &O) {
1328 using namespace llvm::AMDGPU::Swizzle;
1329
1330 uint16_t Imm = MI->getOperand(OpNo).getImm();
1331 if (Imm == 0) {
1332 return;
1333 }
1334
1335 O << " offset:";
1336
1337 if ((Imm & QUAD_PERM_ENC_MASK) == QUAD_PERM_ENC) {
1338
1339 O << "swizzle(" << IdSymbolic[ID_QUAD_PERM];
1340 for (unsigned I = 0; I < LANE_NUM; ++I) {
1341 O << ",";
1342 O << formatDec(Imm & LANE_MASK);
1343 Imm >>= LANE_SHIFT;
1344 }
1345 O << ")";
1346
1347 } else if ((Imm & BITMASK_PERM_ENC_MASK) == BITMASK_PERM_ENC) {
1348
1349 uint16_t AndMask = (Imm >> BITMASK_AND_SHIFT) & BITMASK_MASK;
1350 uint16_t OrMask = (Imm >> BITMASK_OR_SHIFT) & BITMASK_MASK;
1351 uint16_t XorMask = (Imm >> BITMASK_XOR_SHIFT) & BITMASK_MASK;
1352
1353 if (AndMask == BITMASK_MAX && OrMask == 0 && llvm::popcount(XorMask) == 1) {
1354
1355 O << "swizzle(" << IdSymbolic[ID_SWAP];
1356 O << ",";
1357 O << formatDec(XorMask);
1358 O << ")";
1359
1360 } else if (AndMask == BITMASK_MAX && OrMask == 0 && XorMask > 0 &&
1361 isPowerOf2_64(XorMask + 1)) {
1362
1363 O << "swizzle(" << IdSymbolic[ID_REVERSE];
1364 O << ",";
1365 O << formatDec(XorMask + 1);
1366 O << ")";
1367
1368 } else {
1369
1370 uint16_t GroupSize = BITMASK_MAX - AndMask + 1;
1371 if (GroupSize > 1 &&
1372 isPowerOf2_64(GroupSize) &&
1373 OrMask < GroupSize &&
1374 XorMask == 0) {
1375
1376 O << "swizzle(" << IdSymbolic[ID_BROADCAST];
1377 O << ",";
1378 O << formatDec(GroupSize);
1379 O << ",";
1380 O << formatDec(OrMask);
1381 O << ")";
1382
1383 } else {
1384 O << "swizzle(" << IdSymbolic[ID_BITMASK_PERM];
1385 O << ",";
1386 printSwizzleBitmask(AndMask, OrMask, XorMask, O);
1387 O << ")";
1388 }
1389 }
1390 } else {
1391 printU16ImmDecOperand(MI, OpNo, O);
1392 }
1393}
1394
1395void AMDGPUInstPrinter::printSWaitCnt(const MCInst *MI, unsigned OpNo,
1396 const MCSubtargetInfo &STI,
1397 raw_ostream &O) {
1399
1400 unsigned SImm16 = MI->getOperand(OpNo).getImm();
1401 unsigned Vmcnt, Expcnt, Lgkmcnt;
1402 decodeWaitcnt(ISA, SImm16, Vmcnt, Expcnt, Lgkmcnt);
1403
1404 bool IsDefaultVmcnt = Vmcnt == getVmcntBitMask(ISA);
1405 bool IsDefaultExpcnt = Expcnt == getExpcntBitMask(ISA);
1406 bool IsDefaultLgkmcnt = Lgkmcnt == getLgkmcntBitMask(ISA);
1407 bool PrintAll = IsDefaultVmcnt && IsDefaultExpcnt && IsDefaultLgkmcnt;
1408
1409 bool NeedSpace = false;
1410
1411 if (!IsDefaultVmcnt || PrintAll) {
1412 O << "vmcnt(" << Vmcnt << ')';
1413 NeedSpace = true;
1414 }
1415
1416 if (!IsDefaultExpcnt || PrintAll) {
1417 if (NeedSpace)
1418 O << ' ';
1419 O << "expcnt(" << Expcnt << ')';
1420 NeedSpace = true;
1421 }
1422
1423 if (!IsDefaultLgkmcnt || PrintAll) {
1424 if (NeedSpace)
1425 O << ' ';
1426 O << "lgkmcnt(" << Lgkmcnt << ')';
1427 }
1428}
1429
1430void AMDGPUInstPrinter::printDepCtr(const MCInst *MI, unsigned OpNo,
1431 const MCSubtargetInfo &STI,
1432 raw_ostream &O) {
1433 using namespace llvm::AMDGPU::DepCtr;
1434
1435 uint64_t Imm16 = MI->getOperand(OpNo).getImm() & 0xffff;
1436
1437 bool HasNonDefaultVal = false;
1438 if (isSymbolicDepCtrEncoding(Imm16, HasNonDefaultVal, STI)) {
1439 int Id = 0;
1441 unsigned Val;
1442 bool IsDefault;
1443 bool NeedSpace = false;
1444 while (decodeDepCtr(Imm16, Id, Name, Val, IsDefault, STI)) {
1445 if (!IsDefault || !HasNonDefaultVal) {
1446 if (NeedSpace)
1447 O << ' ';
1448 O << Name << '(' << Val << ')';
1449 NeedSpace = true;
1450 }
1451 }
1452 } else {
1453 O << formatHex(Imm16);
1454 }
1455}
1456
1458 const MCSubtargetInfo &STI,
1459 raw_ostream &O) {
1460 const char *BadInstId = "/* invalid instid value */";
1461 static const std::array<const char *, 12> InstIds = {
1462 "NO_DEP", "VALU_DEP_1", "VALU_DEP_2",
1463 "VALU_DEP_3", "VALU_DEP_4", "TRANS32_DEP_1",
1464 "TRANS32_DEP_2", "TRANS32_DEP_3", "FMA_ACCUM_CYCLE_1",
1465 "SALU_CYCLE_1", "SALU_CYCLE_2", "SALU_CYCLE_3"};
1466
1467 const char *BadInstSkip = "/* invalid instskip value */";
1468 static const std::array<const char *, 6> InstSkips = {
1469 "SAME", "NEXT", "SKIP_1", "SKIP_2", "SKIP_3", "SKIP_4"};
1470
1471 unsigned SImm16 = MI->getOperand(OpNo).getImm();
1472 const char *Prefix = "";
1473
1474 unsigned Value = SImm16 & 0xF;
1475 if (Value) {
1476 const char *Name = Value < InstIds.size() ? InstIds[Value] : BadInstId;
1477 O << Prefix << "instid0(" << Name << ')';
1478 Prefix = " | ";
1479 }
1480
1481 Value = (SImm16 >> 4) & 7;
1482 if (Value) {
1483 const char *Name =
1484 Value < InstSkips.size() ? InstSkips[Value] : BadInstSkip;
1485 O << Prefix << "instskip(" << Name << ')';
1486 Prefix = " | ";
1487 }
1488
1489 Value = (SImm16 >> 7) & 0xF;
1490 if (Value) {
1491 const char *Name = Value < InstIds.size() ? InstIds[Value] : BadInstId;
1492 O << Prefix << "instid1(" << Name << ')';
1493 Prefix = " | ";
1494 }
1495
1496 if (!*Prefix)
1497 O << "0";
1498}
1499
1500void AMDGPUInstPrinter::printHwreg(const MCInst *MI, unsigned OpNo,
1501 const MCSubtargetInfo &STI, raw_ostream &O) {
1502 unsigned Id;
1503 unsigned Offset;
1504 unsigned Width;
1505
1506 using namespace llvm::AMDGPU::Hwreg;
1507 unsigned Val = MI->getOperand(OpNo).getImm();
1508 decodeHwreg(Val, Id, Offset, Width);
1509 StringRef HwRegName = getHwreg(Id, STI);
1510
1511 O << "hwreg(";
1512 if (!HwRegName.empty()) {
1513 O << HwRegName;
1514 } else {
1515 O << Id;
1516 }
1517 if (Width != WIDTH_DEFAULT_ || Offset != OFFSET_DEFAULT_) {
1518 O << ", " << Offset << ", " << Width;
1519 }
1520 O << ')';
1521}
1522
1523void AMDGPUInstPrinter::printEndpgm(const MCInst *MI, unsigned OpNo,
1524 const MCSubtargetInfo &STI,
1525 raw_ostream &O) {
1526 uint16_t Imm = MI->getOperand(OpNo).getImm();
1527 if (Imm == 0) {
1528 return;
1529 }
1530
1531 O << ' ' << formatDec(Imm);
1532}
1533
1534#include "AMDGPUGenAsmWriter.inc"
unsigned const MachineRegisterInfo * MRI
static void printSwizzleBitmask(const uint16_t AndMask, const uint16_t OrMask, const uint16_t XorMask, raw_ostream &O)
static bool allOpsDefaultValue(const int *Ops, int NumOps, int Mod, bool IsPacked, bool HasDstSel)
Provides AMDGPU specific target descriptions.
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
std::string Name
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition: MD5.cpp:58
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
static constexpr uint32_t Opcode
Definition: aarch32.h:200
void printSwizzle(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printEndpgm(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
static const char * getRegisterName(MCRegister Reg)
static void printIfSet(const MCInst *MI, unsigned OpNo, raw_ostream &O, StringRef Asm, StringRef Default="")
void printDepCtr(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printHwreg(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printSendMsg(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printRegName(raw_ostream &OS, MCRegister Reg) const override
Print the assembler register name.
static void printRegOperand(unsigned RegNo, raw_ostream &O, const MCRegisterInfo &MRI)
void printInst(const MCInst *MI, uint64_t Address, StringRef Annot, const MCSubtargetInfo &STI, raw_ostream &O) override
Print the specified MCInst to the specified raw_ostream.
void printInstruction(const MCInst *MI, uint64_t Address, const MCSubtargetInfo &STI, raw_ostream &O)
void printSWaitCnt(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printOModSI(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
void printSDelayALU(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O)
This class represents an Operation in the Expression.
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
format_object< int64_t > formatHex(int64_t Value) const
const MCInstrInfo & MII
Definition: MCInstPrinter.h:52
format_object< int64_t > formatDec(int64_t Value) const
Utility functions to print decimal/hexadecimal values.
const MCRegisterInfo & MRI
Definition: MCInstPrinter.h:53
void printAnnotation(raw_ostream &OS, StringRef Annot)
Utility function for printing annotations.
const MCAsmInfo & MAI
Definition: MCInstPrinter.h:51
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Instances of this class represent operands of the MCInst class.
Definition: MCInst.h:36
MCRegisterClass - Base class of TargetRegisterClass.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
const char * getRegClassName(const MCRegisterClass *Class) const
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
StringRef getCPU() const
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
LLVM Value Representation.
Definition: Value.h:74
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI)
bool getTgtName(unsigned Id, StringRef &Name, int &Index)
bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI)
StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI)
bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI)
StringRef getDfmtName(unsigned Id)
StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI)
void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt)
const char *const IdSymbolic[]
void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt)
Decodes Vmcnt, Expcnt and Lgkmcnt from given Waitcnt for given isa Version, and writes decoded values...
LLVM_READNONE bool isLegalDPALU_DPPControl(unsigned DC)
LLVM_READONLY int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For FLAT segment the offset must be positive; MSB is ignored and forced to zero.
unsigned mc2PseudoReg(unsigned Reg)
Convert hardware register Reg to a pseudo register.
bool isGFX940(const MCSubtargetInfo &STI)
IsaVersion getIsaVersion(StringRef GPU)
bool isValid32BitLiteral(uint64_t Val, bool IsFP64)
bool isDPALU_DPP(const MCInstrDesc &OpDesc)
bool isSI(const MCSubtargetInfo &STI)
bool getVOP3IsSingle(unsigned Opc)
bool getVOP1IsSingle(unsigned Opc)
bool isGFX90A(const MCSubtargetInfo &STI)
LLVM_READONLY const MIMGDimInfo * getMIMGDimInfoByEncoding(uint8_t DimEnc)
unsigned getVmcntBitMask(const IsaVersion &Version)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the values intended for floating point values.
unsigned getLgkmcntBitMask(const IsaVersion &Version)
unsigned getExpcntBitMask(const IsaVersion &Version)
bool isInlineValue(unsigned Reg)
bool isGFX10Plus(const MCSubtargetInfo &STI)
@ OPERAND_REG_IMM_INT64
Definition: SIDefines.h:190
@ OPERAND_REG_IMM_V2FP16
Definition: SIDefines.h:197
@ OPERAND_REG_INLINE_C_V2INT32
Definition: SIDefines.h:211
@ OPERAND_REG_INLINE_C_FP64
Definition: SIDefines.h:208
@ OPERAND_REG_IMM_V2INT16
Definition: SIDefines.h:198
@ OPERAND_REG_INLINE_AC_V2FP16
Definition: SIDefines.h:225
@ OPERAND_REG_IMM_INT32
Operands with register or 32-bit immediate.
Definition: SIDefines.h:189
@ OPERAND_REG_IMM_FP16
Definition: SIDefines.h:194
@ OPERAND_REG_INLINE_C_INT64
Definition: SIDefines.h:205
@ OPERAND_REG_INLINE_C_INT16
Operands with register or inline constant.
Definition: SIDefines.h:203
@ OPERAND_REG_INLINE_AC_INT16
Operands with an AccVGPR register or inline constant.
Definition: SIDefines.h:219
@ OPERAND_REG_IMM_FP64
Definition: SIDefines.h:193
@ OPERAND_REG_INLINE_C_V2FP16
Definition: SIDefines.h:210
@ OPERAND_REG_INLINE_AC_V2INT16
Definition: SIDefines.h:224
@ OPERAND_REG_INLINE_AC_FP16
Definition: SIDefines.h:221
@ OPERAND_REG_INLINE_AC_INT32
Definition: SIDefines.h:220
@ OPERAND_REG_INLINE_AC_FP32
Definition: SIDefines.h:222
@ OPERAND_REG_IMM_V2INT32
Definition: SIDefines.h:199
@ OPERAND_REG_IMM_FP32
Definition: SIDefines.h:192
@ OPERAND_REG_INLINE_C_FP32
Definition: SIDefines.h:207
@ OPERAND_REG_INLINE_C_INT32
Definition: SIDefines.h:204
@ OPERAND_REG_INLINE_C_V2INT16
Definition: SIDefines.h:209
@ OPERAND_REG_IMM_V2FP32
Definition: SIDefines.h:200
@ OPERAND_REG_INLINE_AC_FP64
Definition: SIDefines.h:223
@ OPERAND_REG_INLINE_C_FP16
Definition: SIDefines.h:206
@ OPERAND_REG_IMM_INT16
Definition: SIDefines.h:191
@ OPERAND_REG_INLINE_C_V2FP32
Definition: SIDefines.h:212
@ OPERAND_REG_IMM_FP32_DEFERRED
Definition: SIDefines.h:196
@ OPERAND_REG_IMM_FP16_DEFERRED
Definition: SIDefines.h:195
unsigned getRegBitWidth(const TargetRegisterClass &RC)
Get the size in bits of a register from the register class RC.
bool isCI(const MCSubtargetInfo &STI)
bool getVOP2IsSingle(unsigned Opc)
bool isPermlane16(unsigned Opc)
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:61
@ OPERAND_IMMEDIATE
Definition: MCInstrDesc.h:60
@ OPERAND_UNKNOWN
Definition: MCInstrDesc.h:59
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:440
int popcount(T Value) noexcept
Count the number of set bits in a value.
Definition: bit.h:384
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:269
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
Definition: MathExtras.h:136
@ Mod
The access may modify the value stored in memory.
constexpr int32_t SignExtend32(uint32_t X)
Sign-extend the number in the bottom B bits of X to a 32-bit integer.
Definition: MathExtras.h:436
@ Default
The result values are uniform if and only if all operands are uniform.
#define N
Instruction set architecture version.
Definition: TargetParser.h:117
Description of the encoding of one expression Op.