//===-- SPIRVPreLegalizer.cpp - prepare IR for legalization -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass prepares IR for legalization: it assigns SPIR-V types to registers
// and removes the intrinsics that held these types during IR translation.
// It also processes constants and registers them in GR to avoid duplication.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IntrinsicsSPIRV.h"

#define DEBUG_TYPE "spirv-prelegalizer"

using namespace llvm;

namespace {
class SPIRVPreLegalizer : public MachineFunctionPass {
public:
  static char ID;
  SPIRVPreLegalizer() : MachineFunctionPass(ID) {}
  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};
} // namespace

void SPIRVPreLegalizer::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<GISelValueTrackingAnalysisLegacy>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

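// Record every constant referenced by an spv_track_constant intrinsic in the
// global registry (reusing an existing entry when the same constant was
// already registered for this function), then fold the intrinsic away by
// rewiring its users to the tracked register.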
static void
addConstantsToTrack(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                    const SPIRVSubtarget &STI,
                    DenseMap<MachineInstr *, Type *> &TargetExtConstTypes) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DenseMap<MachineInstr *, Register> RegsAlreadyAddedToDT;
  SmallVector<MachineInstr *, 10> ToErase, ToEraseComposites;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (!isSpvIntrinsic(MI, Intrinsic::spv_track_constant))
        continue;
      ToErase.push_back(&MI);
      Register SrcReg = MI.getOperand(2).getReg();
      auto *Const =
          cast<Constant>(cast<ConstantAsMetadata>(
                             MI.getOperand(3).getMetadata()->getOperand(0))
                             ->getValue());
      if (auto *GV = dyn_cast<GlobalValue>(Const)) {
        Register Reg = GR->find(GV, &MF);
        if (!Reg.isValid()) {
          GR->add(GV, MRI.getVRegDef(SrcReg));
          GR->addGlobalObject(GV, &MF, SrcReg);
        } else
          RegsAlreadyAddedToDT[&MI] = Reg;
      } else {
        Register Reg = GR->find(Const, &MF);
        if (!Reg.isValid()) {
          if (auto *ConstVec = dyn_cast<ConstantDataVector>(Const)) {
            auto *BuildVec = MRI.getVRegDef(SrcReg);
            assert(BuildVec &&
                   BuildVec->getOpcode() == TargetOpcode::G_BUILD_VECTOR);
            GR->add(Const, BuildVec);
            for (unsigned i = 0; i < ConstVec->getNumElements(); ++i) {
              // Ensure that OpConstantComposite reuses a constant when it's
              // already created and available in the same machine function.
              Constant *ElemConst = ConstVec->getElementAsConstant(i);
              Register ElemReg = GR->find(ElemConst, &MF);
              if (!ElemReg.isValid())
                GR->add(ElemConst,
                        MRI.getVRegDef(BuildVec->getOperand(1 + i).getReg()));
              else
                BuildVec->getOperand(1 + i).setReg(ElemReg);
            }
          }
          if (Const->getType()->isTargetExtTy()) {
            // Remember the association so that we can restore it when we
            // assign types.
            MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
            if (SrcMI)
              GR->add(Const, SrcMI);
            if (SrcMI && (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT ||
                          SrcMI->getOpcode() == TargetOpcode::G_IMPLICIT_DEF))
              TargetExtConstTypes[SrcMI] = Const->getType();
            if (Const->isNullValue()) {
              MachineBasicBlock &DepMBB = MF.front();
              MachineIRBuilder MIB(DepMBB, DepMBB.getFirstNonPHI());
              SPIRVType *ExtType = GR->getOrCreateSPIRVType(
                  Const->getType(), MIB, SPIRV::AccessQualifier::ReadWrite,
                  true);
              assert(SrcMI && "Expected source instruction to be valid");
              SrcMI->setDesc(STI.getInstrInfo()->get(SPIRV::OpConstantNull));
              SrcMI->addOperand(MachineOperand::CreateReg(
                  GR->getSPIRVTypeID(ExtType), false));
            }
          }
        } else {
          RegsAlreadyAddedToDT[&MI] = Reg;
          // This MI is unused and will be removed. If the MI uses
          // const_composite, it will be unused and should be removed too.
          assert(MI.getOperand(2).isReg() && "Reg operand is expected");
          MachineInstr *SrcMI = MRI.getVRegDef(MI.getOperand(2).getReg());
          if (SrcMI && isSpvIntrinsic(*SrcMI, Intrinsic::spv_const_composite))
            ToEraseComposites.push_back(SrcMI);
        }
      }
    }
  }
  for (MachineInstr *MI : ToErase) {
    Register Reg = MI->getOperand(2).getReg();
    auto It = RegsAlreadyAddedToDT.find(MI);
    if (It != RegsAlreadyAddedToDT.end())
      Reg = It->second;
    auto *RC = MRI.getRegClassOrNull(MI->getOperand(0).getReg());
    if (!MRI.getRegClassOrNull(Reg) && RC)
      MRI.setRegClass(Reg, RC);
    MRI.replaceRegWith(MI->getOperand(0).getReg(), Reg);
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
  for (MachineInstr *MI : ToEraseComposites) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}

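// Emit OpName for every value annotated with an spv_assign_name intrinsic,
// then remove the intrinsic.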
static void foldConstantsIntoIntrinsics(MachineFunction &MF,
                                        SPIRVGlobalRegistry *GR,
                                        MachineIRBuilder MIB) {
  SmallVector<MachineInstr *, 10> ToErase;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (!isSpvIntrinsic(MI, Intrinsic::spv_assign_name))
        continue;
      const MDNode *MD = MI.getOperand(2).getMetadata();
      StringRef ValueName = cast<MDString>(MD->getOperand(0))->getString();
      if (ValueName.size() > 0) {
        MIB.setInsertPt(*MI.getParent(), MI);
        buildOpName(MI.getOperand(1).getReg(), ValueName, MIB);
      }
      ToErase.push_back(&MI);
    }
    for (MachineInstr *MI : ToErase) {
      GR->invalidateMachineInstr(MI);
      MI->eraseFromParent();
    }
    ToErase.clear();
  }
}

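// Return the spv_assign_type or spv_assign_ptr_type intrinsic that assigns a
// type to Reg, or nullptr if there is none.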
static MachineInstr *findAssignTypeInstr(Register Reg,
                                         MachineRegisterInfo *MRI) {
  for (MachineRegisterInfo::use_instr_iterator I = MRI->use_instr_begin(Reg),
                                                IE = MRI->use_instr_end();
       I != IE; ++I) {
    MachineInstr *UseMI = &*I;
    if ((isSpvIntrinsic(*UseMI, Intrinsic::spv_assign_ptr_type) ||
         isSpvIntrinsic(*UseMI, Intrinsic::spv_assign_type)) &&
        UseMI->getOperand(1).getReg() == Reg)
      return UseMI;
  }
  return nullptr;
}

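// Emit either a COPY (when the result and operand types match) or an
// OpBitcast from OpReg to ResVReg, after checking that the two SPIR-V types
// are bitcast-compatible.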
static void buildOpBitcast(SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
                           Register ResVReg, Register OpReg) {
  SPIRVType *ResType = GR->getSPIRVTypeForVReg(ResVReg);
  SPIRVType *OpType = GR->getSPIRVTypeForVReg(OpReg);
  assert(ResType && OpType && "Operand types are expected");
  if (!GR->isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  MachineRegisterInfo *MRI = MIB.getMRI();
  if (!MRI->getRegClassOrNull(ResVReg))
    MRI->setRegClass(ResVReg, GR->getRegClass(ResType));
  if (ResType == OpType)
    MIB.buildInstr(TargetOpcode::COPY).addDef(ResVReg).addUse(OpReg);
  else
    MIB.buildInstr(SPIRV::OpBitcast)
        .addDef(ResVReg)
        .addUse(GR->getSPIRVTypeID(ResType))
        .addUse(OpReg);
}

// We lower G_BITCAST to OpBitcast here to avoid a MachineVerifier error.
// The verifier checks if the source and destination LLTs of a G_BITCAST are
// different, but this check is too strict for SPIR-V's typed pointers, which
// may have the same LLT but different SPIRVType (e.g. pointers to different
// pointee types). By lowering to OpBitcast here, we bypass the verifier's
// check. See discussion in https://github.com/llvm/llvm-project/pull/110270
// for more context.
//
// We also handle the llvm.spv.bitcast intrinsic here. If the source and
// destination SPIR-V types are the same, we lower it to a COPY to enable
// further optimizations like copy propagation.
static void lowerBitcasts(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                          MachineIRBuilder MIB) {
  SmallVector<MachineInstr *, 10> ToErase;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (isSpvIntrinsic(MI, Intrinsic::spv_bitcast)) {
        Register DstReg = MI.getOperand(0).getReg();
        Register SrcReg = MI.getOperand(2).getReg();
        SPIRVType *DstType = GR->getSPIRVTypeForVReg(DstReg);
        assert(
            DstType &&
            "Expected destination SPIR-V type to have been assigned already.");
        SPIRVType *SrcType = GR->getSPIRVTypeForVReg(SrcReg);
        assert(SrcType &&
               "Expected source SPIR-V type to have been assigned already.");
        if (DstType == SrcType) {
          MIB.setInsertPt(*MI.getParent(), MI);
          MIB.buildCopy(DstReg, SrcReg);
          ToErase.push_back(&MI);
          continue;
        }
      }

      if (MI.getOpcode() != TargetOpcode::G_BITCAST)
        continue;

      MIB.setInsertPt(*MI.getParent(), MI);
      buildOpBitcast(GR, MIB, MI.getOperand(0).getReg(),
                     MI.getOperand(1).getReg());
      ToErase.push_back(&MI);
    }
  }
  for (MachineInstr *MI : ToErase) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}

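// Materialize spv_ptrcast intrinsics: drop the cast entirely when the source
// already carries the requested pointer type, otherwise emit a G_BITCAST to
// the assigned pointer type.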
static void insertBitcasts(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                           MachineIRBuilder MIB) {
  // Get access to information about available extensions
  const SPIRVSubtarget *ST =
      static_cast<const SPIRVSubtarget *>(&MIB.getMF().getSubtarget());
  SmallVector<MachineInstr *, 10> ToErase;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (!isSpvIntrinsic(MI, Intrinsic::spv_ptrcast))
        continue;
      assert(MI.getOperand(2).isReg());
      MIB.setInsertPt(*MI.getParent(), MI);
      ToErase.push_back(&MI);
      Register Def = MI.getOperand(0).getReg();
      Register Source = MI.getOperand(2).getReg();
      Type *ElemTy = getMDOperandAsType(MI.getOperand(3).getMetadata(), 0);
      SPIRVType *AssignedPtrType = GR->getOrCreateSPIRVPointerType(
          ElemTy, MI,
          addressSpaceToStorageClass(MI.getOperand(4).getImm(), *ST));

      // If the ptrcast would be redundant, replace all uses with the source
      // register.
      MachineRegisterInfo *MRI = MIB.getMRI();
      if (GR->getSPIRVTypeForVReg(Source) == AssignedPtrType) {
        // Erase Def's assign type instruction if we are going to replace Def.
        if (MachineInstr *AssignMI = findAssignTypeInstr(Def, MRI))
          ToErase.push_back(AssignMI);
        MRI->replaceRegWith(Def, Source);
      } else {
        if (!GR->getSPIRVTypeForVReg(Def, &MF))
          GR->assignSPIRVTypeToVReg(AssignedPtrType, Def, MF);
        MIB.buildBitcast(Def, Source);
      }
    }
  }
  for (MachineInstr *MI : ToErase) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}

// Translating GV, IRTranslator sometimes generates following IR:
//   %1 = G_GLOBAL_VALUE
//   %2 = COPY %1
//   %3 = G_ADDRSPACE_CAST %2
//
// or
//
//   %1 = G_ZEXT %2
//   G_MEMCPY ... %2 ...
//
// New registers have no SPIRVType and no register class info.
//
// Set SPIRVType for GV, propagate it from GV to other instructions,
// also set register classes.
static SPIRVType *propagateSPIRVType(MachineInstr *MI, SPIRVGlobalRegistry *GR,
                                     MachineRegisterInfo &MRI,
                                     MachineIRBuilder &MIB) {
  SPIRVType *SpvType = nullptr;
  assert(MI && "Machine instr is expected");
  if (MI->getOperand(0).isReg()) {
    Register Reg = MI->getOperand(0).getReg();
    SpvType = GR->getSPIRVTypeForVReg(Reg);
    if (!SpvType) {
      switch (MI->getOpcode()) {
      case TargetOpcode::G_FCONSTANT:
      case TargetOpcode::G_CONSTANT: {
        MIB.setInsertPt(*MI->getParent(), MI);
        Type *Ty = MI->getOperand(1).getCImm()->getType();
        SpvType = GR->getOrCreateSPIRVType(
            Ty, MIB, SPIRV::AccessQualifier::ReadWrite, true);
        break;
      }
      case TargetOpcode::G_GLOBAL_VALUE: {
        MIB.setInsertPt(*MI->getParent(), MI);
        const GlobalValue *Global = MI->getOperand(1).getGlobal();
        Type *ElementTy = toTypedPointer(GR->getDeducedGlobalValueType(Global));
        auto *Ty = TypedPointerType::get(ElementTy,
                                         Global->getType()->getAddressSpace());
        SpvType = GR->getOrCreateSPIRVType(
            Ty, MIB, SPIRV::AccessQualifier::ReadWrite, true);
        break;
      }
      case TargetOpcode::G_ANYEXT:
      case TargetOpcode::G_SEXT:
      case TargetOpcode::G_ZEXT: {
        if (MI->getOperand(1).isReg()) {
          if (MachineInstr *DefInstr =
                  MRI.getVRegDef(MI->getOperand(1).getReg())) {
            if (SPIRVType *Def = propagateSPIRVType(DefInstr, GR, MRI, MIB)) {
              unsigned CurrentBW = GR->getScalarOrVectorBitWidth(Def);
              unsigned ExpectedBW =
                  std::max(MRI.getType(Reg).getScalarSizeInBits(), CurrentBW);
              unsigned NumElements = GR->getScalarOrVectorComponentCount(Def);
              SpvType = GR->getOrCreateSPIRVIntegerType(ExpectedBW, MIB);
              if (NumElements > 1)
                SpvType = GR->getOrCreateSPIRVVectorType(SpvType, NumElements,
                                                         MIB, true);
            }
          }
        }
        break;
      }
      case TargetOpcode::G_PTRTOINT:
        SpvType = GR->getOrCreateSPIRVIntegerType(
            MRI.getType(Reg).getScalarSizeInBits(), MIB);
        break;
      case TargetOpcode::G_TRUNC:
      case TargetOpcode::G_ADDRSPACE_CAST:
      case TargetOpcode::G_PTR_ADD:
      case TargetOpcode::COPY: {
        MachineOperand &Op = MI->getOperand(1);
        MachineInstr *Def = Op.isReg() ? MRI.getVRegDef(Op.getReg()) : nullptr;
        if (Def)
          SpvType = propagateSPIRVType(Def, GR, MRI, MIB);
        break;
      }
      default:
        break;
      }
      if (SpvType) {
        // Check if the address space needs correction.
        LLT RegType = MRI.getType(Reg);
        if (SpvType->getOpcode() == SPIRV::OpTypePointer &&
            RegType.isPointer() &&
            storageClassToAddressSpace(GR->getPointerStorageClass(Reg)) !=
                RegType.getAddressSpace()) {
          const SPIRVSubtarget &ST =
              MI->getParent()->getParent()->getSubtarget<SPIRVSubtarget>();
          auto TSC = addressSpaceToStorageClass(RegType.getAddressSpace(), ST);
          SpvType = GR->changePointerStorageClass(SpvType, TSC, *MI);
        }
        GR->assignSPIRVTypeToVReg(SpvType, Reg, MIB.getMF());
      }
      if (!MRI.getRegClassOrNull(Reg))
        MRI.setRegClass(Reg, SpvType ? GR->getRegClass(SpvType)
                                     : &SPIRV::iIDRegClass);
    }
  }
  return SpvType;
}

// To support the current approach and limitations wrt. bit width, we widen a
// scalar register with a bit width greater than 1 to valid sizes and cap it
// at 64 bits.
static unsigned widenBitWidthToNextPow2(unsigned BitWidth) {
  if (BitWidth == 1)
    return 1; // No need to widen 1-bit values
  return std::min(std::max(1u << Log2_32_Ceil(BitWidth), 8u), 64u);
}

static void widenScalarType(Register Reg, MachineRegisterInfo &MRI) {
  LLT RegType = MRI.getType(Reg);
  if (!RegType.isScalar())
    return;
  unsigned CurrentWidth = RegType.getScalarSizeInBits();
  unsigned NewWidth = widenBitWidthToNextPow2(CurrentWidth);
  if (NewWidth != CurrentWidth)
    MRI.setType(Reg, LLT::scalar(NewWidth));
}

static void widenCImmType(MachineOperand &MOP) {
  const ConstantInt *CImmVal = MOP.getCImm();
  unsigned CurrentWidth = CImmVal->getBitWidth();
  unsigned NewWidth = widenBitWidthToNextPow2(CurrentWidth);
  if (NewWidth != CurrentWidth) {
    // Replace the immediate value with the widened version
    MOP.setCImm(ConstantInt::get(CImmVal->getType()->getContext(),
                                 CImmVal->getValue().zextOrTrunc(NewWidth)));
  }
}

static void setInsertPtAfterDef(MachineIRBuilder &MIB, MachineInstr *Def) {
  MachineBasicBlock &MBB = *Def->getParent();
  MachineBasicBlock::iterator DefIt =
      Def->getNextNode() ? Def->getNextNode()->getIterator() : MBB.end();
  // Skip all the PHI and debug instructions.
  while (DefIt != MBB.end() &&
         (DefIt->isPHI() || DefIt->isDebugOrPseudoInstr()))
    DefIt = std::next(DefIt);
  MIB.setInsertPt(MBB, DefIt);
}

namespace llvm {
void insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpvType,
                       SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
                       MachineRegisterInfo &MRI) {
  assert((Ty || SpvType) && "Either LLVM or SPIRV type is expected.");
  MachineInstr *Def = MRI.getVRegDef(Reg);
  setInsertPtAfterDef(MIB, Def);
  if (!SpvType)
    SpvType = GR->getOrCreateSPIRVType(Ty, MIB,
                                       SPIRV::AccessQualifier::ReadWrite, true);

  if (!isTypeFoldingSupported(Def->getOpcode())) {
    // No need to generate SPIRV::ASSIGN_TYPE pseudo-instruction
    if (!MRI.getRegClassOrNull(Reg))
      MRI.setRegClass(Reg, GR->getRegClass(SpvType));
    if (!MRI.getType(Reg).isValid())
      MRI.setType(Reg, GR->getRegType(SpvType));
    GR->assignSPIRVTypeToVReg(SpvType, Reg, MIB.getMF());
    return;
  }

  // Tablegen definition assumes SPIRV::ASSIGN_TYPE pseudo-instruction is
  // present after each auto-folded instruction to take a type reference from.
  Register NewReg = MRI.createGenericVirtualRegister(MRI.getType(Reg));
  const auto *RegClass = GR->getRegClass(SpvType);
  MRI.setRegClass(NewReg, RegClass);
  MRI.setRegClass(Reg, RegClass);

  GR->assignSPIRVTypeToVReg(SpvType, Reg, MIB.getMF());
  // This is to make it convenient for Legalizer to get the SPIRVType
  // when processing the actual MI (i.e. not pseudo one).
  GR->assignSPIRVTypeToVReg(SpvType, NewReg, MIB.getMF());
  // Copy MIFlags from Def to ASSIGN_TYPE instruction. It's required to keep
  // the flags after instruction selection.
  const uint32_t Flags = Def->getFlags();
  MIB.buildInstr(SPIRV::ASSIGN_TYPE)
      .addDef(Reg)
      .addUse(NewReg)
      .addUse(GR->getSPIRVTypeID(SpvType))
      .setMIFlags(Flags);
  for (unsigned I = 0, E = Def->getNumDefs(); I != E; ++I) {
    MachineOperand &MO = Def->getOperand(I);
    if (MO.getReg() == Reg) {
      MO.setReg(NewReg);
      break;
    }
  }
}

void processInstr(MachineInstr &MI, MachineIRBuilder &MIB,
                  MachineRegisterInfo &MRI, SPIRVGlobalRegistry *GR,
                  SPIRVType *KnownResType) {
  MIB.setInsertPt(*MI.getParent(), MI.getIterator());
  for (auto &Op : MI.operands()) {
    if (!Op.isReg() || Op.isDef())
      continue;
    Register OpReg = Op.getReg();
    SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OpReg);
    if (!SpvType && KnownResType) {
      SpvType = KnownResType;
      GR->assignSPIRVTypeToVReg(KnownResType, OpReg, *MI.getMF());
    }
    assert(SpvType);
    if (!MRI.getRegClassOrNull(OpReg))
      MRI.setRegClass(OpReg, GR->getRegClass(SpvType));
    if (!MRI.getType(OpReg).isValid())
      MRI.setType(OpReg, GR->getRegType(SpvType));
  }
}
} // namespace llvm

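// Walk the function in post order and attach SPIR-V types to registers:
// consume spv_assign_type/spv_assign_ptr_type intrinsics, insert ASSIGN_TYPE
// pseudos for constants and build_vectors, and widen narrow scalar types when
// the extended-integer extensions are not available.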
static void
generateAssignInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                     MachineIRBuilder MIB,
                     DenseMap<MachineInstr *, Type *> &TargetExtConstTypes) {
  // Get access to information about available extensions
  const SPIRVSubtarget *ST =
      static_cast<const SPIRVSubtarget *>(&MIB.getMF().getSubtarget());

  MachineRegisterInfo &MRI = MF.getRegInfo();
  SmallVector<MachineInstr *, 10> ToErase;
  DenseMap<MachineInstr *, Register> RegsAlreadyAddedToDT;

  bool IsExtendedInts =
      ST->canUseExtension(
          SPIRV::Extension::SPV_INTEL_arbitrary_precision_integers) ||
      ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions) ||
      ST->canUseExtension(SPIRV::Extension::SPV_INTEL_int4);

  for (MachineBasicBlock *MBB : post_order(&MF)) {
    if (MBB->empty())
      continue;

    bool ReachedBegin = false;
    for (auto MII = std::prev(MBB->end()), Begin = MBB->begin();
         !ReachedBegin;) {
      MachineInstr &MI = *MII;
      unsigned MIOp = MI.getOpcode();

      if (!IsExtendedInts) {
        // Validate bit width of scalar registers and constant immediates.
        for (auto &MOP : MI.operands()) {
          if (MOP.isReg())
            widenScalarType(MOP.getReg(), MRI);
          else if (MOP.isCImm())
            widenCImmType(MOP);
        }
      }

      if (isSpvIntrinsic(MI, Intrinsic::spv_assign_ptr_type)) {
        Register Reg = MI.getOperand(1).getReg();
        MIB.setInsertPt(*MI.getParent(), MI.getIterator());
        Type *ElementTy = getMDOperandAsType(MI.getOperand(2).getMetadata(), 0);
        SPIRVType *AssignedPtrType = GR->getOrCreateSPIRVPointerType(
            ElementTy, MI,
            addressSpaceToStorageClass(MI.getOperand(3).getImm(), *ST));
        MachineInstr *Def = MRI.getVRegDef(Reg);
        assert(Def && "Expecting an instruction that defines the register");
        // G_GLOBAL_VALUE already has type info.
        if (Def->getOpcode() != TargetOpcode::G_GLOBAL_VALUE &&
            Def->getOpcode() != SPIRV::ASSIGN_TYPE)
          insertAssignInstr(Reg, nullptr, AssignedPtrType, GR, MIB,
                            MF.getRegInfo());
        ToErase.push_back(&MI);
      } else if (isSpvIntrinsic(MI, Intrinsic::spv_assign_type)) {
        Register Reg = MI.getOperand(1).getReg();
        Type *Ty = getMDOperandAsType(MI.getOperand(2).getMetadata(), 0);
        MachineInstr *Def = MRI.getVRegDef(Reg);
        assert(Def && "Expecting an instruction that defines the register");
        // G_GLOBAL_VALUE already has type info.
        if (Def->getOpcode() != TargetOpcode::G_GLOBAL_VALUE &&
            Def->getOpcode() != SPIRV::ASSIGN_TYPE)
          insertAssignInstr(Reg, Ty, nullptr, GR, MIB, MF.getRegInfo());
        ToErase.push_back(&MI);
      } else if (MIOp == TargetOpcode::FAKE_USE && MI.getNumOperands() > 0) {
        MachineInstr *MdMI = MI.getPrevNode();
        if (MdMI && isSpvIntrinsic(*MdMI, Intrinsic::spv_value_md)) {
          // It's an internal service info from before IRTranslator passes.
          MachineInstr *Def = getVRegDef(MRI, MI.getOperand(0).getReg());
          for (unsigned I = 1, E = MI.getNumOperands(); I != E && Def; ++I)
            if (getVRegDef(MRI, MI.getOperand(I).getReg()) != Def)
              Def = nullptr;
          if (Def) {
            const MDNode *MD = MdMI->getOperand(1).getMetadata();
            StringRef ValueName =
                cast<MDString>(MD->getOperand(1))->getString();
            const MDNode *TypeMD = cast<MDNode>(MD->getOperand(0));
            Type *ValueTy = getMDOperandAsType(TypeMD, 0);
            GR->addValueAttrs(Def, std::make_pair(ValueTy, ValueName.str()));
          }
          ToErase.push_back(MdMI);
        }
        ToErase.push_back(&MI);
      } else if (MIOp == TargetOpcode::G_CONSTANT ||
                 MIOp == TargetOpcode::G_FCONSTANT ||
                 MIOp == TargetOpcode::G_BUILD_VECTOR) {
        // %rc = G_CONSTANT ty Val
        // ===>
        // %cty = OpType* ty
        // %rctmp = G_CONSTANT ty Val
        // %rc = ASSIGN_TYPE %rctmp, %cty
        Register Reg = MI.getOperand(0).getReg();
        bool NeedAssignType = true;
        if (MRI.hasOneUse(Reg)) {
          MachineInstr &UseMI = *MRI.use_instr_begin(Reg);
          if (isSpvIntrinsic(UseMI, Intrinsic::spv_assign_type) ||
              isSpvIntrinsic(UseMI, Intrinsic::spv_assign_name))
            continue;
          if (UseMI.getOpcode() == SPIRV::ASSIGN_TYPE)
            NeedAssignType = false;
        }
        Type *Ty = nullptr;
        if (MIOp == TargetOpcode::G_CONSTANT) {
          auto TargetExtIt = TargetExtConstTypes.find(&MI);
          Ty = TargetExtIt == TargetExtConstTypes.end()
                   ? MI.getOperand(1).getCImm()->getType()
                   : TargetExtIt->second;
          const ConstantInt *OpCI = MI.getOperand(1).getCImm();
          // TODO: we may wish to analyze here if OpCI is zero and LLT RegType =
          // MRI.getType(Reg); RegType.isPointer() is true, so that we observe
          // at this point not an i64/i32 constant but a null pointer in the
          // corresponding address space of RegType.getAddressSpace(). This may
          // help to successfully validate the case when an OpConstantComposite
          // constituent has a type that does not match the Result Type of
          // OpConstantComposite (see, for example,
          // pointers/PtrCast-null-in-OpSpecConstantOp.ll).
          Register PrimaryReg = GR->find(OpCI, &MF);
          if (!PrimaryReg.isValid()) {
            GR->add(OpCI, &MI);
          } else if (PrimaryReg != Reg &&
                     MRI.getType(Reg) == MRI.getType(PrimaryReg)) {
            auto *RCReg = MRI.getRegClassOrNull(Reg);
            auto *RCPrimary = MRI.getRegClassOrNull(PrimaryReg);
            if (!RCReg || RCPrimary == RCReg) {
              RegsAlreadyAddedToDT[&MI] = PrimaryReg;
              ToErase.push_back(&MI);
              NeedAssignType = false;
            }
          }
        } else if (MIOp == TargetOpcode::G_FCONSTANT) {
          Ty = MI.getOperand(1).getFPImm()->getType();
        } else {
          assert(MIOp == TargetOpcode::G_BUILD_VECTOR);
          Type *ElemTy = nullptr;
          MachineInstr *ElemMI = MRI.getVRegDef(MI.getOperand(1).getReg());
          assert(ElemMI);

          if (ElemMI->getOpcode() == TargetOpcode::G_CONSTANT) {
            ElemTy = ElemMI->getOperand(1).getCImm()->getType();
          } else if (ElemMI->getOpcode() == TargetOpcode::G_FCONSTANT) {
            ElemTy = ElemMI->getOperand(1).getFPImm()->getType();
          } else {
            if (const SPIRVType *ElemSpvType =
                    GR->getSPIRVTypeForVReg(MI.getOperand(1).getReg(), &MF))
              ElemTy = const_cast<Type *>(GR->getTypeForSPIRVType(ElemSpvType));
            if (!ElemTy) {
              // There may be a case when we already know Reg's type.
              MachineInstr *NextMI = MI.getNextNode();
              if (!NextMI || NextMI->getOpcode() != SPIRV::ASSIGN_TYPE ||
                  NextMI->getOperand(1).getReg() != Reg)
                llvm_unreachable("Unexpected opcode");
            }
          }
          if (ElemTy)
            Ty = VectorType::get(
                ElemTy, MI.getNumExplicitOperands() - MI.getNumExplicitDefs(),
                false);
          else
            NeedAssignType = false;
        }
        if (NeedAssignType)
          insertAssignInstr(Reg, Ty, nullptr, GR, MIB, MRI);
      } else if (MIOp == TargetOpcode::G_GLOBAL_VALUE) {
        propagateSPIRVType(&MI, GR, MRI, MIB);
      }

      if (MII == Begin)
        ReachedBegin = true;
      else
        --MII;
    }
  }
  for (MachineInstr *MI : ToErase) {
    auto It = RegsAlreadyAddedToDT.find(MI);
    if (It != RegsAlreadyAddedToDT.end())
      MRI.replaceRegWith(MI->getOperand(0).getReg(), It->second);
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }

  // Address the case when IRTranslator introduces instructions with new
  // registers without SPIRVType associated.
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      switch (MI.getOpcode()) {
      case TargetOpcode::G_TRUNC:
      case TargetOpcode::G_ANYEXT:
      case TargetOpcode::G_SEXT:
      case TargetOpcode::G_ZEXT:
      case TargetOpcode::G_PTRTOINT:
      case TargetOpcode::COPY:
      case TargetOpcode::G_ADDRSPACE_CAST:
        propagateSPIRVType(&MI, GR, MRI, MIB);
        break;
      }
    }
  }
}

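// Run processInstr on every instruction whose opcode supports type folding so
// that its operands get register classes and LLTs derived from their SPIR-V
// types.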
static void processInstrsWithTypeFolding(MachineFunction &MF,
                                         SPIRVGlobalRegistry *GR,
                                         MachineIRBuilder MIB) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (MachineBasicBlock &MBB : MF)
    for (MachineInstr &MI : MBB)
      if (isTypeFoldingSupported(MI.getOpcode()))
        processInstr(MI, MIB, MRI, GR, nullptr);
}

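// Scan the operands of an INLINEASM instruction: return its def register (if
// any) and, when Ops is provided, collect the indices of the remaining
// non-metadata use operands.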
static Register
collectInlineAsmInstrOperands(MachineInstr *MI,
                              SmallVector<unsigned, 4> *Ops = nullptr) {
  Register DefReg;
  unsigned StartOp = InlineAsm::MIOp_FirstOperand,
           AsmDescOp = InlineAsm::MIOp_FirstOperand;
  for (unsigned Idx = StartOp, MISz = MI->getNumOperands(); Idx != MISz;
       ++Idx) {
    const MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isMetadata())
      continue;
    if (Idx == AsmDescOp && MO.isImm()) {
      // compute the index of the next operand descriptor
      const InlineAsm::Flag F(MO.getImm());
      AsmDescOp += 1 + F.getNumOperandRegisters();
      continue;
    }
    if (MO.isReg() && MO.isDef()) {
      if (!Ops)
        return MO.getReg();
      else
        DefReg = MO.getReg();
    } else if (Ops) {
      Ops->push_back(Idx);
    }
  }
  return DefReg;
}

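// Lower each (spv_inline_asm intrinsic, INLINEASM) pair into the
// SPV_INTEL_inline_assembly instructions OpAsmTargetINTEL, OpAsmINTEL and
// OpAsmCallINTEL, plus a SideEffectsINTEL decoration when needed.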
static void
insertInlineAsmProcess(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                       const SPIRVSubtarget &ST, MachineIRBuilder MIRBuilder,
                       const SmallVector<MachineInstr *> &ToProcess) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register AsmTargetReg;
  for (unsigned i = 0, Sz = ToProcess.size(); i + 1 < Sz; i += 2) {
    MachineInstr *I1 = ToProcess[i], *I2 = ToProcess[i + 1];
    assert(isSpvIntrinsic(*I1, Intrinsic::spv_inline_asm) && I2->isInlineAsm());
    MIRBuilder.setInsertPt(*I2->getParent(), *I2);

    if (!AsmTargetReg.isValid()) {
      // define vendor specific assembly target or dialect
      AsmTargetReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
      MRI.setRegClass(AsmTargetReg, &SPIRV::iIDRegClass);
      auto AsmTargetMIB =
          MIRBuilder.buildInstr(SPIRV::OpAsmTargetINTEL).addDef(AsmTargetReg);
      addStringImm(ST.getTargetTripleAsStr(), AsmTargetMIB);
      GR->add(AsmTargetMIB.getInstr(), AsmTargetMIB);
    }

    // create types
    const MDNode *IAMD = I1->getOperand(1).getMetadata();
    FunctionType *FTy = cast<FunctionType>(getMDOperandAsType(IAMD, 0));
    SmallVector<SPIRVType *, 4> ArgTypes;
    for (const auto &ArgTy : FTy->params())
      ArgTypes.push_back(GR->getOrCreateSPIRVType(
          ArgTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true));
    SPIRVType *RetType =
        GR->getOrCreateSPIRVType(FTy->getReturnType(), MIRBuilder,
                                 SPIRV::AccessQualifier::ReadWrite, true);
    SPIRVType *FuncType = GR->getOrCreateOpTypeFunctionWithArgs(
        FTy, RetType, ArgTypes, MIRBuilder);

    // define vendor specific assembly instructions string
    Register AsmReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    MRI.setRegClass(AsmReg, &SPIRV::iIDRegClass);
    auto AsmMIB = MIRBuilder.buildInstr(SPIRV::OpAsmINTEL)
                      .addDef(AsmReg)
                      .addUse(GR->getSPIRVTypeID(RetType))
                      .addUse(GR->getSPIRVTypeID(FuncType))
                      .addUse(AsmTargetReg);
    // inline asm string:
    addStringImm(I2->getOperand(InlineAsm::MIOp_AsmString).getSymbolName(),
                 AsmMIB);
    // inline asm constraint string:
    addStringImm(cast<MDString>(I1->getOperand(2).getMetadata()->getOperand(0))
                     ->getString(),
                 AsmMIB);
    GR->add(AsmMIB.getInstr(), AsmMIB);

    // calls the inline assembly instruction
    unsigned ExtraInfo = I2->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
      MIRBuilder.buildInstr(SPIRV::OpDecorate)
          .addUse(AsmReg)
          .addImm(static_cast<uint32_t>(SPIRV::Decoration::SideEffectsINTEL));

    Register DefReg = collectInlineAsmInstrOperands(I2);
    if (!DefReg.isValid()) {
      DefReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
      MRI.setRegClass(DefReg, &SPIRV::iIDRegClass);
      SPIRVType *VoidType = GR->getOrCreateSPIRVType(
          Type::getVoidTy(MF.getFunction().getContext()), MIRBuilder,
          SPIRV::AccessQualifier::ReadWrite, true);
      GR->assignSPIRVTypeToVReg(VoidType, DefReg, MF);
    }

    auto AsmCall = MIRBuilder.buildInstr(SPIRV::OpAsmCallINTEL)
                       .addDef(DefReg)
                       .addUse(GR->getSPIRVTypeID(RetType))
                       .addUse(AsmReg);
    for (unsigned IntrIdx = 3; IntrIdx < I1->getNumOperands(); ++IntrIdx)
      AsmCall.addUse(I1->getOperand(IntrIdx).getReg());
  }
  for (MachineInstr *MI : ToProcess) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}

static void insertInlineAsm(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                            const SPIRVSubtarget &ST,
                            MachineIRBuilder MIRBuilder) {
  SmallVector<MachineInstr *> ToProcess;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (isSpvIntrinsic(MI, Intrinsic::spv_inline_asm) ||
          MI.getOpcode() == TargetOpcode::INLINEASM)
        ToProcess.push_back(&MI);
    }
  }
  if (ToProcess.size() == 0)
    return;

  if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_inline_assembly))
    report_fatal_error("Inline assembly instructions require the "
                       "following SPIR-V extension: SPV_INTEL_inline_assembly",
                       false);

  insertInlineAsmProcess(MF, GR, ST, MIRBuilder, ToProcess);
}

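// Reinterpret the bits of a float as a 32-bit SPIR-V literal word.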
static uint32_t convertFloatToSPIRVWord(float F) {
  union {
    float F;
    uint32_t Spir;
  } FPMaxError;
  FPMaxError.F = F;
  return FPMaxError.Spir;
}

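// Translate decoration intrinsics (spv_assign_decoration,
// spv_assign_aliasing_decoration, spv_assign_fpmaxerror_decoration) into the
// corresponding OpDecorate instructions and erase the intrinsics.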
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                                   MachineIRBuilder MIB) {
  const SPIRVSubtarget &ST = MF.getSubtarget<SPIRVSubtarget>();
  SmallVector<MachineInstr *, 10> ToErase;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (!isSpvIntrinsic(MI, Intrinsic::spv_assign_decoration) &&
          !isSpvIntrinsic(MI, Intrinsic::spv_assign_aliasing_decoration) &&
          !isSpvIntrinsic(MI, Intrinsic::spv_assign_fpmaxerror_decoration))
        continue;
      MIB.setInsertPt(*MI.getParent(), MI.getNextNode());
      if (isSpvIntrinsic(MI, Intrinsic::spv_assign_decoration)) {
        buildOpSpirvDecorations(MI.getOperand(1).getReg(), MIB,
                                MI.getOperand(2).getMetadata(), ST);
      } else if (isSpvIntrinsic(MI,
                                Intrinsic::spv_assign_fpmaxerror_decoration)) {
        ConstantFP *OpV = mdconst::dyn_extract<ConstantFP>(
            MI.getOperand(2).getMetadata()->getOperand(0));
        uint32_t OpValue =
            convertFloatToSPIRVWord(OpV->getValueAPF().convertToFloat());

        buildOpDecorate(MI.getOperand(1).getReg(), MIB,
                        SPIRV::Decoration::FPMaxErrorDecorationINTEL,
                        {OpValue});
      } else {
        GR->buildMemAliasingOpDecorate(MI.getOperand(1).getReg(), MIB,
                                       MI.getOperand(2).getImm(),
                                       MI.getOperand(3).getMetadata());
      }

      ToErase.push_back(&MI);
    }
  }
  for (MachineInstr *MI : ToErase) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}

// LLVM allows switches to use registers as cases, while SPIR-V requires those
// to be immediate values. This function replaces such operands with the
// equivalent immediate constant.
static void processSwitchesConstants(MachineFunction &MF,
                                     SPIRVGlobalRegistry *GR,
                                     MachineIRBuilder MIB) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (!isSpvIntrinsic(MI, Intrinsic::spv_switch))
        continue;

      SmallVector<MachineOperand, 8> NewOperands;
      NewOperands.push_back(MI.getOperand(0)); // Opcode
      NewOperands.push_back(MI.getOperand(1)); // Condition
      NewOperands.push_back(MI.getOperand(2)); // Default
      for (unsigned i = 3; i < MI.getNumOperands(); i += 2) {
        Register Reg = MI.getOperand(i).getReg();
        MachineInstr *ConstInstr = getDefInstrMaybeConstant(Reg, &MRI);
        NewOperands.push_back(
            MachineOperand::CreateCImm(ConstInstr->getOperand(1).getCImm()));

        NewOperands.push_back(MI.getOperand(i + 1));
      }

      assert(MI.getNumOperands() == NewOperands.size());
      while (MI.getNumOperands() > 0)
        MI.removeOperand(0);
      for (auto &MO : NewOperands)
        MI.addOperand(MO);
    }
  }
}

// Some instructions are used during CodeGen but should never be emitted.
// Clean those up here.
static void cleanupHelperInstructions(MachineFunction &MF,
                                      SPIRVGlobalRegistry *GR) {
  SmallVector<MachineInstr *, 8> ToEraseMI;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (isSpvIntrinsic(MI, Intrinsic::spv_track_constant) ||
          MI.getOpcode() == TargetOpcode::G_BRINDIRECT)
        ToEraseMI.push_back(&MI);
    }
  }

  for (MachineInstr *MI : ToEraseMI) {
    GR->invalidateMachineInstr(MI);
    MI->eraseFromParent();
  }
}

// Find all usages of G_BLOCK_ADDR in our intrinsics and replace those
// operands/registers by the actual MBB it references.
static void processBlockAddr(MachineFunction &MF, SPIRVGlobalRegistry *GR,
                             MachineIRBuilder MIB) {
  // Gather the reverse-mapping BB -> MBB.
  DenseMap<const BasicBlock *, MachineBasicBlock *> BB2MBB;
  for (MachineBasicBlock &MBB : MF)
    BB2MBB[MBB.getBasicBlock()] = &MBB;

  // Gather instructions requiring patching. For now, only those can use
  // G_BLOCK_ADDR.
  SmallVector<MachineInstr *, 8> InstructionsToPatch;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (isSpvIntrinsic(MI, Intrinsic::spv_switch) ||
          isSpvIntrinsic(MI, Intrinsic::spv_loop_merge) ||
          isSpvIntrinsic(MI, Intrinsic::spv_selection_merge))
        InstructionsToPatch.push_back(&MI);
    }
  }

  // For each instruction to fix, we replace all the G_BLOCK_ADDR operands by
  // the actual MBB it references. Once those references have been updated, we
  // can cleanup remaining G_BLOCK_ADDR references.
  SmallPtrSet<MachineBasicBlock *, 8> ClearAddressTaken;
  SmallPtrSet<MachineInstr *, 8> ToEraseMI;
  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (MachineInstr *MI : InstructionsToPatch) {
    SmallVector<MachineOperand, 8> NewOps;
    for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
      // The operand is not a register, keep as-is.
      if (!MI->getOperand(i).isReg()) {
        NewOps.push_back(MI->getOperand(i));
        continue;
      }

      Register Reg = MI->getOperand(i).getReg();
      MachineInstr *BuildMBB = MRI.getVRegDef(Reg);
      // The register is not the result of G_BLOCK_ADDR, keep as-is.
      if (!BuildMBB || BuildMBB->getOpcode() != TargetOpcode::G_BLOCK_ADDR) {
        NewOps.push_back(MI->getOperand(i));
        continue;
      }

      assert(BuildMBB && BuildMBB->getOpcode() == TargetOpcode::G_BLOCK_ADDR &&
             BuildMBB->getOperand(1).isBlockAddress() &&
             BuildMBB->getOperand(1).getBlockAddress());
      BasicBlock *BB =
          BuildMBB->getOperand(1).getBlockAddress()->getBasicBlock();
      auto It = BB2MBB.find(BB);
      if (It == BB2MBB.end())
        report_fatal_error("cannot find a machine basic block by a basic block "
                           "in a switch statement");
      MachineBasicBlock *ReferencedBlock = It->second;
      NewOps.push_back(MachineOperand::CreateMBB(ReferencedBlock));

      ClearAddressTaken.insert(ReferencedBlock);
      ToEraseMI.insert(BuildMBB);
    }

    // Replace the operands.
    assert(MI->getNumOperands() == NewOps.size());
    while (MI->getNumOperands() > 0)
      MI->removeOperand(0);
    for (auto &MO : NewOps)
      MI->addOperand(MO);

    if (MachineInstr *Next = MI->getNextNode()) {
      if (isSpvIntrinsic(*Next, Intrinsic::spv_track_constant)) {
        ToEraseMI.insert(Next);
        Next = MI->getNextNode();
      }
      if (Next && Next->getOpcode() == TargetOpcode::G_BRINDIRECT)
        ToEraseMI.insert(Next);
    }
  }

  // BlockAddress operands were used to keep information between passes,
  // let's undo the "address taken" status to reflect that Succ doesn't
  // actually correspond to an IR-level basic block.
  for (MachineBasicBlock *Succ : ClearAddressTaken)
    Succ->setAddressTakenIRBlock(nullptr);

  // If we just delete G_BLOCK_ADDR instructions with BlockAddress operands,
  // this leaves their BasicBlock counterparts in an "address taken" status,
  // which would make AsmPrinter generate a series of unneeded labels of an
  // "Address of block that was removed by CodeGen" kind. Let's first ensure
  // that we don't have dangling BlockAddress constants by zapping the
  // BlockAddress nodes, and only after that proceed with erasing G_BLOCK_ADDR
  // instructions.
  Constant *Replacement =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction().getContext()), 1);
  for (MachineInstr *BlockAddrI : ToEraseMI) {
    if (BlockAddrI->getOpcode() == TargetOpcode::G_BLOCK_ADDR) {
      BlockAddress *BA = const_cast<BlockAddress *>(
          BlockAddrI->getOperand(1).getBlockAddress());
      BA->replaceAllUsesWith(
          ConstantExpr::getIntToPtr(Replacement, BA->getType()));
      BA->destroyConstant();
    }
    GR->invalidateMachineInstr(BlockAddrI);
    BlockAddrI->eraseFromParent();
  }
}

static bool isImplicitFallthrough(MachineBasicBlock &MBB) {
  if (MBB.empty())
    return true;

  // Branching SPIR-V intrinsics are not detected by this generic method.
  // Thus, we can only trust a negative result.
  if (!MBB.canFallThrough())
    return false;

  // Otherwise, we must manually check if we have a SPIR-V intrinsic which
  // prevents an implicit fallthrough.
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
       It != E; ++It) {
    if (isSpvIntrinsic(*It, Intrinsic::spv_switch))
      return false;
  }
  return true;
}

static void removeImplicitFallthroughs(MachineFunction &MF,
                                       MachineIRBuilder MIB) {
  // It is valid for MachineBasicBlocks to not finish with a branch
  // instruction. In such cases, they simply fall through to their immediate
  // successor, so an explicit branch has to be emitted.
  for (MachineBasicBlock &MBB : MF) {
    if (!isImplicitFallthrough(MBB))
      continue;

    assert(std::distance(MBB.successors().begin(), MBB.successors().end()) ==
           1);
    MIB.setInsertPt(MBB, MBB.end());
    MIB.buildBr(**MBB.successors().begin());
  }
}

bool SPIRVPreLegalizer::runOnMachineFunction(MachineFunction &MF) {
  // Initialize the type registry.
  const SPIRVSubtarget &ST = MF.getSubtarget<SPIRVSubtarget>();
  SPIRVGlobalRegistry *GR = ST.getSPIRVGlobalRegistry();
  GR->setCurrentFunc(MF);
  MachineIRBuilder MIB(MF);
  // a registry of target extension constants
  DenseMap<MachineInstr *, Type *> TargetExtConstTypes;
  // to keep record of tracked constants
  addConstantsToTrack(MF, GR, ST, TargetExtConstTypes);
  foldConstantsIntoIntrinsics(MF, GR, MIB);
  insertBitcasts(MF, GR, MIB);
  generateAssignInstrs(MF, GR, MIB, TargetExtConstTypes);

  processSwitchesConstants(MF, GR, MIB);
  processBlockAddr(MF, GR, MIB);
  cleanupHelperInstructions(MF, GR);

  processInstrsWithTypeFolding(MF, GR, MIB);
  removeImplicitFallthroughs(MF, MIB);
  insertSpirvDecorations(MF, GR, MIB);
  insertInlineAsm(MF, GR, ST, MIB);
  lowerBitcasts(MF, GR, MIB);

  return true;
}

INITIALIZE_PASS(SPIRVPreLegalizer, DEBUG_TYPE, "SPIRV pre legalizer", false,
                false)

char SPIRVPreLegalizer::ID = 0;

FunctionPass *llvm::createSPIRVPreLegalizerPass() {
  return new SPIRVPreLegalizer();
}