LLVM 23.0.0git
WebAssemblyFastISel.cpp
Go to the documentation of this file.
1//===-- WebAssemblyFastISel.cpp - WebAssembly FastISel implementation -----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the WebAssembly-specific support for the FastISel
11/// class. Some of the target-specific code is generated by tablegen in the file
12/// WebAssemblyGenFastISel.inc, which is #included here.
13///
14/// TODO: kill flags
15///
16//===----------------------------------------------------------------------===//
17
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssembly.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyUtilities.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Operator.h"
#include "llvm/MC/MCSymbolWasm.h"
39
40using namespace llvm;
41
42#define DEBUG_TYPE "wasm-fastisel"
43
44namespace {
45
46class WebAssemblyFastISel final : public FastISel {
47 // All possible address modes.
48 class Address {
49 public:
50 enum BaseKind { RegBase, FrameIndexBase };
51
52 private:
53 BaseKind Kind = RegBase;
54 union {
55 unsigned Reg;
56 int FI;
57 } Base;
58
59 // Whether the base has been determined yet
60 bool IsBaseSet = false;
61
62 int64_t Offset = 0;
63
64 const GlobalValue *GV = nullptr;
65
66 public:
67 // Innocuous defaults for our address.
68 Address() { Base.Reg = 0; }
69 void setKind(BaseKind K) {
70 assert(!isSet() && "Can't change kind with non-zero base");
71 Kind = K;
72 }
73 BaseKind getKind() const { return Kind; }
74 bool isRegBase() const { return Kind == RegBase; }
75 bool isFIBase() const { return Kind == FrameIndexBase; }
76 void setReg(unsigned Reg) {
77 assert(isRegBase() && "Invalid base register access!");
78 assert(!IsBaseSet && "Base cannot be reset");
79 Base.Reg = Reg;
80 IsBaseSet = true;
81 }
82 unsigned getReg() const {
83 assert(isRegBase() && "Invalid base register access!");
84 return Base.Reg;
85 }
86 void setFI(unsigned FI) {
87 assert(isFIBase() && "Invalid base frame index access!");
88 assert(!IsBaseSet && "Base cannot be reset");
89 Base.FI = FI;
90 IsBaseSet = true;
91 }
92 unsigned getFI() const {
93 assert(isFIBase() && "Invalid base frame index access!");
94 return Base.FI;
95 }
96
97 void setOffset(int64_t NewOffset) {
98 assert(NewOffset >= 0 && "Offsets must be non-negative");
99 Offset = NewOffset;
100 }
101 int64_t getOffset() const { return Offset; }
102 void setGlobalValue(const GlobalValue *G) { GV = G; }
103 const GlobalValue *getGlobalValue() const { return GV; }
104 bool isSet() const { return IsBaseSet; }
105 };
106
107 /// Keep a pointer to the WebAssemblySubtarget around so that we can make the
108 /// right decision when generating code for different targets.
109 const WebAssemblySubtarget *Subtarget;
110 LLVMContext *Context;
111
112private:
113 // Utility helper routines
114 MVT::SimpleValueType getSimpleType(Type *Ty) {
115 EVT VT = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true);
116 return VT.isSimple() ? VT.getSimpleVT().SimpleTy
118 }
120 switch (VT) {
121 case MVT::i1:
122 case MVT::i8:
123 case MVT::i16:
124 return MVT::i32;
125 case MVT::i32:
126 case MVT::i64:
127 case MVT::f32:
128 case MVT::f64:
129 return VT;
130 case MVT::funcref:
131 case MVT::externref:
132 if (Subtarget->hasReferenceTypes())
133 return VT;
134 break;
135 case MVT::exnref:
136 if (Subtarget->hasReferenceTypes() && Subtarget->hasExceptionHandling())
137 return VT;
138 break;
139 case MVT::f16:
140 return MVT::f32;
141 case MVT::v16i8:
142 case MVT::v8i16:
143 case MVT::v4i32:
144 case MVT::v4f32:
145 case MVT::v2i64:
146 case MVT::v2f64:
147 if (Subtarget->hasSIMD128())
148 return VT;
149 break;
150 default:
151 break;
152 }
154 }
155 bool computeAddress(const Value *Obj, Address &Addr);
156 void materializeLoadStoreOperands(Address &Addr);
157 void addLoadStoreOperands(const Address &Addr, const MachineInstrBuilder &MIB,
158 MachineMemOperand *MMO);
159 bool emitLoad(Register ResultReg, unsigned Opc, const LoadInst *LoadInst);
160 unsigned maskI1Value(unsigned Reg, const Value *V);
161 unsigned getRegForI1Value(const Value *V, const BasicBlock *BB, bool &Not);
162 unsigned zeroExtendToI32(unsigned Reg, const Value *V,
164 unsigned signExtendToI32(unsigned Reg, const Value *V,
166 unsigned zeroExtend(unsigned Reg, const Value *V, MVT::SimpleValueType From,
168 unsigned signExtend(unsigned Reg, const Value *V, MVT::SimpleValueType From,
170 unsigned getRegForUnsignedValue(const Value *V);
171 unsigned getRegForSignedValue(const Value *V);
172 unsigned getRegForPromotedValue(const Value *V, bool IsSigned);
173 unsigned notValue(unsigned Reg);
174 unsigned copyValue(unsigned Reg);
175
176 // Backend specific FastISel code.
177 Register fastMaterializeAlloca(const AllocaInst *AI) override;
178 Register fastMaterializeConstant(const Constant *C) override;
179 bool fastLowerArguments() override;
180
181 // Selection routines.
182 bool selectCall(const Instruction *I);
183 bool selectSelect(const Instruction *I);
184 bool selectTrunc(const Instruction *I);
185 bool selectZExt(const Instruction *I);
186 bool selectSExt(const Instruction *I);
187 bool selectICmp(const Instruction *I);
188 bool selectFCmp(const Instruction *I);
189 bool selectBitCast(const Instruction *I);
190 bool selectLoad(const Instruction *I);
191 bool selectStore(const Instruction *I);
192 bool selectCondBr(const Instruction *I);
193 bool selectRet(const Instruction *I);
194 bool selectUnreachable(const Instruction *I);
195
196public:
197 // Backend specific FastISel code.
198 WebAssemblyFastISel(FunctionLoweringInfo &FuncInfo,
199 const TargetLibraryInfo *LibInfo,
200 const LibcallLoweringInfo *LibcallLowering)
201 : FastISel(FuncInfo, LibInfo, LibcallLowering,
202 /*SkipTargetIndependentISel=*/true) {
203 Subtarget = &FuncInfo.MF->getSubtarget<WebAssemblySubtarget>();
204 Context = &FuncInfo.Fn->getContext();
205 }
206
207 bool fastSelectInstruction(const Instruction *I) override;
208 bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
209 const LoadInst *LI) override;
210
211#include "WebAssemblyGenFastISel.inc"
212};
213
214} // end anonymous namespace
215
216bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
217 const User *U = nullptr;
218 unsigned Opcode = Instruction::UserOp1;
219 if (const auto *I = dyn_cast<Instruction>(Obj)) {
220 // Don't walk into other basic blocks unless the object is an alloca from
221 // another block, otherwise it may not have a virtual register assigned.
222 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
223 FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
224 Opcode = I->getOpcode();
225 U = I;
226 }
227 } else if (const auto *C = dyn_cast<ConstantExpr>(Obj)) {
228 Opcode = C->getOpcode();
229 U = C;
230 }
231
232 if (auto *Ty = dyn_cast<PointerType>(Obj->getType()))
233 if (Ty->getAddressSpace() > 255)
234 // Fast instruction selection doesn't support the special
235 // address spaces.
236 return false;
237
238 if (const auto *GV = dyn_cast<GlobalValue>(Obj)) {
239 if (TLI.isPositionIndependent())
240 return false;
241 if (Addr.getGlobalValue())
242 return false;
243 if (GV->isThreadLocal())
244 return false;
245 Addr.setGlobalValue(GV);
246 return true;
247 }
248
249 switch (Opcode) {
250 default:
251 break;
252 case Instruction::BitCast: {
253 // Look through bitcasts.
254 return computeAddress(U->getOperand(0), Addr);
255 }
256 case Instruction::IntToPtr: {
257 // Look past no-op inttoptrs.
258 if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
259 TLI.getPointerTy(DL))
260 return computeAddress(U->getOperand(0), Addr);
261 break;
262 }
263 case Instruction::PtrToInt: {
264 // Look past no-op ptrtoints.
265 if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
266 return computeAddress(U->getOperand(0), Addr);
267 break;
268 }
269 case Instruction::GetElementPtr: {
270 Address SavedAddr = Addr;
271 uint64_t TmpOffset = Addr.getOffset();
272 // Non-inbounds geps can wrap; wasm's offsets can't.
273 if (!cast<GEPOperator>(U)->isInBounds())
274 goto unsupported_gep;
275 // Iterate through the GEP folding the constants into offsets where
276 // we can.
278 GTI != E; ++GTI) {
279 const Value *Op = GTI.getOperand();
280 if (StructType *STy = GTI.getStructTypeOrNull()) {
281 const StructLayout *SL = DL.getStructLayout(STy);
282 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
283 TmpOffset += SL->getElementOffset(Idx);
284 } else {
285 uint64_t S = GTI.getSequentialElementStride(DL);
286 for (;;) {
287 if (const auto *CI = dyn_cast<ConstantInt>(Op)) {
288 // Constant-offset addressing.
289 TmpOffset += CI->getSExtValue() * S;
290 break;
291 }
292 if (S == 1 && Addr.isRegBase() && Addr.getReg() == 0) {
293 // An unscaled add of a register. Set it as the new base.
294 Register Reg = getRegForValue(Op);
295 if (Reg == 0)
296 return false;
297 Addr.setReg(Reg);
298 break;
299 }
300 if (canFoldAddIntoGEP(U, Op)) {
301 // A compatible add with a constant operand. Fold the constant.
302 auto *CI = cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
303 TmpOffset += CI->getSExtValue() * S;
304 // Iterate on the other operand.
305 Op = cast<AddOperator>(Op)->getOperand(0);
306 continue;
307 }
308 // Unsupported
309 goto unsupported_gep;
310 }
311 }
312 }
313 // Don't fold in negative offsets.
314 if (int64_t(TmpOffset) >= 0) {
315 // Try to grab the base operand now.
316 Addr.setOffset(TmpOffset);
317 if (computeAddress(U->getOperand(0), Addr))
318 return true;
319 }
320 // We failed, restore everything and try the other options.
321 Addr = SavedAddr;
322 unsupported_gep:
323 break;
324 }
325 case Instruction::Alloca: {
326 const auto *AI = cast<AllocaInst>(Obj);
327 auto SI = FuncInfo.StaticAllocaMap.find(AI);
328 if (SI != FuncInfo.StaticAllocaMap.end()) {
329 if (Addr.isSet()) {
330 return false;
331 }
332 Addr.setKind(Address::FrameIndexBase);
333 Addr.setFI(SI->second);
334 return true;
335 }
336 break;
337 }
338 case Instruction::Add: {
339 // We should not fold operands into an offset when 'nuw' (no unsigned wrap)
340 // is not present, because the address calculation does not wrap.
341 if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(U))
342 if (!OFBinOp->hasNoUnsignedWrap())
343 break;
344
345 // Adds of constants are common and easy enough.
346 const Value *LHS = U->getOperand(0);
347 const Value *RHS = U->getOperand(1);
348
350 std::swap(LHS, RHS);
351
352 if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
353 uint64_t TmpOffset = Addr.getOffset() + CI->getSExtValue();
354 if (int64_t(TmpOffset) >= 0) {
355 Addr.setOffset(TmpOffset);
356 return computeAddress(LHS, Addr);
357 }
358 }
359
360 Address Backup = Addr;
361 if (computeAddress(LHS, Addr) && computeAddress(RHS, Addr))
362 return true;
363 Addr = Backup;
364
365 break;
366 }
367 case Instruction::Sub: {
368 // We should not fold operands into an offset when 'nuw' (no unsigned wrap)
369 // is not present, because the address calculation does not wrap.
370 if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(U))
371 if (!OFBinOp->hasNoUnsignedWrap())
372 break;
373
374 // Subs of constants are common and easy enough.
375 const Value *LHS = U->getOperand(0);
376 const Value *RHS = U->getOperand(1);
377
378 if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
379 int64_t TmpOffset = Addr.getOffset() - CI->getSExtValue();
380 if (TmpOffset >= 0) {
381 Addr.setOffset(TmpOffset);
382 return computeAddress(LHS, Addr);
383 }
384 }
385 break;
386 }
387 }
388 if (Addr.isSet()) {
389 return false;
390 }
391 Register Reg = getRegForValue(Obj);
392 if (Reg == 0)
393 return false;
394 Addr.setReg(Reg);
395 return Addr.getReg() != 0;
396}
397
398void WebAssemblyFastISel::materializeLoadStoreOperands(Address &Addr) {
399 if (Addr.isRegBase()) {
400 unsigned Reg = Addr.getReg();
401 if (Reg == 0) {
402 Reg = createResultReg(Subtarget->hasAddr64() ? &WebAssembly::I64RegClass
403 : &WebAssembly::I32RegClass);
404 unsigned Opc = Subtarget->hasAddr64() ? WebAssembly::CONST_I64
405 : WebAssembly::CONST_I32;
406 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), Reg)
407 .addImm(0);
408 Addr.setReg(Reg);
409 }
410 }
411}
412
413void WebAssemblyFastISel::addLoadStoreOperands(const Address &Addr,
414 const MachineInstrBuilder &MIB,
415 MachineMemOperand *MMO) {
416 // Set the alignment operand (this is rewritten in SetP2AlignOperands).
417 // TODO: Disable SetP2AlignOperands for FastISel and just do it here.
418 MIB.addImm(0);
419
420 if (const GlobalValue *GV = Addr.getGlobalValue())
421 MIB.addGlobalAddress(GV, Addr.getOffset());
422 else
423 MIB.addImm(Addr.getOffset());
424
425 if (Addr.isRegBase())
426 MIB.addReg(Addr.getReg());
427 else
428 MIB.addFrameIndex(Addr.getFI());
429
430 MIB.addMemOperand(MMO);
431}
432
433bool WebAssemblyFastISel::emitLoad(Register ResultReg, unsigned Opc,
434 const LoadInst *Load) {
435 Address Addr;
436 if (!computeAddress(Load->getPointerOperand(), Addr))
437 return false;
438
439 materializeLoadStoreOperands(Addr);
440 auto MIB =
441 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg);
442 addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Load));
443
444 return true;
445}
446
// Mask the value in Reg down to its low bit by treating it as an i1 and
// zero-extending to i32; used because upper bits of a boolean register are
// not guaranteed to be zero unless zeroExtendToI32 can prove otherwise.
unsigned WebAssemblyFastISel::maskI1Value(unsigned Reg, const Value *V) {
  return zeroExtendToI32(Reg, V, MVT::i1);
}
450
451unsigned WebAssemblyFastISel::getRegForI1Value(const Value *V,
452 const BasicBlock *BB,
453 bool &Not) {
454 if (const auto *ICmp = dyn_cast<ICmpInst>(V))
455 if (const ConstantInt *C = dyn_cast<ConstantInt>(ICmp->getOperand(1)))
456 if (ICmp->isEquality() && C->isZero() && C->getType()->isIntegerTy(32) &&
457 ICmp->getParent() == BB) {
458 Not = ICmp->isTrueWhenEqual();
459 return getRegForValue(ICmp->getOperand(0));
460 }
461
462 Not = false;
463 Register Reg = getRegForValue(V);
464 if (Reg == 0)
465 return 0;
466 return maskI1Value(Reg, V);
467}
468
469unsigned WebAssemblyFastISel::zeroExtendToI32(unsigned Reg, const Value *V,
471 if (Reg == 0)
472 return 0;
473
474 switch (From) {
475 case MVT::i1:
476 // If the value is naturally an i1, we don't need to mask it. We only know
477 // if a value is naturally an i1 if it is definitely lowered by FastISel,
478 // not a DAG ISel fallback.
479 if (V != nullptr && isa<Argument>(V) && cast<Argument>(V)->hasZExtAttr())
480 return copyValue(Reg);
481 break;
482 case MVT::i8:
483 case MVT::i16:
484 break;
485 case MVT::i32:
486 return copyValue(Reg);
487 default:
488 return 0;
489 }
490
491 Register Imm = createResultReg(&WebAssembly::I32RegClass);
492 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
493 TII.get(WebAssembly::CONST_I32), Imm)
494 .addImm(~(~uint64_t(0) << MVT(From).getSizeInBits()));
495
496 Register Result = createResultReg(&WebAssembly::I32RegClass);
497 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::AND_I32),
498 Result)
499 .addReg(Reg)
500 .addReg(Imm);
501
502 return Result;
503}
504
505unsigned WebAssemblyFastISel::signExtendToI32(unsigned Reg, const Value *V,
507 if (Reg == 0)
508 return 0;
509
510 switch (From) {
511 case MVT::i1:
512 case MVT::i8:
513 case MVT::i16:
514 break;
515 case MVT::i32:
516 return copyValue(Reg);
517 default:
518 return 0;
519 }
520
521 if (Subtarget->hasSignExt()) {
522 if (From == MVT::i8 || From == MVT::i16) {
523 Register Result = createResultReg(&WebAssembly::I32RegClass);
524 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
525 TII.get(From == MVT::i16 ? WebAssembly::I32_EXTEND16_S_I32
526 : WebAssembly::I32_EXTEND8_S_I32),
527 Result)
528 .addReg(Reg);
529 return Result;
530 }
531 }
532
533 Register Imm = createResultReg(&WebAssembly::I32RegClass);
534 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
535 TII.get(WebAssembly::CONST_I32), Imm)
536 .addImm(32 - MVT(From).getSizeInBits());
537
538 Register Left = createResultReg(&WebAssembly::I32RegClass);
539 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::SHL_I32),
540 Left)
541 .addReg(Reg)
542 .addReg(Imm);
543
544 Register Right = createResultReg(&WebAssembly::I32RegClass);
545 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
546 TII.get(WebAssembly::SHR_S_I32), Right)
547 .addReg(Left)
548 .addReg(Imm);
549
550 return Right;
551}
552
553unsigned WebAssemblyFastISel::zeroExtend(unsigned Reg, const Value *V,
556 if (To == MVT::i64) {
557 if (From == MVT::i64)
558 return copyValue(Reg);
559
560 Reg = zeroExtendToI32(Reg, V, From);
561
562 Register Result = createResultReg(&WebAssembly::I64RegClass);
563 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
564 TII.get(WebAssembly::I64_EXTEND_U_I32), Result)
565 .addReg(Reg);
566 return Result;
567 }
568
569 if (To == MVT::i32)
570 return zeroExtendToI32(Reg, V, From);
571
572 return 0;
573}
574
575unsigned WebAssemblyFastISel::signExtend(unsigned Reg, const Value *V,
578 if (To == MVT::i64) {
579 if (From == MVT::i64)
580 return copyValue(Reg);
581
582 Register Result = createResultReg(&WebAssembly::I64RegClass);
583
584 if (Subtarget->hasSignExt()) {
585 if (From != MVT::i32) {
586 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
587 TII.get(WebAssembly::I64_EXTEND_U_I32), Result)
588 .addReg(Reg);
589
590 Reg = Result;
591 Result = createResultReg(&WebAssembly::I64RegClass);
592 }
593
594 switch (From) {
595 case MVT::i8:
596 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
597 TII.get(WebAssembly::I64_EXTEND8_S_I64), Result)
598 .addReg(Reg);
599 return Result;
600 case MVT::i16:
601 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
602 TII.get(WebAssembly::I64_EXTEND16_S_I64), Result)
603 .addReg(Reg);
604 return Result;
605 case MVT::i32:
606 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
607 TII.get(WebAssembly::I64_EXTEND_S_I32), Result)
608 .addReg(Reg);
609 return Result;
610 default:
611 break;
612 }
613 } else {
614 Reg = signExtendToI32(Reg, V, From);
615
616 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
617 TII.get(WebAssembly::I64_EXTEND_S_I32), Result)
618 .addReg(Reg);
619 }
620
621 return Result;
622 }
623
624 if (To == MVT::i32)
625 return signExtendToI32(Reg, V, From);
626
627 return 0;
628}
629
630unsigned WebAssemblyFastISel::getRegForUnsignedValue(const Value *V) {
631 MVT::SimpleValueType From = getSimpleType(V->getType());
632 MVT::SimpleValueType To = getLegalType(From);
633 Register VReg = getRegForValue(V);
634 if (VReg == 0)
635 return 0;
636 if (From == To)
637 return VReg;
638 return zeroExtend(VReg, V, From, To);
639}
640
641unsigned WebAssemblyFastISel::getRegForSignedValue(const Value *V) {
642 MVT::SimpleValueType From = getSimpleType(V->getType());
643 MVT::SimpleValueType To = getLegalType(From);
644 Register VReg = getRegForValue(V);
645 if (VReg == 0)
646 return 0;
647 if (From == To)
648 return VReg;
649 return signExtend(VReg, V, From, To);
650}
651
652unsigned WebAssemblyFastISel::getRegForPromotedValue(const Value *V,
653 bool IsSigned) {
654 return IsSigned ? getRegForSignedValue(V) : getRegForUnsignedValue(V);
655}
656
657unsigned WebAssemblyFastISel::notValue(unsigned Reg) {
658 assert(MRI.getRegClass(Reg) == &WebAssembly::I32RegClass);
659
660 Register NotReg = createResultReg(&WebAssembly::I32RegClass);
661 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::EQZ_I32),
662 NotReg)
663 .addReg(Reg);
664 return NotReg;
665}
666
667unsigned WebAssemblyFastISel::copyValue(unsigned Reg) {
668 Register ResultReg = createResultReg(MRI.getRegClass(Reg));
669 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::COPY),
670 ResultReg)
671 .addReg(Reg);
672 return ResultReg;
673}
674
675Register WebAssemblyFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
676 auto SI = FuncInfo.StaticAllocaMap.find(AI);
677
678 if (SI != FuncInfo.StaticAllocaMap.end()) {
679 Register ResultReg =
680 createResultReg(Subtarget->hasAddr64() ? &WebAssembly::I64RegClass
681 : &WebAssembly::I32RegClass);
682 unsigned Opc =
683 Subtarget->hasAddr64() ? WebAssembly::COPY_I64 : WebAssembly::COPY_I32;
684 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
685 .addFrameIndex(SI->second);
686 return ResultReg;
687 }
688
689 return Register();
690}
691
692Register WebAssemblyFastISel::fastMaterializeConstant(const Constant *C) {
693 if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
694 if (TLI.isPositionIndependent())
695 return Register();
696 if (GV->isThreadLocal())
697 return Register();
698 Register ResultReg =
699 createResultReg(Subtarget->hasAddr64() ? &WebAssembly::I64RegClass
700 : &WebAssembly::I32RegClass);
701 unsigned Opc = Subtarget->hasAddr64() ? WebAssembly::CONST_I64
702 : WebAssembly::CONST_I32;
703 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
704 .addGlobalAddress(GV);
705 return ResultReg;
706 }
707
708 // Let target-independent code handle it.
709 return Register();
710}
711
712bool WebAssemblyFastISel::fastLowerArguments() {
713 if (!FuncInfo.CanLowerReturn)
714 return false;
715
716 const Function *F = FuncInfo.Fn;
717 if (F->isVarArg())
718 return false;
719
720 if (FuncInfo.Fn->getCallingConv() == CallingConv::Swift)
721 return false;
722
723 unsigned I = 0;
724 for (auto const &Arg : F->args()) {
725 const AttributeList &Attrs = F->getAttributes();
726 if (Attrs.hasParamAttr(I, Attribute::ByVal) ||
727 Attrs.hasParamAttr(I, Attribute::SwiftSelf) ||
728 Attrs.hasParamAttr(I, Attribute::SwiftError) ||
729 Attrs.hasParamAttr(I, Attribute::InAlloca) ||
730 Attrs.hasParamAttr(I, Attribute::Nest))
731 return false;
732
733 Type *ArgTy = Arg.getType();
734 if (ArgTy->isStructTy() || ArgTy->isArrayTy())
735 return false;
736 if (!Subtarget->hasSIMD128() && ArgTy->isVectorTy())
737 return false;
738
739 unsigned Opc;
740 const TargetRegisterClass *RC;
741 switch (getSimpleType(ArgTy)) {
742 case MVT::i1:
743 case MVT::i8:
744 case MVT::i16:
745 case MVT::i32:
746 Opc = WebAssembly::ARGUMENT_i32;
747 RC = &WebAssembly::I32RegClass;
748 break;
749 case MVT::i64:
750 Opc = WebAssembly::ARGUMENT_i64;
751 RC = &WebAssembly::I64RegClass;
752 break;
753 case MVT::f32:
754 Opc = WebAssembly::ARGUMENT_f32;
755 RC = &WebAssembly::F32RegClass;
756 break;
757 case MVT::f64:
758 Opc = WebAssembly::ARGUMENT_f64;
759 RC = &WebAssembly::F64RegClass;
760 break;
761 case MVT::v16i8:
762 Opc = WebAssembly::ARGUMENT_v16i8;
763 RC = &WebAssembly::V128RegClass;
764 break;
765 case MVT::v8i16:
766 Opc = WebAssembly::ARGUMENT_v8i16;
767 RC = &WebAssembly::V128RegClass;
768 break;
769 case MVT::v4i32:
770 Opc = WebAssembly::ARGUMENT_v4i32;
771 RC = &WebAssembly::V128RegClass;
772 break;
773 case MVT::v2i64:
774 Opc = WebAssembly::ARGUMENT_v2i64;
775 RC = &WebAssembly::V128RegClass;
776 break;
777 case MVT::v4f32:
778 Opc = WebAssembly::ARGUMENT_v4f32;
779 RC = &WebAssembly::V128RegClass;
780 break;
781 case MVT::v2f64:
782 Opc = WebAssembly::ARGUMENT_v2f64;
783 RC = &WebAssembly::V128RegClass;
784 break;
785 case MVT::funcref:
786 Opc = WebAssembly::ARGUMENT_funcref;
787 RC = &WebAssembly::FUNCREFRegClass;
788 break;
789 case MVT::externref:
790 Opc = WebAssembly::ARGUMENT_externref;
791 RC = &WebAssembly::EXTERNREFRegClass;
792 break;
793 case MVT::exnref:
794 Opc = WebAssembly::ARGUMENT_exnref;
795 RC = &WebAssembly::EXNREFRegClass;
796 break;
797 default:
798 return false;
799 }
800 Register ResultReg = createResultReg(RC);
801 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
802 .addImm(I);
803 updateValueMap(&Arg, ResultReg);
804
805 ++I;
806 }
807
808 MRI.addLiveIn(WebAssembly::ARGUMENTS);
809
810 auto *MFI = MF->getInfo<WebAssemblyFunctionInfo>();
811 for (auto const &Arg : F->args()) {
812 MVT::SimpleValueType ArgTy = getLegalType(getSimpleType(Arg.getType()));
813 if (ArgTy == MVT::INVALID_SIMPLE_VALUE_TYPE) {
814 MFI->clearParamsAndResults();
815 return false;
816 }
817 MFI->addParam(ArgTy);
818 }
819
820 if (!F->getReturnType()->isVoidTy()) {
822 getLegalType(getSimpleType(F->getReturnType()));
823 if (RetTy == MVT::INVALID_SIMPLE_VALUE_TYPE) {
824 MFI->clearParamsAndResults();
825 return false;
826 }
827 MFI->addResult(RetTy);
828 }
829
830 return true;
831}
832
833bool WebAssemblyFastISel::selectCall(const Instruction *I) {
834 const auto *Call = cast<CallInst>(I);
835
836 // FastISel does not support calls through funcref
838 WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_DEFAULT)
839 return false;
840
841 // TODO: Support tail calls in FastISel
842 if (Call->isMustTailCall() || Call->isInlineAsm() ||
844 return false;
845
847 if (Func && Func->isIntrinsic())
848 return false;
849
850 if (Call->getCallingConv() == CallingConv::Swift)
851 return false;
852
853 bool IsDirect = Func != nullptr;
854 if (!IsDirect && isa<ConstantExpr>(Call->getCalledOperand()))
855 return false;
856
857 FunctionType *FuncTy = Call->getFunctionType();
858 unsigned Opc = IsDirect ? WebAssembly::CALL : WebAssembly::CALL_INDIRECT;
859 bool IsVoid = FuncTy->getReturnType()->isVoidTy();
860 unsigned ResultReg;
861 if (!IsVoid) {
862 if (!Subtarget->hasSIMD128() && Call->getType()->isVectorTy())
863 return false;
864
865 MVT::SimpleValueType RetTy = getSimpleType(Call->getType());
866 switch (RetTy) {
867 case MVT::i1:
868 case MVT::i8:
869 case MVT::i16:
870 case MVT::i32:
871 ResultReg = createResultReg(&WebAssembly::I32RegClass);
872 break;
873 case MVT::i64:
874 ResultReg = createResultReg(&WebAssembly::I64RegClass);
875 break;
876 case MVT::f32:
877 ResultReg = createResultReg(&WebAssembly::F32RegClass);
878 break;
879 case MVT::f64:
880 ResultReg = createResultReg(&WebAssembly::F64RegClass);
881 break;
882 case MVT::v16i8:
883 ResultReg = createResultReg(&WebAssembly::V128RegClass);
884 break;
885 case MVT::v8i16:
886 ResultReg = createResultReg(&WebAssembly::V128RegClass);
887 break;
888 case MVT::v4i32:
889 ResultReg = createResultReg(&WebAssembly::V128RegClass);
890 break;
891 case MVT::v2i64:
892 ResultReg = createResultReg(&WebAssembly::V128RegClass);
893 break;
894 case MVT::v4f32:
895 ResultReg = createResultReg(&WebAssembly::V128RegClass);
896 break;
897 case MVT::v2f64:
898 ResultReg = createResultReg(&WebAssembly::V128RegClass);
899 break;
900 case MVT::funcref:
901 ResultReg = createResultReg(&WebAssembly::FUNCREFRegClass);
902 break;
903 case MVT::externref:
904 ResultReg = createResultReg(&WebAssembly::EXTERNREFRegClass);
905 break;
906 case MVT::exnref:
907 ResultReg = createResultReg(&WebAssembly::EXNREFRegClass);
908 break;
909 default:
910 return false;
911 }
912 }
913
914 SmallVector<unsigned, 8> Args;
915 for (unsigned I = 0, E = Call->arg_size(); I < E; ++I) {
917 MVT::SimpleValueType ArgTy = getSimpleType(V->getType());
919 return false;
920
921 const AttributeList &Attrs = Call->getAttributes();
922 if (Attrs.hasParamAttr(I, Attribute::ByVal) ||
923 Attrs.hasParamAttr(I, Attribute::SwiftSelf) ||
924 Attrs.hasParamAttr(I, Attribute::SwiftError) ||
925 Attrs.hasParamAttr(I, Attribute::InAlloca) ||
926 Attrs.hasParamAttr(I, Attribute::Nest))
927 return false;
928
929 unsigned Reg;
930
931 if (Call->paramHasAttr(I, Attribute::SExt))
932 Reg = getRegForSignedValue(V);
933 else if (Call->paramHasAttr(I, Attribute::ZExt))
934 Reg = getRegForUnsignedValue(V);
935 else
936 Reg = getRegForValue(V);
937
938 if (Reg == 0)
939 return false;
940
941 Args.push_back(Reg);
942 }
943
944 unsigned CalleeReg = 0;
945 if (!IsDirect) {
946 CalleeReg = getRegForValue(Call->getCalledOperand());
947 if (!CalleeReg)
948 return false;
949 }
950
951 auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc));
952
953 if (!IsVoid)
954 MIB.addReg(ResultReg, RegState::Define);
955
956 if (IsDirect) {
957 MIB.addGlobalAddress(Func);
958 } else {
959 // Placeholder for the type index.
960 MIB.addImm(0);
961 // The table into which this call_indirect indexes.
963 MF->getContext(), Subtarget);
964 if (Subtarget->hasCallIndirectOverlong()) {
965 MIB.addSym(Table);
966 } else {
967 // Otherwise for the MVP there is at most one table whose number is 0, but
968 // we can't write a table symbol or issue relocations. Instead we just
969 // ensure the table is live.
970 Table->setNoStrip();
971 MIB.addImm(0);
972 }
973 }
974
975 for (unsigned ArgReg : Args)
976 MIB.addReg(ArgReg);
977
978 if (!IsDirect)
979 MIB.addReg(CalleeReg);
980
981 if (!IsVoid)
982 updateValueMap(Call, ResultReg);
983
985 return true;
986}
987
988bool WebAssemblyFastISel::selectSelect(const Instruction *I) {
989 const auto *Select = cast<SelectInst>(I);
990
991 bool Not;
992 unsigned CondReg =
993 getRegForI1Value(Select->getCondition(), I->getParent(), Not);
994 if (CondReg == 0)
995 return false;
996
997 Register TrueReg = getRegForValue(Select->getTrueValue());
998 if (TrueReg == 0)
999 return false;
1000
1001 Register FalseReg = getRegForValue(Select->getFalseValue());
1002 if (FalseReg == 0)
1003 return false;
1004
1005 if (Not)
1006 std::swap(TrueReg, FalseReg);
1007
1008 unsigned Opc;
1009 const TargetRegisterClass *RC;
1010 switch (getSimpleType(Select->getType())) {
1011 case MVT::i1:
1012 case MVT::i8:
1013 case MVT::i16:
1014 case MVT::i32:
1015 Opc = WebAssembly::SELECT_I32;
1016 RC = &WebAssembly::I32RegClass;
1017 break;
1018 case MVT::i64:
1019 Opc = WebAssembly::SELECT_I64;
1020 RC = &WebAssembly::I64RegClass;
1021 break;
1022 case MVT::f32:
1023 Opc = WebAssembly::SELECT_F32;
1024 RC = &WebAssembly::F32RegClass;
1025 break;
1026 case MVT::f64:
1027 Opc = WebAssembly::SELECT_F64;
1028 RC = &WebAssembly::F64RegClass;
1029 break;
1030 case MVT::funcref:
1031 Opc = WebAssembly::SELECT_FUNCREF;
1032 RC = &WebAssembly::FUNCREFRegClass;
1033 break;
1034 case MVT::externref:
1035 Opc = WebAssembly::SELECT_EXTERNREF;
1036 RC = &WebAssembly::EXTERNREFRegClass;
1037 break;
1038 case MVT::exnref:
1039 Opc = WebAssembly::SELECT_EXNREF;
1040 RC = &WebAssembly::EXNREFRegClass;
1041 break;
1042 default:
1043 return false;
1044 }
1045
1046 Register ResultReg = createResultReg(RC);
1047 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
1048 .addReg(TrueReg)
1049 .addReg(FalseReg)
1050 .addReg(CondReg);
1051
1052 updateValueMap(Select, ResultReg);
1053 return true;
1054}
1055
1056bool WebAssemblyFastISel::selectTrunc(const Instruction *I) {
1057 const auto *Trunc = cast<TruncInst>(I);
1058
1059 const Value *Op = Trunc->getOperand(0);
1060 MVT::SimpleValueType From = getSimpleType(Op->getType());
1061 MVT::SimpleValueType To = getLegalType(getSimpleType(Trunc->getType()));
1062 Register In = getRegForValue(Op);
1063 if (In == 0)
1064 return false;
1065
1066 auto Truncate = [&](Register Reg) -> unsigned {
1067 if (From == MVT::i64) {
1068 if (To == MVT::i64)
1069 return copyValue(Reg);
1070
1071 if (To == MVT::i1 || To == MVT::i8 || To == MVT::i16 || To == MVT::i32) {
1072 Register Result = createResultReg(&WebAssembly::I32RegClass);
1073 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1074 TII.get(WebAssembly::I32_WRAP_I64), Result)
1075 .addReg(Reg);
1076 return Result;
1077 }
1078 }
1079
1080 if (From == MVT::i32)
1081 return copyValue(Reg);
1082
1083 return 0;
1084 };
1085
1086 unsigned Reg = Truncate(In);
1087 if (Reg == 0)
1088 return false;
1089
1090 updateValueMap(Trunc, Reg);
1091 return true;
1092}
1093
1094bool WebAssemblyFastISel::selectZExt(const Instruction *I) {
1095 const auto *ZExt = cast<ZExtInst>(I);
1096
1097 const Value *Op = ZExt->getOperand(0);
1098 MVT::SimpleValueType From = getSimpleType(Op->getType());
1099 MVT::SimpleValueType To = getLegalType(getSimpleType(ZExt->getType()));
1100 Register In = getRegForValue(Op);
1101 if (In == 0)
1102 return false;
1103 unsigned Reg = zeroExtend(In, Op, From, To);
1104 if (Reg == 0)
1105 return false;
1106
1107 updateValueMap(ZExt, Reg);
1108 return true;
1109}
1110
1111bool WebAssemblyFastISel::selectSExt(const Instruction *I) {
1112 const auto *SExt = cast<SExtInst>(I);
1113
1114 const Value *Op = SExt->getOperand(0);
1115 MVT::SimpleValueType From = getSimpleType(Op->getType());
1116 MVT::SimpleValueType To = getLegalType(getSimpleType(SExt->getType()));
1117 Register In = getRegForValue(Op);
1118 if (In == 0)
1119 return false;
1120 unsigned Reg = signExtend(In, Op, From, To);
1121 if (Reg == 0)
1122 return false;
1123
1124 updateValueMap(SExt, Reg);
1125 return true;
1126}
1127
1128bool WebAssemblyFastISel::selectICmp(const Instruction *I) {
1129 const auto *ICmp = cast<ICmpInst>(I);
1130
1131 bool I32 = getSimpleType(ICmp->getOperand(0)->getType()) != MVT::i64;
1132 unsigned Opc;
1133 bool IsSigned = false;
1134 switch (ICmp->getPredicate()) {
1135 case ICmpInst::ICMP_EQ:
1136 Opc = I32 ? WebAssembly::EQ_I32 : WebAssembly::EQ_I64;
1137 break;
1138 case ICmpInst::ICMP_NE:
1139 Opc = I32 ? WebAssembly::NE_I32 : WebAssembly::NE_I64;
1140 break;
1141 case ICmpInst::ICMP_UGT:
1142 Opc = I32 ? WebAssembly::GT_U_I32 : WebAssembly::GT_U_I64;
1143 break;
1144 case ICmpInst::ICMP_UGE:
1145 Opc = I32 ? WebAssembly::GE_U_I32 : WebAssembly::GE_U_I64;
1146 break;
1147 case ICmpInst::ICMP_ULT:
1148 Opc = I32 ? WebAssembly::LT_U_I32 : WebAssembly::LT_U_I64;
1149 break;
1150 case ICmpInst::ICMP_ULE:
1151 Opc = I32 ? WebAssembly::LE_U_I32 : WebAssembly::LE_U_I64;
1152 break;
1153 case ICmpInst::ICMP_SGT:
1154 Opc = I32 ? WebAssembly::GT_S_I32 : WebAssembly::GT_S_I64;
1155 IsSigned = true;
1156 break;
1157 case ICmpInst::ICMP_SGE:
1158 Opc = I32 ? WebAssembly::GE_S_I32 : WebAssembly::GE_S_I64;
1159 IsSigned = true;
1160 break;
1161 case ICmpInst::ICMP_SLT:
1162 Opc = I32 ? WebAssembly::LT_S_I32 : WebAssembly::LT_S_I64;
1163 IsSigned = true;
1164 break;
1165 case ICmpInst::ICMP_SLE:
1166 Opc = I32 ? WebAssembly::LE_S_I32 : WebAssembly::LE_S_I64;
1167 IsSigned = true;
1168 break;
1169 default:
1170 return false;
1171 }
1172
1173 unsigned LHS = getRegForPromotedValue(ICmp->getOperand(0), IsSigned);
1174 if (LHS == 0)
1175 return false;
1176
1177 unsigned RHS = getRegForPromotedValue(ICmp->getOperand(1), IsSigned);
1178 if (RHS == 0)
1179 return false;
1180
1181 Register ResultReg = createResultReg(&WebAssembly::I32RegClass);
1182 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
1183 .addReg(LHS)
1184 .addReg(RHS);
1185 updateValueMap(ICmp, ResultReg);
1186 return true;
1187}
1188
1189bool WebAssemblyFastISel::selectFCmp(const Instruction *I) {
1190 const auto *FCmp = cast<FCmpInst>(I);
1191
1192 Register LHS = getRegForValue(FCmp->getOperand(0));
1193 if (LHS == 0)
1194 return false;
1195
1196 Register RHS = getRegForValue(FCmp->getOperand(1));
1197 if (RHS == 0)
1198 return false;
1199
1200 bool F32 = getSimpleType(FCmp->getOperand(0)->getType()) != MVT::f64;
1201 unsigned Opc;
1202 bool Not = false;
1203 switch (FCmp->getPredicate()) {
1204 case FCmpInst::FCMP_OEQ:
1205 Opc = F32 ? WebAssembly::EQ_F32 : WebAssembly::EQ_F64;
1206 break;
1207 case FCmpInst::FCMP_UNE:
1208 Opc = F32 ? WebAssembly::NE_F32 : WebAssembly::NE_F64;
1209 break;
1210 case FCmpInst::FCMP_OGT:
1211 Opc = F32 ? WebAssembly::GT_F32 : WebAssembly::GT_F64;
1212 break;
1213 case FCmpInst::FCMP_OGE:
1214 Opc = F32 ? WebAssembly::GE_F32 : WebAssembly::GE_F64;
1215 break;
1216 case FCmpInst::FCMP_OLT:
1217 Opc = F32 ? WebAssembly::LT_F32 : WebAssembly::LT_F64;
1218 break;
1219 case FCmpInst::FCMP_OLE:
1220 Opc = F32 ? WebAssembly::LE_F32 : WebAssembly::LE_F64;
1221 break;
1222 case FCmpInst::FCMP_UGT:
1223 Opc = F32 ? WebAssembly::LE_F32 : WebAssembly::LE_F64;
1224 Not = true;
1225 break;
1226 case FCmpInst::FCMP_UGE:
1227 Opc = F32 ? WebAssembly::LT_F32 : WebAssembly::LT_F64;
1228 Not = true;
1229 break;
1230 case FCmpInst::FCMP_ULT:
1231 Opc = F32 ? WebAssembly::GE_F32 : WebAssembly::GE_F64;
1232 Not = true;
1233 break;
1234 case FCmpInst::FCMP_ULE:
1235 Opc = F32 ? WebAssembly::GT_F32 : WebAssembly::GT_F64;
1236 Not = true;
1237 break;
1238 default:
1239 return false;
1240 }
1241
1242 Register ResultReg = createResultReg(&WebAssembly::I32RegClass);
1243 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
1244 .addReg(LHS)
1245 .addReg(RHS);
1246
1247 if (Not)
1248 ResultReg = notValue(ResultReg);
1249
1250 updateValueMap(FCmp, ResultReg);
1251 return true;
1252}
1253
1254bool WebAssemblyFastISel::selectBitCast(const Instruction *I) {
1255 // Target-independent code can handle this, except it doesn't set the dead
1256 // flag on the ARGUMENTS clobber, so we have to do that manually in order
1257 // to satisfy code that expects this of isBitcast() instructions.
1258 EVT VT = TLI.getValueType(DL, I->getOperand(0)->getType());
1259 EVT RetVT = TLI.getValueType(DL, I->getType());
1260 if (!VT.isSimple() || !RetVT.isSimple())
1261 return false;
1262
1263 Register In = getRegForValue(I->getOperand(0));
1264 if (In == 0)
1265 return false;
1266
1267 if (VT == RetVT) {
1268 // No-op bitcast.
1269 updateValueMap(I, In);
1270 return true;
1271 }
1272
1273 Register Reg =
1274 fastEmit_ISD_BITCAST_r(VT.getSimpleVT(), RetVT.getSimpleVT(), In);
1275 if (!Reg)
1276 return false;
1277 MachineBasicBlock::iterator Iter = FuncInfo.InsertPt;
1278 --Iter;
1279 assert(Iter->isBitcast());
1280 Iter->setPhysRegsDeadExcept(ArrayRef<Register>(), TRI);
1281 updateValueMap(I, Reg);
1282 return true;
1283}
1284
1285static unsigned getSExtLoadOpcode(unsigned LoadSize, bool I64Result, bool A64) {
1286 if (I64Result) {
1287 switch (LoadSize) {
1288 default:
1289 return WebAssembly::INSTRUCTION_LIST_END;
1290 case 8:
1291 return A64 ? WebAssembly::LOAD8_S_I64_A64 : WebAssembly::LOAD8_S_I64_A32;
1292 case 16:
1293 return A64 ? WebAssembly::LOAD16_S_I64_A64
1294 : WebAssembly::LOAD16_S_I64_A32;
1295 case 32:
1296 return A64 ? WebAssembly::LOAD32_S_I64_A64
1297 : WebAssembly::LOAD32_S_I64_A32;
1298 }
1299 }
1300
1301 switch (LoadSize) {
1302 default:
1303 return WebAssembly::INSTRUCTION_LIST_END;
1304 case 8:
1305 return A64 ? WebAssembly::LOAD8_S_I32_A64 : WebAssembly::LOAD8_S_I32_A32;
1306 case 16:
1307 return A64 ? WebAssembly::LOAD16_S_I32_A64 : WebAssembly::LOAD16_S_I32_A32;
1308 }
1309}
1310
1311static unsigned getZExtLoadOpcode(unsigned LoadSize, bool I64Result, bool A64) {
1312 if (I64Result) {
1313 switch (LoadSize) {
1314 default:
1315 return WebAssembly::INSTRUCTION_LIST_END;
1316 case 8:
1317 return A64 ? WebAssembly::LOAD8_U_I64_A64 : WebAssembly::LOAD8_U_I64_A32;
1318 case 16:
1319 return A64 ? WebAssembly::LOAD16_U_I64_A64
1320 : WebAssembly::LOAD16_U_I64_A32;
1321 case 32:
1322 return A64 ? WebAssembly::LOAD32_U_I64_A64
1323 : WebAssembly::LOAD32_U_I64_A32;
1324 }
1325 }
1326
1327 switch (LoadSize) {
1328 default:
1329 return WebAssembly::INSTRUCTION_LIST_END;
1330 case 8:
1331 return A64 ? WebAssembly::LOAD8_U_I32_A64 : WebAssembly::LOAD8_U_I32_A32;
1332 case 16:
1333 return A64 ? WebAssembly::LOAD16_U_I32_A64 : WebAssembly::LOAD16_U_I32_A32;
1334 }
1335}
1336
1337static bool isFoldableSExtOpcode(unsigned Opc) {
1338 switch (Opc) {
1339 default:
1340 return false;
1341 case WebAssembly::I32_EXTEND8_S_I32:
1342 case WebAssembly::I32_EXTEND16_S_I32:
1343 case WebAssembly::I64_EXTEND8_S_I64:
1344 case WebAssembly::I64_EXTEND16_S_I64:
1345 case WebAssembly::I64_EXTEND32_S_I64:
1346 case WebAssembly::I64_EXTEND_S_I32:
1347 return true;
1348 }
1349}
1350
1351static bool isI64SExtResult(unsigned Opc) {
1352 switch (Opc) {
1353 default:
1354 llvm_unreachable("unexpected opcode");
1355 case WebAssembly::I32_EXTEND8_S_I32:
1356 case WebAssembly::I32_EXTEND16_S_I32:
1357 return false;
1358 case WebAssembly::I64_EXTEND8_S_I64:
1359 case WebAssembly::I64_EXTEND16_S_I64:
1360 case WebAssembly::I64_EXTEND32_S_I64:
1361 case WebAssembly::I64_EXTEND_S_I32:
1362 return true;
1363 }
1364}
1365
1367 const LoadInst *LI, bool A64) {
1368 unsigned Opc = MI->getOpcode();
1369
1371 unsigned LoadSize = LI->getType()->getPrimitiveSizeInBits();
1372 return getSExtLoadOpcode(LoadSize, isI64SExtResult(Opc), A64);
1373 }
1374
1375 return WebAssembly::INSTRUCTION_LIST_END;
1376}
1377
1378static unsigned getFoldedI64LoadOpcode(Register DestReg, const LoadInst *LI,
1379 MachineRegisterInfo &MRI, bool A64,
1380 MachineInstr *&OuterUserMI,
1381 unsigned NarrowOpc) {
1382 if (!MRI.hasOneNonDBGUse(DestReg))
1383 return NarrowOpc;
1384
1385 MachineInstr *UserMI = &*MRI.use_instr_nodbg_begin(DestReg);
1386 unsigned LoadSize = LI->getType()->getPrimitiveSizeInBits();
1387 switch (UserMI->getOpcode()) {
1388 case WebAssembly::I64_EXTEND_U_I32:
1389 OuterUserMI = UserMI;
1390 return getZExtLoadOpcode(LoadSize, /*I64Result=*/true, A64);
1391 case WebAssembly::I64_EXTEND_S_I32:
1392 OuterUserMI = UserMI;
1393 return getSExtLoadOpcode(LoadSize, /*I64Result=*/true, A64);
1394 default:
1395 return NarrowOpc;
1396 }
1397}
1398
1399/// Matches a sign-extension pattern (shl + shr_s) to fold it into a signed
1400/// load. FastISel assumes that 'sext' from i8 or i16 will first be lowered to a
1401/// 32-bit zero-extending load (i32.load8_u / i32.load16_u) followed by 32-bit
1402/// shifts, even when extending to i64. Therefore, this function only matches
1403/// 32-bit shifts (SHL_I32 / SHR_S_I32) and specifically checks if both shift
1404/// amounts are identical, compile-time constants that match the exact extension
1405/// size (32 - LoadBitWidth).
1406static unsigned matchFoldableShift(MachineInstr *MI, const LoadInst *LI,
1407 MachineRegisterInfo &MRI, bool A64,
1408 MachineInstr *&UserMI,
1409 MachineInstr *&OuterUserMI) {
1410 unsigned Opc = MI->getOpcode();
1411 unsigned NewOpc = WebAssembly::INSTRUCTION_LIST_END;
1412 if (Opc != WebAssembly::SHL_I32)
1413 return NewOpc;
1414
1415 Register DestReg = MI->getOperand(0).getReg();
1416 if (!MRI.hasOneNonDBGUse(DestReg))
1417 return NewOpc;
1418
1419 UserMI = &*MRI.use_instr_nodbg_begin(DestReg);
1420 unsigned UserOpc = UserMI->getOpcode();
1421 if (UserOpc != WebAssembly::SHR_S_I32)
1422 return NewOpc;
1423
1424 Type *LoadTy = LI->getType();
1425 if (!LoadTy->isIntegerTy(8) && !LoadTy->isIntegerTy(16))
1426 return NewOpc;
1427
1428 int64_t ExpectedShiftAmt = 32 - LoadTy->getIntegerBitWidth();
1429 Register ShlAmtReg = MI->getOperand(2).getReg();
1430 Register ShrAmtReg = UserMI->getOperand(2).getReg();
1431 MachineInstr *ShlAmtDef = MRI.getUniqueVRegDef(ShlAmtReg);
1432 MachineInstr *ShrAmtDef = MRI.getUniqueVRegDef(ShrAmtReg);
1433 auto IsExpectedConst = [ExpectedShiftAmt](MachineInstr *MI) {
1434 return MI && MI->getOpcode() == WebAssembly::CONST_I32 &&
1435 MI->getOperand(1).getImm() == ExpectedShiftAmt;
1436 };
1437 if (!IsExpectedConst(ShlAmtDef) || !IsExpectedConst(ShrAmtDef))
1438 return NewOpc;
1439
1440 unsigned LoadSize = LoadTy->getIntegerBitWidth();
1441 unsigned NarrowOpc = getSExtLoadOpcode(LoadSize, /*I64Result=*/false, A64);
1442 if (NarrowOpc == WebAssembly::INSTRUCTION_LIST_END)
1443 return WebAssembly::INSTRUCTION_LIST_END;
1444
1445 return getFoldedI64LoadOpcode(UserMI->getOperand(0).getReg(), LI, MRI, A64,
1446 OuterUserMI, NarrowOpc);
1447}
1448
1450 const LoadInst *LI,
1452 bool A64,
1453 MachineInstr *&UserMI) {
1454 if (MI->getOpcode() != WebAssembly::I64_EXTEND_U_I32)
1455 return WebAssembly::INSTRUCTION_LIST_END;
1456
1457 unsigned LoadSize = LI->getType()->getPrimitiveSizeInBits();
1458 Register DestReg = MI->getOperand(0).getReg();
1459 if (!MRI.hasOneNonDBGUse(DestReg))
1460 return WebAssembly::INSTRUCTION_LIST_END;
1461
1462 UserMI = &*MRI.use_instr_nodbg_begin(DestReg);
1463 switch (UserMI->getOpcode()) {
1464 default:
1465 return WebAssembly::INSTRUCTION_LIST_END;
1466 case WebAssembly::I64_EXTEND8_S_I64:
1467 if (LoadSize != 8)
1468 return WebAssembly::INSTRUCTION_LIST_END;
1469 return getSExtLoadOpcode(LoadSize, true, A64);
1470 case WebAssembly::I64_EXTEND16_S_I64:
1471 if (LoadSize != 16)
1472 return WebAssembly::INSTRUCTION_LIST_END;
1473 return getSExtLoadOpcode(LoadSize, true, A64);
1474 }
1475}
1476
1478 MachineRegisterInfo &MRI, bool A64,
1479 MachineInstr *&OuterUserMI) {
1480 if (MI->getOpcode() != WebAssembly::COPY)
1481 return WebAssembly::INSTRUCTION_LIST_END;
1482
1483 unsigned LoadSize = LI->getType()->getPrimitiveSizeInBits();
1484 if (LoadSize != 32)
1485 return WebAssembly::INSTRUCTION_LIST_END;
1486
1487 Register CopyDst = MI->getOperand(0).getReg();
1488 if (!MRI.hasOneNonDBGUse(CopyDst))
1489 return WebAssembly::INSTRUCTION_LIST_END;
1490
1491 OuterUserMI = &*MRI.use_instr_nodbg_begin(CopyDst);
1492 switch (OuterUserMI->getOpcode()) {
1493 default:
1494 return WebAssembly::INSTRUCTION_LIST_END;
1495 case WebAssembly::I64_EXTEND_U_I32:
1496 return getZExtLoadOpcode(LoadSize, true, A64);
1497 case WebAssembly::I64_EXTEND_S_I32:
1498 return getSExtLoadOpcode(LoadSize, true, A64);
1499 }
1500}
1501
1502static unsigned matchFoldableAnd(MachineInstr *MI, const LoadInst *LI,
1503 MachineRegisterInfo &MRI, bool A64,
1504 MachineInstr *&OuterUserMI) {
1505 if (MI->getOpcode() != WebAssembly::AND_I32 &&
1506 MI->getOpcode() != WebAssembly::AND_I64)
1507 return WebAssembly::INSTRUCTION_LIST_END;
1508
1509 uint64_t Mask = 0;
1510 bool IsConstant = false;
1511 for (unsigned I = 1; I <= 2; ++I) {
1512 Register Reg = MI->getOperand(I).getReg();
1514 if (DefMI && (DefMI->getOpcode() == WebAssembly::CONST_I32 ||
1515 DefMI->getOpcode() == WebAssembly::CONST_I64)) {
1516 Mask = DefMI->getOperand(1).getImm();
1517 IsConstant = true;
1518 break;
1519 }
1520 }
1521
1522 if (!IsConstant)
1523 return WebAssembly::INSTRUCTION_LIST_END;
1524
1525 unsigned LoadSize = LI->getType()->getPrimitiveSizeInBits();
1526 if (Mask != llvm::maskTrailingOnes<uint64_t>(LoadSize))
1527 return WebAssembly::INSTRUCTION_LIST_END;
1528
1529 if (MI->getOpcode() == WebAssembly::AND_I64)
1530 return getZExtLoadOpcode(LoadSize, /*I64Result=*/true, A64);
1531
1532 unsigned NarrowOpc = getZExtLoadOpcode(LoadSize, /*I64Result=*/false, A64);
1533 if (NarrowOpc == WebAssembly::INSTRUCTION_LIST_END)
1534 return WebAssembly::INSTRUCTION_LIST_END;
1535
1536 return getFoldedI64LoadOpcode(MI->getOperand(0).getReg(), LI, MRI, A64,
1537 OuterUserMI, NarrowOpc);
1538}
1539
1540bool WebAssemblyFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
1541 const LoadInst *LI) {
1542 bool A64 = Subtarget->hasAddr64();
1543 MachineRegisterInfo &MRI = FuncInfo.MF->getRegInfo();
1544 Register ResultReg;
1545 MachineInstr *UserMI = nullptr;
1546 MachineInstr *OuterUserMI = nullptr;
1547 unsigned NewOpc = WebAssembly::INSTRUCTION_LIST_END;
1548 if ((NewOpc = matchFoldableSExtFromPromotedI32(MI, LI, MRI, A64, UserMI)) !=
1549 WebAssembly::INSTRUCTION_LIST_END) {
1550 ResultReg = UserMI->getOperand(0).getReg();
1551 } else if ((NewOpc =
1552 matchFoldableCopyToI64Ext(MI, LI, MRI, A64, OuterUserMI)) !=
1553 WebAssembly::INSTRUCTION_LIST_END) {
1554 ResultReg = OuterUserMI->getOperand(0).getReg();
1555 } else if ((NewOpc = matchFoldableAnd(MI, LI, MRI, A64, OuterUserMI)) !=
1556 WebAssembly::INSTRUCTION_LIST_END) {
1557 ResultReg = OuterUserMI ? OuterUserMI->getOperand(0).getReg()
1558 : MI->getOperand(0).getReg();
1559 } else if ((NewOpc = getFoldedLoadOpcode(MI, MRI, LI, A64)) !=
1560 WebAssembly::INSTRUCTION_LIST_END) {
1561 ResultReg = MI->getOperand(0).getReg();
1562 } else if ((NewOpc =
1563 matchFoldableShift(MI, LI, MRI, A64, UserMI, OuterUserMI)) !=
1564 WebAssembly::INSTRUCTION_LIST_END) {
1565 ResultReg = OuterUserMI ? OuterUserMI->getOperand(0).getReg()
1566 : UserMI->getOperand(0).getReg();
1567 } else {
1568 return false;
1569 }
1570
1571 if (!emitLoad(ResultReg, NewOpc, LI))
1572 return false;
1573
1574 if (OuterUserMI) {
1575 MachineBasicBlock::iterator OuterIter(OuterUserMI);
1576 removeDeadCode(OuterIter, std::next(OuterIter));
1577 }
1578
1579 if (UserMI) {
1580 MachineBasicBlock::iterator UserIter(UserMI);
1581 removeDeadCode(UserIter, std::next(UserIter));
1582 }
1583
1585 removeDeadCode(Iter, std::next(Iter));
1586 return true;
1587}
1588
1589bool WebAssemblyFastISel::selectLoad(const Instruction *I) {
1590 const auto *Load = cast<LoadInst>(I);
1591 if (Load->isAtomic())
1592 return false;
1593 if (!WebAssembly::isDefaultAddressSpace(Load->getPointerAddressSpace()))
1594 return false;
1595 if (!Subtarget->hasSIMD128() && Load->getType()->isVectorTy())
1596 return false;
1597
1598 // TODO: Fold a following sign-/zero-extend into the load instruction.
1599
1600 unsigned Opc;
1601 const TargetRegisterClass *RC;
1602 bool A64 = Subtarget->hasAddr64();
1603 switch (getSimpleType(Load->getType())) {
1604 case MVT::i1:
1605 case MVT::i8:
1606 Opc = A64 ? WebAssembly::LOAD8_U_I32_A64 : WebAssembly::LOAD8_U_I32_A32;
1607 RC = &WebAssembly::I32RegClass;
1608 break;
1609 case MVT::i16:
1610 Opc = A64 ? WebAssembly::LOAD16_U_I32_A64 : WebAssembly::LOAD16_U_I32_A32;
1611 RC = &WebAssembly::I32RegClass;
1612 break;
1613 case MVT::i32:
1614 Opc = A64 ? WebAssembly::LOAD_I32_A64 : WebAssembly::LOAD_I32_A32;
1615 RC = &WebAssembly::I32RegClass;
1616 break;
1617 case MVT::i64:
1618 Opc = A64 ? WebAssembly::LOAD_I64_A64 : WebAssembly::LOAD_I64_A32;
1619 RC = &WebAssembly::I64RegClass;
1620 break;
1621 case MVT::f32:
1622 Opc = A64 ? WebAssembly::LOAD_F32_A64 : WebAssembly::LOAD_F32_A32;
1623 RC = &WebAssembly::F32RegClass;
1624 break;
1625 case MVT::f64:
1626 Opc = A64 ? WebAssembly::LOAD_F64_A64 : WebAssembly::LOAD_F64_A32;
1627 RC = &WebAssembly::F64RegClass;
1628 break;
1629 default:
1630 return false;
1631 }
1632
1633 Register ResultReg = createResultReg(RC);
1634 if (!emitLoad(ResultReg, Opc, Load))
1635 return false;
1636
1637 updateValueMap(Load, ResultReg);
1638 return true;
1639}
1640
1641bool WebAssemblyFastISel::selectStore(const Instruction *I) {
1642 const auto *Store = cast<StoreInst>(I);
1643 if (Store->isAtomic())
1644 return false;
1645 if (!WebAssembly::isDefaultAddressSpace(Store->getPointerAddressSpace()))
1646 return false;
1647 if (!Subtarget->hasSIMD128() &&
1648 Store->getValueOperand()->getType()->isVectorTy())
1649 return false;
1650
1651 Address Addr;
1652 if (!computeAddress(Store->getPointerOperand(), Addr))
1653 return false;
1654
1655 unsigned Opc;
1656 bool VTIsi1 = false;
1657 bool A64 = Subtarget->hasAddr64();
1658 switch (getSimpleType(Store->getValueOperand()->getType())) {
1659 case MVT::i1:
1660 VTIsi1 = true;
1661 [[fallthrough]];
1662 case MVT::i8:
1663 Opc = A64 ? WebAssembly::STORE8_I32_A64 : WebAssembly::STORE8_I32_A32;
1664 break;
1665 case MVT::i16:
1666 Opc = A64 ? WebAssembly::STORE16_I32_A64 : WebAssembly::STORE16_I32_A32;
1667 break;
1668 case MVT::i32:
1669 Opc = A64 ? WebAssembly::STORE_I32_A64 : WebAssembly::STORE_I32_A32;
1670 break;
1671 case MVT::i64:
1672 Opc = A64 ? WebAssembly::STORE_I64_A64 : WebAssembly::STORE_I64_A32;
1673 break;
1674 case MVT::f32:
1675 Opc = A64 ? WebAssembly::STORE_F32_A64 : WebAssembly::STORE_F32_A32;
1676 break;
1677 case MVT::f64:
1678 Opc = A64 ? WebAssembly::STORE_F64_A64 : WebAssembly::STORE_F64_A32;
1679 break;
1680 default:
1681 return false;
1682 }
1683
1684 materializeLoadStoreOperands(Addr);
1685
1686 Register ValueReg = getRegForValue(Store->getValueOperand());
1687 if (ValueReg == 0)
1688 return false;
1689 if (VTIsi1)
1690 ValueReg = maskI1Value(ValueReg, Store->getValueOperand());
1691
1692 auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc));
1693
1694 addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Store));
1695
1696 MIB.addReg(ValueReg);
1697 return true;
1698}
1699
1700bool WebAssemblyFastISel::selectCondBr(const Instruction *I) {
1701 const auto *Br = cast<CondBrInst>(I);
1702
1703 MachineBasicBlock *TBB = FuncInfo.getMBB(Br->getSuccessor(0));
1704 MachineBasicBlock *FBB = FuncInfo.getMBB(Br->getSuccessor(1));
1705
1706 bool Not;
1707 unsigned CondReg = getRegForI1Value(Br->getCondition(), Br->getParent(), Not);
1708 if (CondReg == 0)
1709 return false;
1710
1711 unsigned Opc = WebAssembly::BR_IF;
1712 if (Not)
1713 Opc = WebAssembly::BR_UNLESS;
1714
1715 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
1716 .addMBB(TBB)
1717 .addReg(CondReg);
1718
1719 finishCondBranch(Br->getParent(), TBB, FBB);
1720 return true;
1721}
1722
1723bool WebAssemblyFastISel::selectRet(const Instruction *I) {
1724 if (!FuncInfo.CanLowerReturn)
1725 return false;
1726
1727 const auto *Ret = cast<ReturnInst>(I);
1728
1729 if (Ret->getNumOperands() == 0) {
1730 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1731 TII.get(WebAssembly::RETURN));
1732 return true;
1733 }
1734
1735 // TODO: support multiple return in FastISel
1736 if (Ret->getNumOperands() > 1)
1737 return false;
1738
1739 Value *RV = Ret->getOperand(0);
1740 if (!Subtarget->hasSIMD128() && RV->getType()->isVectorTy())
1741 return false;
1742
1743 switch (getSimpleType(RV->getType())) {
1744 case MVT::i1:
1745 case MVT::i8:
1746 case MVT::i16:
1747 case MVT::i32:
1748 case MVT::i64:
1749 case MVT::f32:
1750 case MVT::f64:
1751 case MVT::v16i8:
1752 case MVT::v8i16:
1753 case MVT::v4i32:
1754 case MVT::v2i64:
1755 case MVT::v4f32:
1756 case MVT::v2f64:
1757 case MVT::funcref:
1758 case MVT::externref:
1759 case MVT::exnref:
1760 break;
1761 default:
1762 return false;
1763 }
1764
1765 unsigned Reg;
1766 if (FuncInfo.Fn->getAttributes().hasRetAttr(Attribute::SExt))
1767 Reg = getRegForSignedValue(RV);
1768 else if (FuncInfo.Fn->getAttributes().hasRetAttr(Attribute::ZExt))
1769 Reg = getRegForUnsignedValue(RV);
1770 else
1771 Reg = getRegForValue(RV);
1772
1773 if (Reg == 0)
1774 return false;
1775
1776 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(WebAssembly::RETURN))
1777 .addReg(Reg);
1778 return true;
1779}
1780
1781bool WebAssemblyFastISel::selectUnreachable(const Instruction *I) {
1782 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1783 TII.get(WebAssembly::UNREACHABLE));
1784 return true;
1785}
1786
1787bool WebAssemblyFastISel::fastSelectInstruction(const Instruction *I) {
1788 switch (I->getOpcode()) {
1789 case Instruction::Call:
1790 if (selectCall(I))
1791 return true;
1792 break;
1793 case Instruction::Select:
1794 return selectSelect(I);
1795 case Instruction::Trunc:
1796 return selectTrunc(I);
1797 case Instruction::ZExt:
1798 return selectZExt(I);
1799 case Instruction::SExt:
1800 return selectSExt(I);
1801 case Instruction::ICmp:
1802 return selectICmp(I);
1803 case Instruction::FCmp:
1804 return selectFCmp(I);
1805 case Instruction::BitCast:
1806 return selectBitCast(I);
1807 case Instruction::Load:
1808 return selectLoad(I);
1809 case Instruction::Store:
1810 return selectStore(I);
1811 case Instruction::CondBr:
1812 return selectCondBr(I);
1813 case Instruction::Ret:
1814 return selectRet(I);
1815 case Instruction::Unreachable:
1816 return selectUnreachable(I);
1817 default:
1818 break;
1819 }
1820
1821 // Fall back to target-independent instruction selection.
1822 return selectOperator(I, I->getOpcode());
1823}
1824
1825FastISel *
1827 const TargetLibraryInfo *LibInfo,
1828 const LibcallLoweringInfo *LibcallLowering) {
1829 return new WebAssemblyFastISel(FuncInfo, LibInfo, LibcallLowering);
1830}
MachineInstrBuilder MachineInstrBuilder & DefMI
static void emitLoad(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos, const TargetInstrInfo &TII, unsigned Reg1, unsigned Reg2, int Offset, bool IsPostDec)
Emit a load-pair instruction for frame-destroy.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT F32
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file defines the FastISel class.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
static bool isFoldableSExtOpcode(unsigned Opc)
static unsigned getSExtLoadOpcode(unsigned LoadSize, bool I64Result, bool A64)
static bool isI64SExtResult(unsigned Opc)
static unsigned matchFoldableCopyToI64Ext(MachineInstr *MI, const LoadInst *LI, MachineRegisterInfo &MRI, bool A64, MachineInstr *&OuterUserMI)
static unsigned matchFoldableSExtFromPromotedI32(MachineInstr *MI, const LoadInst *LI, MachineRegisterInfo &MRI, bool A64, MachineInstr *&UserMI)
static unsigned getZExtLoadOpcode(unsigned LoadSize, bool I64Result, bool A64)
static unsigned matchFoldableShift(MachineInstr *MI, const LoadInst *LI, MachineRegisterInfo &MRI, bool A64, MachineInstr *&UserMI, MachineInstr *&OuterUserMI)
Matches a sign-extension pattern (shl + shr_s) to fold it into a signed load.
static unsigned getFoldedI64LoadOpcode(Register DestReg, const LoadInst *LI, MachineRegisterInfo &MRI, bool A64, MachineInstr *&OuterUserMI, unsigned NarrowOpc)
static unsigned getFoldedLoadOpcode(MachineInstr *MI, MachineRegisterInfo &MRI, const LoadInst *LI, bool A64)
static unsigned matchFoldableAnd(MachineInstr *MI, const LoadInst *LI, MachineRegisterInfo &MRI, bool A64, MachineInstr *&OuterUserMI)
This file provides WebAssembly-specific target descriptions.
This file declares WebAssembly-specific per-machine-function information.
This file declares the WebAssembly-specific subclass of TargetSubtarget.
This file contains the declaration of the WebAssembly-specific type parsing utility functions.
This file contains the declaration of the WebAssembly-specific utility functions.
Value * RHS
Value * LHS
an instruction to allocate memory on the stack
LLVM Basic Block Representation.
Definition BasicBlock.h:62
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signature does not match the call signature.
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
FunctionType * getFunctionType() const
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool isMustTailCall() const
This is an important base class in LLVM.
Definition Constant.h:43
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool isVarArg() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Tracks which library functions to use for a particular subtarget.
An instruction for reading from memory.
void setNoStrip() const
@ INVALID_SIMPLE_VALUE_TYPE
SimpleValueType SimpleTy
MachineInstrBundleIterator< MachineInstr > iterator
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
use_instr_nodbg_iterator use_instr_nodbg_begin(Register RegNo) const
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register, or null if none is found.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
TypeSize getElementOffset(unsigned Idx) const
Definition DataLayout.h:774
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:281
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:278
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
Not(const Pred &P) -> Not< Pred >
bool isDefaultAddressSpace(unsigned AS)
MCSymbolWasm * getOrCreateFunctionTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget)
Returns the __indirect_function_table, for use in call_indirect and in function bitcasts.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo, const LibcallLoweringInfo *libcallLowering)
@ User
could "use" a pointer
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:557
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
gep_type_iterator gep_type_end(const User *GEP)
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
generic_gep_type_iterator<> gep_type_iterator
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
gep_type_iterator gep_type_begin(const User *GEP)
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
Definition MathExtras.h:77
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:876
Extended Value Type.
Definition ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:145
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:324