LLVM 23.0.0git
BPFISelLowering.cpp
Go to the documentation of this file.
1//===-- BPFISelLowering.cpp - BPF DAG Lowering Implementation ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that BPF uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "BPFISelLowering.h"
15#include "BPF.h"
16#include "BPFSubtarget.h"
25#include "llvm/IR/DIBuilder.h"
28#include "llvm/IR/Module.h"
29#include "llvm/Support/Debug.h"
33
34using namespace llvm;
35
36#define DEBUG_TYPE "bpf-lower"
37
38static cl::opt<bool> BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order",
39 cl::Hidden, cl::init(false),
40 cl::desc("Expand memcpy into load/store pairs in order"));
41
43 "bpf-min-jump-table-entries", cl::init(13), cl::Hidden,
44 cl::desc("Set minimum number of entries to use a jump table on BPF"));
45
// Emit an unsupported-feature diagnostic for the current function. When a
// Val is supplied, its textual rendering is prepended to Msg so the message
// names the offending DAG node.
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg,
                 SDValue Val = {}) {
  std::string Str;
  if (Val) {
    // Render the SDValue into Str; the trailing space separates it from Msg.
    raw_string_ostream OS(Str);
    Val->print(OS);
    OS << ' ';
  }
      MF.getFunction(), Twine(Str).concat(Msg), DL.getDebugLoc()));
}
58
                                     const BPFSubtarget &STI)
    : TargetLowering(TM, STI) {

  // Set up the register classes.
  addRegisterClass(MVT::i64, &BPF::GPRRegClass);
  // With ALU32, i32 values get their own 32-bit subregister class.
  if (STI.getHasAlu32())
    addRegisterClass(MVT::i32, &BPF::GPR32RegClass);

  // Compute derived properties from the register classes


  if (!STI.hasGotox())


  if (STI.hasGotox())


  // Set unsupported atomic operations as Custom so
  // we can emit better error messages than fatal error
  // from selectiondag.
  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
    if (VT == MVT::i32) {
      // With ALU32 the i32 atomics are natively supported; skip them.
      if (STI.getHasAlu32())
        continue;
    } else {
    }

  }

  for (auto VT : {MVT::i32, MVT::i64}) {
  }

  for (auto VT : { MVT::i32, MVT::i64 }) {
    // i32 operations only get configured when ALU32 is available.
    if (VT == MVT::i32 && !STI.getHasAlu32())
      continue;

    if (!STI.hasSdivSmod()) {
    }

  }

  if (STI.getHasAlu32()) {
    STI.getHasJmp32() ? Custom : Promote);
  }

  if (!STI.hasMovsx()) {
  }

  // Extended load operations for i1 types must be promoted
  for (MVT VT : MVT::integer_valuetypes()) {

    // Without the ldsx (sign-extending load) feature, sign-extending loads
    // must be expanded into a load plus explicit extension.
    if (!STI.hasLdsx()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
    }
  }


  // Function alignments

  // LLVM generic code will try to expand memcpy into load/store pairs at this
  // stage which is before quite a few IR optimization passes, therefore the
  // loads and stores could potentially be moved apart from each other which
  // will cause trouble to memcpy pattern matcher inside kernel eBPF JIT
  // compilers.
  //
  // When -bpf-expand-memcpy-in-order specified, we want to defer the expand
  // of memcpy to later stage in IR optimization pipeline so those load/store
  // pairs won't be touched and could be kept in order. Hence, we set
  // MaxStoresPerMem* to zero to disable the generic getMemcpyLoadsAndStores
  // code path, and ask LLVM to use target expander EmitTargetCodeForMemcpy.
  } else {
    // inline memcpy() for kernel to see explicit copy
    unsigned CommonMaxStores =
  }

  // CPU/Feature control
  // Cache subtarget feature flags queried throughout lowering below.
  HasAlu32 = STI.getHasAlu32();
  HasJmp32 = STI.getHasJmp32();
  HasJmpExt = STI.getHasJmpExt();
  HasMovsx = STI.hasMovsx();

  // Drives allowsMisalignedMemoryAccesses() below.
  AllowsMisalignedMemAccess = STI.getAllowsMisalignedMemAccess();
}
212
                                                        unsigned *Fast) const {
  // Misaligned access is an all-or-nothing subtarget switch.
  // allows-misaligned-mem-access is disabled
  if (!AllowsMisalignedMemAccess)
    return false;

  // only allow misalignment for simple value types
  if (!VT.isSimple())
    return false;

  // always assume fast mode when misalignment is allowed
  if (Fast)
    *Fast = true;

  return true;
}
230
232 return false;
233}
234
235bool BPFTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
236 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
237 return false;
238 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
239 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
240 return NumBits1 > NumBits2;
241}
242
243bool BPFTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
244 if (!VT1.isInteger() || !VT2.isInteger())
245 return false;
246 unsigned NumBits1 = VT1.getSizeInBits();
247 unsigned NumBits2 = VT2.getSizeInBits();
248 return NumBits1 > NumBits2;
249}
250
251bool BPFTargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
252 if (!getHasAlu32() || !Ty1->isIntegerTy() || !Ty2->isIntegerTy())
253 return false;
254 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
255 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
256 return NumBits1 == 32 && NumBits2 == 64;
257}
258
259bool BPFTargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
260 if (!getHasAlu32() || !VT1.isInteger() || !VT2.isInteger())
261 return false;
262 unsigned NumBits1 = VT1.getSizeInBits();
263 unsigned NumBits2 = VT2.getSizeInBits();
264 return NumBits1 == 32 && NumBits2 == 64;
265}
266
267bool BPFTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
268 EVT VT1 = Val.getValueType();
269 if (Val.getOpcode() == ISD::LOAD && VT1.isSimple() && VT2.isSimple()) {
270 MVT MT1 = VT1.getSimpleVT().SimpleTy;
271 MVT MT2 = VT2.getSimpleVT().SimpleTy;
272 if ((MT1 == MVT::i8 || MT1 == MVT::i16 || MT1 == MVT::i32) &&
273 (MT2 == MVT::i32 || MT2 == MVT::i64))
274 return true;
275 }
276 return TargetLoweringBase::isZExtFree(Val, VT2);
277}
278

  // 'w' selects a register-class constraint (the 32-bit GPR32 class, see
  // getRegForInlineAsmConstraint below); all other letters are handled by
  // the generic implementation.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'w':
      return C_RegisterClass;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}
296
std::pair<unsigned, const TargetRegisterClass *>
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    case 'r': // GENERAL_REGS
      return std::make_pair(0U, &BPF::GPRRegClass);
    case 'w':
      // 'w' maps to the 32-bit register class, available only with ALU32.
      if (HasAlu32)
        return std::make_pair(0U, &BPF::GPR32RegClass);
      break;
    default:
      break;
    }
  }

}
317
void BPFTargetLowering::ReplaceNodeResults(
  const char *Msg;
  uint32_t Opcode = N->getOpcode();
  switch (Opcode) {
  default:
    report_fatal_error("unhandled custom legalization: " + Twine(Opcode));
  case ISD::ATOMIC_SWAP:
    // Tailor the hint: when ALU32 is enabled (or for atomic add) a 32-bit
    // form exists, otherwise only the 64-bit form is usable.
    if (HasAlu32 || Opcode == ISD::ATOMIC_LOAD_ADD)
      Msg = "unsupported atomic operation, please use 32/64 bit version";
    else
      Msg = "unsupported atomic operation, please use 64 bit version";
    break;
  case ISD::ATOMIC_LOAD:
    // Atomic load/store reaching here needs no replacement diagnostics.
    return;
  }

  SDLoc DL(N);
  // We'll still produce a fatal error downstream, but this diagnostic is more
  // user-friendly.
  fail(DL, DAG, Msg);
}
346
  // Dispatch for every operation the constructor marked as Custom.
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented opcode: " + Twine(Op.getOpcode()));
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
    return LowerGlobalAddress(Op, DAG);
    return LowerConstantPool(Op, DAG);
    return LowerBlockAddress(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::SDIV:
  case ISD::SREM:
    return LowerSDIVSREM(Op, DAG);
  case ISD::SHL_PARTS:
  case ISD::SRL_PARTS:
  case ISD::SRA_PARTS:
    return LowerShiftParts(Op, DAG);
    return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::ATOMIC_LOAD:
    return LowerATOMIC_LOAD_STORE(Op, DAG);
  case ISD::TRAP:
    return LowerTRAP(Op, DAG);
  }
}
379
380// Calling Convention Implementation
381#include "BPFGenCallingConv.inc"
382
// Apply AssertSext/AssertZext and truncate based on VA's LocInfo.
                                 const CCValAssign &VA, EVT RegVT,
                                 SDValue ArgValue) {
  // Record which extension the caller performed so later DAG combines may
  // rely on the state of the upper bits.
  if (VA.getLocInfo() == CCValAssign::SExt)
    ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
                           DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
                           DAG.getValueType(VA.getValVT()));
  // Any non-Full LocInfo means the location type is wider than the value
  // type, so truncate back down.
  if (VA.getLocInfo() != CCValAssign::Full)
    ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
  return ArgValue;
}
397
// Lower incoming arguments: register arguments become live-in vreg copies,
// stack arguments become LOAD_STACK_ARG nodes. Variadic functions are
// diagnosed as unsupported.
SDValue BPFTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv) {
  default:
    report_fatal_error("unimplemented calling convention: " + Twine(CallConv));
  case CallingConv::C:
    break;
  }

  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, getHasAlu32() ? CC_BPF32 : CC_BPF64);

  for (size_t I = 0; I < ArgLocs.size(); ++I) {
    auto &VA = ArgLocs[I];
    EVT RegVT = VA.getLocVT();

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      MVT::SimpleValueType SimpleTy = RegVT.getSimpleVT().SimpleTy;
      switch (SimpleTy) {
      default: {
        std::string Str;
        {
          raw_string_ostream OS(Str);
          RegVT.print(OS);
        }
        report_fatal_error("unhandled argument type: " + Twine(Str));
      }
      case MVT::i32:
      case MVT::i64:
        // Copy the physical argument register into a fresh vreg, then
        // adjust to the value type according to the assigned LocInfo.
        Register VReg = RegInfo.createVirtualRegister(
            SimpleTy == MVT::i64 ? &BPF::GPRRegClass : &BPF::GPR32RegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT);
        InVals.push_back(convertLocValType(DAG, DL, VA, RegVT, ArgValue));
        break;
      }
      continue;
    }

    if (VA.isMemLoc()) {
      // For example, two stack arguments,
      // arg1: Off = 8
      // arg2: off = 16
      int Off = VA.getLocMemOffset() + 8;
      // Offsets must fit the 16-bit signed immediate of the load.
      if (Off > INT16_MAX) {
        fail(DL, DAG, "extra parameter stack depth exceeded limit");
        break;
      }

      // Physical extra argument slot is always 64-bit.
      SDValue StackVal = DAG.getNode(BPFISD::LOAD_STACK_ARG, DL,
                                     DAG.getVTList(MVT::i64, MVT::Other), Chain,
                                     DAG.getConstant(Off, DL, MVT::i64));
      SDValue ArgValue = StackVal.getValue(0);
      Chain = StackVal.getValue(1);
      InVals.push_back(convertLocValType(DAG, DL, VA, MVT::i64, ArgValue));
      continue;
    }
  }

  if (IsVarArg)
    fail(DL, DAG, "variadic functions are not supported");
  return Chain;
}
471
472static void resetRegMaskBit(const TargetRegisterInfo *TRI, uint32_t *RegMask,
473 MCRegister Reg) {
474 for (MCPhysReg SubReg : TRI->subregs_inclusive(Reg))
475 RegMask[SubReg / 32] &= ~(1u << (SubReg % 32));
476}
477
// Allocate a function-lifetime copy of BaseRegMask that the caller may then
// mutate (see resetRegMaskBit above and the bpf_fastcall path in LowerCall).
                                     MachineFunction &MF,
                                     const uint32_t *BaseRegMask) {
  uint32_t *RegMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
  memcpy(RegMask, BaseRegMask, sizeof(RegMask[0]) * RegMaskSize);
  return RegMask;
}
486
// Lower an outgoing call: arguments are split between registers and
// STORE_STACK_ARG slots, the callee is rewritten to a target node, and a
// BPFISD::CALL is emitted bracketed by CALLSEQ_START/END.
SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  auto &Outs = CLI.Outs;
  auto &OutVals = CLI.OutVals;
  auto &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();

  // BPF target does not support tail call optimization.
  IsTailCall = false;

  switch (CallConv) {
  default:
    report_fatal_error("unsupported calling convention: " + Twine(CallConv));
  case CallingConv::C:
    break;
  }

  // Analyze operands of the call, assigning locations to each operand.
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeCallOperands(Outs, getHasAlu32() ? CC_BPF32 : CC_BPF64);

  unsigned NumBytes = CCInfo.getStackSize();

  // Byval aggregates cannot be passed; diagnose the first occurrence.
  for (auto &Arg : Outs) {
    ISD::ArgFlagsTy Flags = Arg.Flags;
    if (!Flags.isByVal())
      continue;
    fail(CLI.DL, DAG, "pass by value not supported", Callee);
    break;
  }

  auto PtrVT = getPointerTy(MF.getDataLayout());
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  // Walk arg assignments
  for (size_t i = 0; i < OutVals.size(); ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue &Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      report_fatal_error("unhandled location info: " + Twine(VA.getLocInfo()));
      break;
      Arg = DAG.getNode(ISD::SIGN_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
      Arg = DAG.getNode(ISD::ZERO_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
      Arg = DAG.getNode(ISD::ANY_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    }

    // Push arguments into RegsToPass vector
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    if (VA.isMemLoc()) {
      // Extra arguments live at negative stack offsets; offsets beyond the
      // 16-bit signed immediate range cannot be encoded.
      int Off = -8 - VA.getLocMemOffset();
      if (Off < INT16_MIN) {
        fail(CLI.DL, DAG, "extra parameter stack depth exceeded limit");
        break;
      }

      // STORE_STACK_ARG requires i64 operands. With ALU32 mode, the CC
      // promotion may only extend to i32, so extend to i64 if needed.
      if (Arg.getValueType() != MVT::i64)
        Arg = DAG.getNode(ISD::ANY_EXTEND, CLI.DL, MVT::i64, Arg);

      SDValue OffVal = DAG.getConstant(Off, CLI.DL, MVT::i64);
      Chain = DAG.getNode(BPFISD::STORE_STACK_ARG, CLI.DL, MVT::Other, Chain,
                          OffVal, Arg);
      continue;
    }

    report_fatal_error("unhandled argument location");
  }

  SDValue InGlue;

  // Build a sequence of copy-to-reg nodes chained together with token chain and
  // flag operands which copy the outgoing args into registers. The InGlue in
  // necessary since all emitted instructions must be stuck together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, CLI.DL, Reg.first, Reg.second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), CLI.DL, PtrVT,
                                        G->getOffset(), 0);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
    StringRef Sym = E->getSymbol();
    // Only this fixed allow-list of runtime/libcall symbols may be called;
    // any other external symbol is an unsupported builtin.
    if (Sym != BPF_TRAP && Sym != "__multi3" && Sym != "__divti3" &&
        Sym != "__modti3" && Sym != "__udivti3" && Sym != "__umodti3" &&
        Sym != "memcpy" && Sym != "memset" && Sym != "memmove")
      fail(
          CLI.DL, DAG,
          Twine("A call to built-in function '" + Sym + "' is not supported."));
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  bool HasFastCall =
      (CLI.CB && isa<CallInst>(CLI.CB) && CLI.CB->hasFnAttr("bpf_fastcall"));
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (HasFastCall) {
    // bpf_fastcall: start from the preserve-all regmask, then mark the
    // argument registers (and R0 when a value is returned) as clobbered.
    uint32_t *RegMask = regMaskFromTemplate(
        TRI, MF, TRI->getCallPreservedMask(MF, CallingConv::PreserveAll));
    for (auto const &RegPair : RegsToPass)
      resetRegMaskBit(TRI, RegMask, RegPair.first);
    if (!CLI.CB->getType()->isVoidTy())
      resetRegMaskBit(TRI, RegMask, BPF::R0);
    Ops.push_back(DAG.getRegisterMask(RegMask));
  } else {
    Ops.push_back(
        DAG.getRegisterMask(TRI->getCallPreservedMask(MF, CLI.CallConv)));
  }

  if (InGlue.getNode())
    Ops.push_back(InGlue);

  Chain = DAG.getNode(BPFISD::CALL, CLI.DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, CLI.DL);
  InGlue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, CLI.DL, DAG,
                         InVals);
}
652
// Lower a function return: every return value must have been assigned a
// register location; the copies are glued together and fed to RET_GLUE.
BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &DL, SelectionDAG &DAG) const {
  unsigned Opc = BPFISD::RET_GLUE;

  // CCValAssign - represent the assignment of the return value to a location
  MachineFunction &MF = DAG.getMachineFunction();

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (size_t i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    // Only register returns are representable.
    if (!VA.isRegLoc())
      report_fatal_error("stack return values are not supported");

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Glue);

    // Guarantee that all emitted copies are stuck together,
    // avoiding something bad.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}
696
// Copy call results out of their physical return registers into the DAG,
// threading the chain and glue through each copy.
SDValue BPFTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  MachineFunction &MF = DAG.getMachineFunction();
  // Assign locations to each value returned by this call.
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  CCInfo.AnalyzeCallResult(Ins, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  // Copy all of the result registers out of their specified physreg.
  for (auto &Val : RVLocs) {
    // CopyFromReg results: value 0 = the copied value, 1 = chain, 2 = glue
    // used to keep successive copies ordered.
    Chain = DAG.getCopyFromReg(Chain, DL, Val.getLocReg(),
                               Val.getValVT(), InGlue).getValue(1);
    InGlue = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}
719
  // Used when the extended jump conditions are unavailable (see LowerBR_CC /
  // LowerSELECT_CC): the less-than family of condition codes gets its
  // operands swapped so the comparison can be emitted in a supported form.
  switch (CC) {
  default:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETLT:
  case ISD::SETLE:
    std::swap(LHS, RHS);
    break;
  }
}
733
734SDValue BPFTargetLowering::LowerSDIVSREM(SDValue Op, SelectionDAG &DAG) const {
735 SDLoc DL(Op);
736 fail(DL, DAG,
737 "unsupported signed division, please convert to unsigned div/mod.");
738 return DAG.getUNDEF(Op->getValueType(0));
739}
740
741SDValue BPFTargetLowering::LowerShiftParts(SDValue Op,
742 SelectionDAG &DAG) const {
743 SDValue Lo, Hi;
744 expandShiftParts(Op.getNode(), Lo, Hi, DAG);
745 return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
746}
747
748SDValue BPFTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
749 SelectionDAG &DAG) const {
750 SDLoc DL(Op);
751 fail(DL, DAG, "unsupported dynamic stack allocation");
752 auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
753 return DAG.getMergeValues(Ops, SDLoc());
754}
755
756SDValue BPFTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
757 SDValue Chain = Op.getOperand(0);
758 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
759 SDValue LHS = Op.getOperand(2);
760 SDValue RHS = Op.getOperand(3);
761 SDValue Dest = Op.getOperand(4);
762 SDLoc DL(Op);
763
764 if (!getHasJmpExt())
765 NegateCC(LHS, RHS, CC);
766
767 return DAG.getNode(BPFISD::BR_CC, DL, Op.getValueType(), Chain, LHS, RHS,
768 DAG.getConstant(CC, DL, LHS.getValueType()), Dest);
769}
770
771SDValue BPFTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
772 SDValue LHS = Op.getOperand(0);
773 SDValue RHS = Op.getOperand(1);
774 SDValue TrueV = Op.getOperand(2);
775 SDValue FalseV = Op.getOperand(3);
776 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
777 SDLoc DL(Op);
778
779 if (!getHasJmpExt())
780 NegateCC(LHS, RHS, CC);
781
782 SDValue TargetCC = DAG.getConstant(CC, DL, LHS.getValueType());
783 SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
784
785 return DAG.getNode(BPFISD::SELECT_CC, DL, Op.getValueType(), Ops);
786}
787
SDValue BPFTargetLowering::LowerATOMIC_LOAD_STORE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDNode *N = Op.getNode();
  SDLoc DL(N);

  // Diagnose the unsupported ordering but keep the node intact so
  // compilation can proceed and report any further problems.
  if (cast<AtomicSDNode>(N)->getMergedOrdering() ==
    fail(DL, DAG,
         "sequentially consistent (seq_cst) "
         "atomic load/store is not supported");

  return Op;
}
801
  // Return the module's existing BPF_TRAP declaration when present; this
  // helper is idempotent per module.
  if (auto *Fn = M->getFunction(BPF_TRAP))
    return Fn;

  // Declare BPF_TRAP as void(void), local to the DSO, in the .ksyms section.
  FunctionType *FT = FunctionType::get(Type::getVoidTy(M->getContext()), false);
  Function *NewF =
  NewF->setDSOLocal(true);
  NewF->setSection(".ksyms");

  // Without any compile units there is no debug info to attach.
  if (M->debug_compile_units().empty())
    return NewF;

  DIBuilder DBuilder(*M);
  // A single null entry in the type array denotes the void return type.
  DITypeArray ParamTypes =
      DBuilder.getOrCreateTypeArray({nullptr /*void return*/});
  DISubroutineType *FuncType = DBuilder.createSubroutineType(ParamTypes);
  DICompileUnit *CU = *M->debug_compile_units_begin();
  DISubprogram *SP =
      DBuilder.createFunction(CU, BPF_TRAP, BPF_TRAP, nullptr, 0, FuncType, 0,
                              DINode::FlagZero, DISubprogram::SPFlagZero);
  NewF->setSubprogram(SP);
  return NewF;
}
827
// Lower ISD::TRAP as a no-return call to the external BPF_TRAP helper,
// reusing the regular LowerCall machinery.
SDValue BPFTargetLowering::LowerTRAP(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  TargetLowering::CallLoweringInfo CLI(DAG);
  SDNode *N = Op.getNode();
  SDLoc DL(N);

  auto PtrVT = getPointerTy(MF.getDataLayout());
  CLI.Callee = DAG.getTargetGlobalAddress(Fn, DL, PtrVT);
  CLI.Chain = N->getOperand(0);
  CLI.IsTailCall = false;
  CLI.IsVarArg = false;
  CLI.DL = std::move(DL);
  CLI.NoMerge = false;
  // The trap call never returns.
  CLI.DoesNotReturn = true;
  return LowerCall(CLI, InVals);
}
847
848SDValue BPFTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
849 JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
850 return getAddr(N, DAG);
851}
852
// Build the target constant-pool node consumed by getAddr() below.
                              SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flags);
}
858
// Build the target jump-table node consumed by getAddr() below.
                              SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
}
863
864template <class NodeTy>
865SDValue BPFTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
866 unsigned Flags) const {
867 SDLoc DL(N);
868
869 SDValue GA = getTargetNode(N, DL, MVT::i64, DAG, Flags);
870
871 return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA);
872}
873
874SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op,
875 SelectionDAG &DAG) const {
876 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
877 if (N->getOffset() != 0)
878 report_fatal_error("invalid offset for global address: " +
879 Twine(N->getOffset()));
880
881 const GlobalValue *GVal = N->getGlobal();
882 SDLoc DL(Op);
883
884 // Wrap it in a TargetGlobalAddress
885 SDValue Addr = DAG.getTargetGlobalAddress(GVal, DL, MVT::i64);
886
887 // Emit pseudo instruction
888 return SDValue(DAG.getMachineNode(BPF::LDIMM64, DL, MVT::i64, Addr), 0);
889}
890
891SDValue BPFTargetLowering::LowerConstantPool(SDValue Op,
892 SelectionDAG &DAG) const {
893 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
894
895 return getAddr(N, DAG);
896}
897
898SDValue BPFTargetLowering::LowerBlockAddress(SDValue Op,
899 SelectionDAG &DAG) const {
900 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
901 SDLoc DL(Op);
902
903 // Wrap it in a TargetBlockAddress
904 SDValue Addr = DAG.getTargetBlockAddress(BA, MVT::i64);
905
906 // Emit pseudo instruction
907 return SDValue(DAG.getMachineNode(BPF::LDIMM64, DL, MVT::i64, Addr), 0);
908}
909
910unsigned
911BPFTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB,
912 unsigned Reg, bool isSigned) const {
913 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
914 const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
915 int RShiftOp = isSigned ? BPF::SRA_ri : BPF::SRL_ri;
916 MachineFunction *F = BB->getParent();
917 DebugLoc DL = MI.getDebugLoc();
918
919 MachineRegisterInfo &RegInfo = F->getRegInfo();
920
921 if (!isSigned) {
922 Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
923 BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
924 return PromotedReg0;
925 }
926 Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
927 Register PromotedReg1 = RegInfo.createVirtualRegister(RC);
928 Register PromotedReg2 = RegInfo.createVirtualRegister(RC);
929 if (HasMovsx) {
930 BuildMI(BB, DL, TII.get(BPF::MOVSX_rr_32), PromotedReg0).addReg(Reg);
931 } else {
932 BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
933 BuildMI(BB, DL, TII.get(BPF::SLL_ri), PromotedReg1)
934 .addReg(PromotedReg0).addImm(32);
935 BuildMI(BB, DL, TII.get(RShiftOp), PromotedReg2)
936 .addReg(PromotedReg1).addImm(32);
937 }
938
939 return PromotedReg2;
940}
941
BPFTargetLowering::EmitInstrWithCustomInserterMemcpy(MachineInstr &MI,
                                                     const {
  MachineFunction *MF = MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB(*MF, MI);
  unsigned ScratchReg;

  // This function does custom insertion during lowering BPFISD::MEMCPY which
  // only has two register operands from memcpy semantics, the copy source
  // address and the copy destination address.
  //
  // Because we will expand BPFISD::MEMCPY into load/store pairs, we will need
  // a third scratch register to serve as the destination register of load and
  // source register of store.
  //
  // The scratch register here is with the Define | Dead | EarlyClobber flags.
  // The EarlyClobber flag has the semantic property that the operand it is
  // attached to is clobbered before the rest of the inputs are read. Hence it
  // must be unique among the operands to the instruction. The Define flag is
  // needed to coerce the machine verifier that an Undef value isn't a problem
  // as we anyway is loading memory into it. The Dead flag is needed as the
  // value in scratch isn't supposed to be used by any other instruction.
  // Append the scratch vreg as an extra operand on the MEMCPY pseudo.
  ScratchReg = MRI.createVirtualRegister(&BPF::GPRRegClass);
  MIB.addReg(ScratchReg,

  return BB;
}
972
// Expand the LDIMM64 pseudo. Block addresses and private constant arrays of
// block addresses are turned into jump-table references; all other globals
// become a plain LD_imm64 of the global address.
MachineBasicBlock *BPFTargetLowering::EmitInstrWithCustomInserterLDimm64(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const BPFInstrInfo *TII = MF->getSubtarget<BPFSubtarget>().getInstrInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  // Build address taken map for Global Variables and BlockAddresses
  DenseMap<const BasicBlock *, MachineBasicBlock *> AddressTakenBBs;
  for (MachineBasicBlock &MBB : *MF) {
    if (const BasicBlock *BB = MBB.getBasicBlock())
      if (BB->hasAddressTaken())
        AddressTakenBBs[BB] = &MBB;
  }

  MachineOperand &MO = MI.getOperand(1);
  assert(MO.isBlockAddress() || MO.isGlobal());

  Register ResultReg = MI.getOperand(0).getReg();
  Register TmpReg = RegInfo.createVirtualRegister(RC);

  std::vector<MachineBasicBlock *> Targets;
  unsigned JTI;

  if (MO.isBlockAddress()) {
    // A bare block address: emit a one-entry jump table, load its address
    // with LD_imm64, then load the 8-byte entry to obtain the target.
    auto *BA = MO.getBlockAddress();
    MachineBasicBlock *TgtMBB = AddressTakenBBs[BA->getBasicBlock()];
    assert(TgtMBB);

    Targets.push_back(TgtMBB);
    JTI = MF->getOrCreateJumpTableInfo(getJumpTableEncoding())
              ->createJumpTableIndex(Targets);

    BuildMI(*BB, MI, DL, TII->get(BPF::LD_imm64), TmpReg)
        .addJumpTableIndex(JTI);
    BuildMI(*BB, MI, DL, TII->get(BPF::LDD), ResultReg)
        .addReg(TmpReg)
        .addImm(0);
    MI.eraseFromParent();
    return BB;
  }

  // Helper: emit LD_imm64 with operand GlobalAddress or JumpTable
  auto emitLDImm64 = [&](const GlobalValue *GV = nullptr, unsigned JTI = -1) {
    auto MIB = BuildMI(*BB, MI, DL, TII->get(BPF::LD_imm64), ResultReg);
    if (GV)
      MIB.addGlobalAddress(GV);
    else
      MIB.addJumpTableIndex(JTI);
    MI.eraseFromParent();
    return BB;
  };

  // Must be a global at this point
  const GlobalValue *GVal = MO.getGlobal();
  const auto *GV = dyn_cast<GlobalVariable>(GVal);

  // Only private, constant, initialized arrays consisting purely of block
  // addresses are converted into jump tables; anything else is emitted as a
  // plain global address.
  if (!GV || GV->getLinkage() != GlobalValue::PrivateLinkage ||
      !GV->isConstant() || !GV->hasInitializer())
    return emitLDImm64(GVal);

  const auto *CA = dyn_cast<ConstantArray>(GV->getInitializer());
  if (!CA)
    return emitLDImm64(GVal);

  for (const Use &Op : CA->operands()) {
    if (!isa<BlockAddress>(Op))
      return emitLDImm64(GVal);
    auto *BA = cast<BlockAddress>(Op);
    MachineBasicBlock *TgtMBB = AddressTakenBBs[BA->getBasicBlock()];
    assert(TgtMBB);
    Targets.push_back(TgtMBB);
  }

  JTI = MF->getOrCreateJumpTableInfo(getJumpTableEncoding())
            ->createJumpTableIndex(Targets);
  return emitLDImm64(nullptr, JTI);
}
1052
// BPFTargetLowering::EmitInstrWithCustomInserter -- expand pseudo
// instructions (Select*, MEMCPY, LDIMM64) that were marked with
// 'usesCustomInserter'. For selects it builds the classic two-way
// diamond CFG and a PHI; MEMCPY and LDIMM64 are delegated to helpers.
// NOTE(review): the opening of this definition (return type, the
// MachineInstr &MI parameter, and the locals TII / HasJmp32 / iterator I)
// is not visible in this extract -- confirm against the full source.
1055 MachineBasicBlock *BB) const {
1057 DebugLoc DL = MI.getDebugLoc();
1058 unsigned Opc = MI.getOpcode();
// Classify the pseudo: register-register select variants.
1059 bool isSelectRROp = (Opc == BPF::Select ||
1060 Opc == BPF::Select_64_32 ||
1061 Opc == BPF::Select_32 ||
1062 Opc == BPF::Select_32_64);
1063
1064 bool isMemcpyOp = Opc == BPF::MEMCPY;
1065 bool isLDimm64Op = Opc == BPF::LDIMM64;
1066
// Debug-only sanity check: any opcode reaching here must be one of the
// handled pseudo kinds (RR-select, RI-select, MEMCPY, LDIMM64).
1067#ifndef NDEBUG
1068 bool isSelectRIOp = (Opc == BPF::Select_Ri ||
1069 Opc == BPF::Select_Ri_64_32 ||
1070 Opc == BPF::Select_Ri_32 ||
1071 Opc == BPF::Select_Ri_32_64);
1072
1073 if (!(isSelectRROp || isSelectRIOp || isMemcpyOp || isLDimm64Op))
1074 report_fatal_error("unhandled instruction type: " + Twine(Opc));
1075#endif
1076
// Non-select pseudos are expanded by dedicated helpers.
1077 if (isMemcpyOp)
1078 return EmitInstrWithCustomInserterMemcpy(MI, BB);
1079
1080 if (isLDimm64Op)
1081 return EmitInstrWithCustomInserterLDimm64(MI, BB);
1082
// 32-bit-compare select variants need either JMP32 opcodes or explicit
// subregister extension (see below).
1083 bool is32BitCmp = (Opc == BPF::Select_32 ||
1084 Opc == BPF::Select_32_64 ||
1085 Opc == BPF::Select_Ri_32 ||
1086 Opc == BPF::Select_Ri_32_64);
1087
1088 // To "insert" a SELECT instruction, we actually have to insert the diamond
1089 // control-flow pattern. The incoming instruction knows the destination vreg
1090 // to set, the condition code register to branch on, the true/false values to
1091 // select between, and a branch opcode to use.
1092 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1094
1095 // ThisMBB:
1096 // ...
1097 // TrueVal = ...
1098 // jmp_XX r1, r2 goto Copy1MBB
1099 // fallthrough --> Copy0MBB
1100 MachineBasicBlock *ThisMBB = BB;
1101 MachineFunction *F = BB->getParent();
1102 MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
1103 MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);
1104
1105 F->insert(I, Copy0MBB);
1106 F->insert(I, Copy1MBB);
1107 // Update machine-CFG edges by transferring all successors of the current
1108 // block to the new block which will contain the Phi node for the select.
1109 Copy1MBB->splice(Copy1MBB->begin(), BB,
1110 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1111 Copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
1112 // Next, add the true and fallthrough blocks as its successors.
1113 BB->addSuccessor(Copy0MBB);
1114 BB->addSuccessor(Copy1MBB);
1115
1116 // Insert Branch if Flag
// Map the ISD condition code (operand 3) to the matching BPF conditional
// jump opcode; the SET_NEWCC macro picks the rr/ri and 32/64-bit variant.
1117 int CC = MI.getOperand(3).getImm();
1118 int NewCC;
1119 switch (CC) {
1120#define SET_NEWCC(X, Y) \
1121 case ISD::X: \
1122 if (is32BitCmp && HasJmp32) \
1123 NewCC = isSelectRROp ? BPF::Y##_rr_32 : BPF::Y##_ri_32; \
1124 else \
1125 NewCC = isSelectRROp ? BPF::Y##_rr : BPF::Y##_ri; \
1126 break
1127 SET_NEWCC(SETGT, JSGT);
1128 SET_NEWCC(SETUGT, JUGT);
1129 SET_NEWCC(SETGE, JSGE);
1130 SET_NEWCC(SETUGE, JUGE);
1131 SET_NEWCC(SETEQ, JEQ);
1132 SET_NEWCC(SETNE, JNE);
1133 SET_NEWCC(SETLT, JSLT);
1134 SET_NEWCC(SETULT, JULT);
1135 SET_NEWCC(SETLE, JSLE);
1136 SET_NEWCC(SETULE, JULE);
1137 default:
1138 report_fatal_error("unimplemented select CondCode " + Twine(CC));
1139 }
1140
1141 Register LHS = MI.getOperand(1).getReg();
// Signedness decides whether the 32->64 promotion below sign- or
// zero-extends the comparison operands.
1142 bool isSignedCmp = (CC == ISD::SETGT ||
1143 CC == ISD::SETGE ||
1144 CC == ISD::SETLT ||
1145 CC == ISD::SETLE);
1146
1147 // eBPF at the moment only has 64-bit comparison. Any 32-bit comparison need
1148 // to be promoted, however if the 32-bit comparison operands are destination
1149 // registers then they are implicitly zero-extended already, there is no
1150 // need of explicit zero-extend sequence for them.
1151 //
1152 // We simply do extension for all situations in this method, but we will
1153 // try to remove those unnecessary in BPFMIPeephole pass.
1154 if (is32BitCmp && !HasJmp32)
1155 LHS = EmitSubregExt(MI, BB, LHS, isSignedCmp);
1156
1157 if (isSelectRROp) {
1158 Register RHS = MI.getOperand(2).getReg();
1159
1160 if (is32BitCmp && !HasJmp32)
1161 RHS = EmitSubregExt(MI, BB, RHS, isSignedCmp);
1162
1163 BuildMI(BB, DL, TII.get(NewCC)).addReg(LHS).addReg(RHS).addMBB(Copy1MBB);
1164 } else {
// Register-immediate form: operand 2 is the immediate.
1165 int64_t imm32 = MI.getOperand(2).getImm();
1166 // Check before we build J*_ri instruction.
1167 if (!isInt<32>(imm32))
1168 report_fatal_error("immediate overflows 32 bits: " + Twine(imm32));
1169 BuildMI(BB, DL, TII.get(NewCC))
1170 .addReg(LHS).addImm(imm32).addMBB(Copy1MBB);
1171 }
1172
1173 // Copy0MBB:
1174 // %FalseValue = ...
1175 // # fallthrough to Copy1MBB
1176 BB = Copy0MBB;
1177
1178 // Update machine-CFG edges
1179 BB->addSuccessor(Copy1MBB);
1180
1181 // Copy1MBB:
1182 // %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
1183 // ...
1184 BB = Copy1MBB;
// Merge point: operand 0 is the destination vreg, operand 5 the false
// value (from Copy0MBB), operand 4 the true value (from ThisMBB).
1185 BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())
1186 .addReg(MI.getOperand(5).getReg())
1187 .addMBB(Copy0MBB)
1188 .addReg(MI.getOperand(4).getReg())
1189 .addMBB(ThisMBB);
1190
1191 MI.eraseFromParent(); // The pseudo instruction is gone now.
1192 return BB;
1193}
1194
// BPFTargetLowering::getSetCCResultType -- value type produced by SETCC
// nodes: i32 when the ALU32 feature is enabled, i64 otherwise.
// NOTE(review): the first line of this signature is not visible in this
// extract; the class declaration shows (const DataLayout &DL,
// LLVMContext &Context, EVT VT) -- confirm against the full source.
1196 EVT VT) const {
1197 return getHasAlu32() ? MVT::i32 : MVT::i64;
1198}
1199
// BPFTargetLowering::getScalarShiftAmountTy -- type used for the shift
// amount of a scalar shift: i32 shifts keep an i32 amount when ALU32 is
// available, everything else uses i64.
// NOTE(review): the first line of this signature is not visible in this
// extract; the class declaration shows (const DataLayout &, EVT) --
// confirm against the full source.
1201 EVT VT) const {
1202 return (getHasAlu32() && VT == MVT::i32) ? MVT::i32 : MVT::i64;
1203}
1204
1205bool BPFTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1206 const AddrMode &AM, Type *Ty,
1207 unsigned AS,
1208 Instruction *I) const {
1209 // No global is ever allowed as a base.
1210 if (AM.BaseGV)
1211 return false;
1212
1213 switch (AM.Scale) {
1214 case 0: // "r+i" or just "i", depending on HasBaseReg.
1215 break;
1216 case 1:
1217 if (!AM.HasBaseReg) // allow "r+i".
1218 break;
1219 return false; // disallow "r+r" or "r+r+i".
1220 default:
1221 return false;
1222 }
1223
1224 return true;
1225}
1226
// BPFTargetLowering::CanLowerReturn -- report whether the outgoing return
// values (Outs) can be assigned under the BPF return-value calling
// convention; the 32-bit table is used when the ALU32 feature is enabled.
// NOTE(review): two lines of this definition are missing from this extract
// (presumably the Outs parameter and the RVLocs vector declaration that
// CCState is seeded with) -- confirm against the full source.
1227bool BPFTargetLowering::CanLowerReturn(
1228 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
1230 const Type *RetTy) const {
1232 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
1233 return CCInfo.CheckReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);
1234}
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static uint32_t * regMaskFromTemplate(const TargetRegisterInfo *TRI, MachineFunction &MF, const uint32_t *BaseRegMask)
static Function * createBPFUnreachable(Module *M)
static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static cl::opt< bool > BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order", cl::Hidden, cl::init(false), cl::desc("Expand memcpy into load/store pairs in order"))
static SDValue convertLocValType(SelectionDAG &DAG, const SDLoc &DL, const CCValAssign &VA, EVT RegVT, SDValue ArgValue)
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg, SDValue Val={})
static cl::opt< unsigned > BPFMinimumJumpTableEntries("bpf-min-jump-table-entries", cl::init(13), cl::Hidden, cl::desc("Set minimum number of entries to use a jump table on BPF"))
static void resetRegMaskBit(const TargetRegisterInfo *TRI, uint32_t *RegMask, MCRegister Reg)
static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC)
#define SET_NEWCC(X, Y)
#define BPF_TRAP
Definition BPF.h:25
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static bool isSigned(unsigned Opcode)
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
Value * RHS
Value * LHS
unsigned getCommonMaxStoresPerMemFunc() const
bool hasSdivSmod() const
bool getAllowsMisalignedMemAccess() const
bool getHasJmpExt() const
const BPFSelectionDAGInfo * getSelectionDAGInfo() const override
bool hasLdsx() const
bool hasGotox() const
bool hasMovsx() const
bool getHasJmp32() const
const BPFRegisterInfo * getRegisterInfo() const override
bool getHasAlu32() const
BPFTargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align, MachineMemOperand::Flags, unsigned *) const override
Determine if the target supports unaligned memory accesses.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
BPFTargetLowering(const TargetMachine &TM, const BPFSubtarget &STI)
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override
Return the type to use for a scalar shift opcode, given the shifted amount type.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
BasicBlock * getBasicBlock() const
Definition Constants.h:1106
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
LLVM_ABI DISubroutineType * createSubroutineType(DITypeArray ParameterTypes, DINode::DIFlags Flags=DINode::FlagZero, unsigned CC=0)
Create subroutine type.
LLVM_ABI DISubprogram * createFunction(DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File, unsigned LineNo, DISubroutineType *Ty, unsigned ScopeLine, DINode::DIFlags Flags=DINode::FlagZero, DISubprogram::DISPFlags SPFlags=DISubprogram::SPFlagZero, DITemplateParameterArray TParams=nullptr, DISubprogram *Decl=nullptr, DITypeArray ThrownTypes=nullptr, DINodeArray Annotations=nullptr, StringRef TargetFuncName="", bool UseKeyInstructions=false)
Create a new descriptor for the specified subprogram.
LLVM_ABI DITypeArray getOrCreateTypeArray(ArrayRef< Metadata * > Elements)
Get a DITypeArray, create one if required.
Subprogram description. Uses SubclassData1.
Type array for a subprogram.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
Diagnostic information for unsupported feature in backend.
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void setSubprogram(DISubprogram *SP)
Set the attached subprogram.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:168
void setCallingConv(CallingConv::ID CC)
Definition Function.h:276
LLVM_ABI void setSection(StringRef S)
Change the section for this global.
Definition Globals.cpp:284
LinkageTypes getLinkage() const
Module * getParent()
Get the module that this global value is contained inside of...
void setDSOLocal(bool Local)
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition GlobalValue.h:62
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
Machine Value Type.
SimpleValueType SimpleTy
static auto integer_valuetypes()
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool hasAddressTaken() const
Test whether this block is used as something other than the target of a terminator,...
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
Flags
Flags values. These may be or'd together.
const GlobalValue * getGlobal() const
const BlockAddress * getBlockAddress() const
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
bool isBlockAddress() const
isBlockAddress - Tests if this is a MO_BlockAddress operand.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
const SDValue & getOperand(unsigned Num) const
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
constexpr size_t size() const
Get the string size.
Definition StringRef.h:144
TargetInstrInfo - Interface to description of machine instruction set.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi, SelectionDAG &DAG) const
Expand shift-by-parts.
Primary interface to the complete machine description for the target machine.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
self_iterator getIterator()
Definition ilist_node.h:123
A raw_ostream that writes to an std::string.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
Definition CallingConv.h:66
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:819
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition ISDOpcodes.h:275
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:779
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:853
@ GlobalAddress
Definition ISDOpcodes.h:88
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition ISDOpcodes.h:280
@ CTLZ_ZERO_POISON
Definition ISDOpcodes.h:788
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:844
@ BR_CC
BR_CC - Conditional branch.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:796
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:704
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:850
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition ISDOpcodes.h:811
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:888
@ TRAP
TRAP - Trapping instruction.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ CTTZ_ZERO_POISON
Bit counting operators with a poisoned result for zero inputs.
Definition ISDOpcodes.h:787
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:856
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition ISDOpcodes.h:833
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition ISDOpcodes.h:62
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
@ Dead
Unused definition.
@ EarlyClobber
Register definition happens before uses.
@ Define
Register definition.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
Definition STLExtras.h:1151
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:876
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:145
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:381
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:324
void print(raw_ostream &OS) const
Implement operator<<.
Definition ValueTypes.h:512
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:160
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs