#define DEBUG_TYPE "gisel-known-bits"
29 "Analysis for ComputingKnownBits",
false,
true)
32 : MF(MF),
MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()),
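// Typical use, sketched here rather than taken from this file: a GlobalISel
// pass asks the wrapper analysis for the per-function object and then queries
// individual virtual registers.
//   GISelKnownBits &KB = getAnalysis<GISelKnownBitsAnalysis>().get(MF);
//   KnownBits Known = KB.getKnownBits(SomeReg); // SomeReg: any vreg of interest
//   if (Known.isNonNegative()) { /* sign bit known to be zero */ }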
Align GISelKnownBits::computeKnownAlignment(Register R, unsigned Depth) {
  const MachineInstr *MI = MRI.getVRegDef(R);
  switch (MI->getOpcode()) {
  case TargetOpcode::COPY:
    return computeKnownAlignment(MI->getOperand(1).getReg(), Depth);
  case TargetOpcode::G_ASSERT_ALIGN: {
    // The immediate operand is the log2 of the asserted alignment.
    int64_t LogAlign = MI->getOperand(2).getImm();
    return Align(1ull << LogAlign);
  }
  case TargetOpcode::G_FRAME_INDEX: {
    int FrameIdx = MI->getOperand(1).getIndex();
    return MF.getFrameInfo().getObjectAlign(FrameIdx);
  }
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
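// The instruction-level query below only supports generic instructions that
// produce a single value, which is what the assert enforces.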
  assert(MI.getNumExplicitDefs() == 1 &&
         "expected single return generic instruction");

  assert(ComputeKnownBitsCache.empty() && "Cache should have been cleared");

  ComputeKnownBitsCache.clear();
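// The known-bits cache only lives for the duration of one top-level query: it
// must be empty on entry and is cleared again before returning to the caller.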
                    << "] Computed for: " << MI << "[" << Depth << "] Known: 0x"
void GISelKnownBits::computeKnownBitsMin(Register Src0, Register Src1,
                                         KnownBits &Known,
                                         const APInt &DemandedElts,
                                         unsigned Depth) {
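// computeKnownBitsImpl is the recursive worker: it dispatches on the opcode of
// the instruction defining R and accumulates the result into Known.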
void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
                                          const APInt &DemandedElts,
                                          unsigned Depth) {
  unsigned Opcode = MI.getOpcode();

  auto CacheEntry = ComputeKnownBitsCache.find(R);
  if (CacheEntry != ComputeKnownBitsCache.end()) {
    Known = CacheEntry->second;
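    // Reuse a result computed earlier in this query; without the cache, PHI
    // cycles would be revisited over and over.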
  case TargetOpcode::G_BUILD_VECTOR: {
    for (unsigned i = 0, e = MI.getNumOperands() - 1; i < e; ++i) {
      if (!DemandedElts[i])
        continue;
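      // Only demanded vector elements are inspected; the result keeps just the
      // bits that every contributing element agrees on.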
  case TargetOpcode::COPY:
  case TargetOpcode::G_PHI:
  case TargetOpcode::PHI: {
    assert(MI.getOperand(0).getSubReg() == 0 && "Is this code in SSA?");
    for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
      if (SrcReg.isVirtual() && Src.getSubReg() == 0 &&
          MRI.getType(SrcReg).isValid()) {
        computeKnownBitsImpl(SrcReg, Known2, DemandedElts,
                             Depth + (Opcode != TargetOpcode::COPY));
        if (Known.One == 0 && Known.Zero == 0)
          break;
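      // PHI operands alternate between incoming register and predecessor block
      // (hence Idx += 2). Only bits known in every incoming value survive, the
      // walk stops once nothing is known, and plain COPYs do not add depth.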
  case TargetOpcode::G_CONSTANT: {
  case TargetOpcode::G_FRAME_INDEX: {
    int FrameIdx = MI.getOperand(1).getIndex();
  case TargetOpcode::G_SUB: {
  case TargetOpcode::G_XOR: {
  case TargetOpcode::G_PTR_ADD: {
  case TargetOpcode::G_ADD: {
  case TargetOpcode::G_AND: {
  case TargetOpcode::G_OR: {
  case TargetOpcode::G_MUL: {
  case TargetOpcode::G_SELECT: {
    computeKnownBitsMin(MI.getOperand(2).getReg(), MI.getOperand(3).getReg(),
                        Known, DemandedElts, Depth + 1);
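    // A select's result is only known where both selectable values (operands 2
    // and 3) agree, hence the min-style merge.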
  case TargetOpcode::G_SMIN: {
  case TargetOpcode::G_SMAX: {
  case TargetOpcode::G_UMIN: {
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known,
                         DemandedElts, Depth + 1);
    computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS,
                         DemandedElts, Depth + 1);
  case TargetOpcode::G_UMAX: {
    computeKnownBitsImpl(MI.getOperand(1).getReg(), Known,
                         DemandedElts, Depth + 1);
    computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS,
                         DemandedElts, Depth + 1);
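    // The known bits of both operands are then combined with the KnownBits
    // min/max helper matching the opcode.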
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    if (TL.getBooleanContents(DstTy.isVector(),
                              Opcode == TargetOpcode::G_FCMP) ==
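    // Under ZeroOrOneBooleanContent the comparison result has every bit above
    // bit 0 known to be zero.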
  case TargetOpcode::G_SEXT: {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_SEXT_INREG: {
    Known = Known.sextInReg(MI.getOperand(2).getImm());
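    // KnownBits::sextInReg treats bit (Imm - 1) as the sign bit and replicates
    // whatever is known about it into all higher bits.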
  case TargetOpcode::G_ANYEXT: {
  case TargetOpcode::G_LOAD: {
  case TargetOpcode::G_ZEXTLOAD: {
  case TargetOpcode::G_ASHR: {
  case TargetOpcode::G_LSHR: {
  case TargetOpcode::G_SHL: {
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ASSERT_ZEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_TRUNC: {
    unsigned SrcBitWidth;

    if (Opcode == TargetOpcode::G_ASSERT_ZEXT)
      SrcBitWidth = MI.getOperand(2).getImm();

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
  case TargetOpcode::G_ASSERT_ALIGN: {
    int64_t LogOfAlign = MI.getOperand(2).getImm();
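    // A value asserted to be (1 << LogOfAlign)-aligned has its low LogOfAlign
    // bits known to be zero.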
  case TargetOpcode::G_MERGE_VALUES: {
    unsigned NumOps = MI.getNumOperands();

    for (unsigned I = 0; I != NumOps - 1; ++I) {
      computeKnownBitsImpl(MI.getOperand(I + 1).getReg(), SrcOpKnown,
                           DemandedElts, Depth + 1);
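      // Each source of the merge fills one contiguous slice of the wide
      // result, so its known bits are inserted at that slice's bit offset.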
  case TargetOpcode::G_UNMERGE_VALUES: {
    unsigned NumOps = MI.getNumOperands();
    Register SrcReg = MI.getOperand(NumOps - 1).getReg();

    for (; DstIdx != NumOps - 1 && MI.getOperand(DstIdx).getReg() != R;
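    // The loop figures out which def of the unmerge R is (DstIdx); the result
    // is then the matching BitWidth-wide slice of the source's known bits.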
  case TargetOpcode::G_BSWAP: {
  case TargetOpcode::G_BITREVERSE: {
  case TargetOpcode::G_CTPOP: {
    unsigned LowBits = Log2_32(BitsPossiblySet) + 1;
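    // A population count can be at most BitsPossiblySet, so only the low
    // Log2_32(BitsPossiblySet) + 1 bits of the result can ever be set; all
    // higher bits are known zero.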
  case TargetOpcode::G_UBFX: {
    KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
  case TargetOpcode::G_SBFX: {
    KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
    KnownBits ShiftKnown = KnownBits::computeForAddSub(
        /*Add=*/false, /*NSW=*/false, ExtKnown, WidthKnown);
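    // For G_SBFX the extracted field still has to be sign-extended: the
    // subtraction above yields BitWidth - Width, which is then used as the
    // amount for a known-bits shl/ashr pair.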
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_SADDE:
  case TargetOpcode::G_USUBO:
  case TargetOpcode::G_USUBE:
  case TargetOpcode::G_SSUBO:
  case TargetOpcode::G_SSUBE:
  case TargetOpcode::G_UMULO:
  case TargetOpcode::G_SMULO: {
    // Operand 1 is the boolean carry/overflow def of these instructions.
    if (MI.getOperand(1).getReg() == R) {

  ComputeKnownBitsCache[R] = Known;
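  // Fully computed results are memoized in ComputeKnownBitsCache so later
  // queries within the same top-level walk can reuse them.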
unsigned GISelKnownBits::computeNumSignBitsMin(Register Src0, Register Src1,
                                               const APInt &DemandedElts,
                                               unsigned Depth) {
  // Check Src1 first; if it already guarantees only one sign bit, the minimum
  // over both sources is 1 and Src0 does not need to be inspected.
  unsigned Src1SignBits = computeNumSignBits(Src1, DemandedElts, Depth);
  if (Src1SignBits == 1)
    return 1;
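// computeNumSignBits reports how many of R's high bits are known to equal the
// sign bit; 1 is always a safe answer.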
unsigned GISelKnownBits::computeNumSignBits(Register R,
                                            const APInt &DemandedElts,
                                            unsigned Depth) {
  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::G_CONSTANT)
    return MI.getOperand(1).getCImm()->getValue().getNumSignBits();

  unsigned FirstAnswer = 1;
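  // FirstAnswer starts at the conservative default of a single sign bit; the
  // opcode-specific cases below try to prove a larger count.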
  case TargetOpcode::COPY: {
    if (Src.getReg().isVirtual() && Src.getSubReg() == 0 &&
  case TargetOpcode::G_SEXT: {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_SEXT_INREG: {
    unsigned SrcBits = MI.getOperand(2).getImm();
    unsigned InRegBits = TyBits - SrcBits + 1;
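    // A value sign-extended in-register from SrcBits guarantees at least
    // TyBits - SrcBits + 1 identical top bits; the final answer is the larger
    // of this and whatever the source operand already provides.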
  case TargetOpcode::G_SEXTLOAD: {
  case TargetOpcode::G_ZEXTLOAD: {
  case TargetOpcode::G_TRUNC: {
    if (NumSrcSignBits > (NumSrcBits - DstTyBits))
      return NumSrcSignBits - (NumSrcBits - DstTyBits);
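    // Truncation drops NumSrcBits - DstTyBits high bits, so any sign-bit
    // copies beyond that many survive into the narrower value.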
  case TargetOpcode::G_SELECT: {
    return computeNumSignBitsMin(MI.getOperand(2).getReg(),
                                 MI.getOperand(3).getReg(), DemandedElts,
                                 Depth + 1);
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_SADDE:
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_SSUBO:
  case TargetOpcode::G_SSUBE:
  case TargetOpcode::G_USUBO:
  case TargetOpcode::G_USUBE:
  case TargetOpcode::G_SMULO:
  case TargetOpcode::G_UMULO: {
    if (MI.getOperand(1).getReg() == R) {

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  default: {
    unsigned NumBits =
        TL.computeNumSignBitsForTargetInstr(*this, R, DemandedElts, MRI, Depth);
    if (NumBits > 1)
      FirstAnswer = std::max(FirstAnswer, NumBits);

  Mask <<= Mask.getBitWidth() - TyBits;
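  // Once the known bits prove the top of R is all zeros or all ones, Mask
  // holds those bits; after shifting so the type's sign bit sits at the MSB,
  // the count of leading ones in Mask gives the number of identical top bits,
  // which is combined with FirstAnswer.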