//===- VPlanPatternMatch.h - Match on VPValues and recipes ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides a simple and efficient mechanism for performing general
// tree-based pattern matches on the VPlan values and recipes, based on
// LLVM's IR pattern matchers.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORM_VECTORIZE_VPLANPATTERNMATCH_H
#define LLVM_TRANSFORM_VECTORIZE_VPLANPATTERNMATCH_H

#include "VPlan.h"

namespace llvm::VPlanPatternMatch {

template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
  return P.match(V);
}

template <typename Pattern> bool match(VPUser *U, const Pattern &P) {
  auto *R = dyn_cast<VPRecipeBase>(U);
  return R && match(R, P);
}

template <typename Pattern> bool match(VPSingleDefRecipe *R, const Pattern &P) {
  return P.match(static_cast<const VPRecipeBase *>(R));
}

template <typename Val, typename Pattern> struct VPMatchFunctor {
  const Pattern &P;
  VPMatchFunctor(const Pattern &P) : P(P) {}
  bool operator()(Val *V) const { return match(V, P); }
};

/// A match functor that can be used as a UnaryPredicate in functional
/// algorithms like all_of.
template <typename Val = VPUser, typename Pattern>
VPMatchFunctor<Val, Pattern> match_fn(const Pattern &P) {
  return P;
}

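// Example usage (illustrative; `Def` is a placeholder VPValue, and the m_*
// matchers used here are defined later in this file):
//
//   VPValue *X;
//   if (match(Def, m_Mul(m_VPValue(X), m_SpecificInt(2))))
//     ...; // X is bound to the first operand of the multiply.
//
//   // match_fn wraps a pattern into a unary predicate for functional
//   // algorithms:
//   bool AllUsersNegate = all_of(Def->users(), match_fn(m_Not(m_Specific(Def))));
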
template <typename Class> struct class_match {
  template <typename ITy> bool match(ITy *V) const { return isa<Class>(V); }
};

/// Match an arbitrary VPValue and ignore it.
inline class_match<VPValue> m_VPValue() { return class_match<VPValue>(); }

template <typename Class> struct bind_ty {
  Class *&VR;

  bind_ty(Class *&V) : VR(V) {}

  template <typename ITy> bool match(ITy *V) const {
    if (auto *CV = dyn_cast<Class>(V)) {
      VR = CV;
      return true;
    }
    return false;
  }
};

/// Match a specified VPValue.
struct specificval_ty {
  const VPValue *Val;

  specificval_ty(const VPValue *V) : Val(V) {}

  bool match(VPValue *VPV) const { return VPV == Val; }
};

inline specificval_ty m_Specific(const VPValue *VPV) { return VPV; }

/// Stores a reference to the VPValue *, not the VPValue * itself,
/// thus can be used in commutative matchers.
struct deferredval_ty {
  VPValue *const &Val;

  deferredval_ty(VPValue *const &V) : Val(V) {}

  bool match(VPValue *const V) const { return V == Val; }
};

/// Like m_Specific(), but works if the specific value to match is determined
/// as part of the same match() expression. For example:
/// m_Mul(m_VPValue(X), m_Specific(X)) is incorrect, because m_Specific() will
/// bind X before the pattern match starts.
/// m_Mul(m_VPValue(X), m_Deferred(X)) is correct, and will check against
/// whichever value m_VPValue(X) populated.
inline deferredval_ty m_Deferred(VPValue *const &V) { return V; }

/// Match an integer constant or vector of constants if Pred::isValue returns
/// true for the APInt. \p BitWidth optionally specifies the bitwidth the
/// matched constant must have. If it is 0, the matched constant can have any
/// bitwidth.
template <typename Pred, unsigned BitWidth = 0> struct int_pred_ty {
  Pred P;

  int_pred_ty(Pred P) : P(std::move(P)) {}
  int_pred_ty() : P() {}

  bool match(VPValue *VPV) const {
    auto *VPI = dyn_cast<VPInstruction>(VPV);
    if (VPI && VPI->getOpcode() == VPInstruction::Broadcast)
      VPV = VPI->getOperand(0);
    auto *IRV = dyn_cast<VPIRValue>(VPV);
    if (!IRV)
      return false;
    assert(!IRV->getType()->isVectorTy() && "Unexpected vector live-in");
    const auto *CI = dyn_cast<ConstantInt>(IRV->getValue());
    if (!CI)
      return false;

    if (BitWidth != 0 && CI->getBitWidth() != BitWidth)
      return false;
    return P.isValue(CI->getValue());
  }
};

/// Match a specified integer value or vector of all elements of that
/// value. \p BitWidth optionally specifies the bitwidth the matched constant
/// must have. If it is 0, the matched constant can have any bitwidth.
struct is_specific_int {
  APInt Val;

  is_specific_int(APInt Val) : Val(std::move(Val)) {}

  bool isValue(const APInt &C) const { return APInt::isSameValue(Val, C); }
};

template <unsigned Bitwidth = 0>
using specific_intval = int_pred_ty<is_specific_int, Bitwidth>;

inline specific_intval<0> m_SpecificInt(uint64_t V) {
  return specific_intval<0>(is_specific_int(APInt(64, V)));
}

inline specific_intval<1> m_False() {
  return specific_intval<1>(is_specific_int(APInt(64, 0)));
}

inline specific_intval<1> m_True() {
  return specific_intval<1>(is_specific_int(APInt(64, 1)));
}

struct is_all_ones {
  bool isValue(const APInt &C) const { return C.isAllOnes(); }
};

/// Match an integer or vector with all bits set.
/// For vectors, this includes constants with undefined elements.
inline int_pred_ty<is_all_ones> m_AllOnes() {
  return int_pred_ty<is_all_ones>();
}

struct is_zero_int {
  bool isValue(const APInt &C) const { return C.isZero(); }
};

struct is_one {
  bool isValue(const APInt &C) const { return C.isOne(); }
};

/// Match an integer 0 or a vector with all elements equal to 0.
/// For vectors, this includes constants with undefined elements.
inline int_pred_ty<is_zero_int> m_ZeroInt() {
  return int_pred_ty<is_zero_int>();
}

/// Match an integer 1 or a vector with all elements equal to 1.
/// For vectors, this includes constants with undefined elements.
inline int_pred_ty<is_one> m_One() { return int_pred_ty<is_one>(); }

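// Example usage (illustrative; `Op` is a placeholder VPValue):
//
//   if (match(Op, m_SpecificInt(1)))
//     ...; // live-in ConstantInt with value 1, any bitwidth.
//   if (match(Op, m_ZeroInt()))
//     ...; // any integer zero.
//
// As implemented in int_pred_ty::match above, these matchers also look
// through a VPInstruction::Broadcast of the constant.
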
struct bind_apint {
  const APInt *&Res;

  bind_apint(const APInt *&Res) : Res(Res) {}

  bool match(VPValue *VPV) const {
    auto *IRV = dyn_cast<VPIRValue>(VPV);
    if (!IRV)
      return false;
    assert(!IRV->getType()->isVectorTy() && "Unexpected vector live-in");
    const auto *CI = dyn_cast<ConstantInt>(IRV->getValue());
    if (!CI)
      return false;
    Res = &CI->getValue();
    return true;
  }
};

inline bind_apint m_APInt(const APInt *&C) { return C; }

struct bind_const_int {
  uint64_t &Res;

  bind_const_int(uint64_t &Res) : Res(Res) {}

  bool match(VPValue *VPV) const {
    const APInt *APConst;
    if (!bind_apint(APConst).match(VPV))
      return false;
    if (auto C = APConst->tryZExtValue()) {
      Res = *C;
      return true;
    }
    return false;
  }
};

/// Match a plain integer constant no wider than 64-bits, capturing it if we
/// match.
inline bind_const_int m_ConstantInt(uint64_t &Res) { return Res; }

/// Matching combinators
template <typename LTy, typename RTy> struct match_combine_or {
  LTy L;
  RTy R;

  match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  template <typename ITy> bool match(ITy *V) const {
    return L.match(V) || R.match(V);
  }
};

template <typename LTy, typename RTy> struct match_combine_and {
  LTy L;
  RTy R;

  match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  template <typename ITy> bool match(ITy *V) const {
    return L.match(V) && R.match(V);
  }
};

/// Combine two pattern matchers matching L || R
template <typename LTy, typename RTy>
inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
  return match_combine_or<LTy, RTy>(L, R);
}

/// Combine two pattern matchers matching L && R
template <typename LTy, typename RTy>
inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
  return match_combine_and<LTy, RTy>(L, R);
}

/// Match a VPValue, capturing it if we match.
inline bind_ty<VPValue> m_VPValue(VPValue *&V) { return V; }

/// Match a VPInstruction, capturing it if we match.
inline bind_ty<VPInstruction> m_VPInstruction(VPInstruction *&V) { return V; }

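// Example usage (illustrative; `R` is a placeholder recipe):
//
//   VPValue *A;
//   if (match(R, m_CombineOr(m_ZExt(m_VPValue(A)), m_SExt(m_VPValue(A)))))
//     ...; // A is bound to the extended operand; equivalent to the
//          // m_ZExtOrSExt helper defined below.
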
template <typename Ops_t, unsigned Opcode, bool Commutative,
          typename... RecipeTys>
struct Recipe_match {
  Ops_t Ops;

  template <typename... OpTy> Recipe_match(OpTy... Ops) : Ops(Ops...) {
    static_assert(std::tuple_size<Ops_t>::value == sizeof...(Ops) &&
                  "number of operands in constructor doesn't match Ops_t");
    static_assert((!Commutative || std::tuple_size<Ops_t>::value == 2) &&
                  "only binary ops can be commutative");
  }

  bool match(const VPValue *V) const {
    auto *DefR = V->getDefiningRecipe();
    return DefR && match(DefR);
  }

  bool match(const VPSingleDefRecipe *R) const {
    return match(static_cast<const VPRecipeBase *>(R));
  }

  bool match(const VPRecipeBase *R) const {
    if (std::tuple_size_v<Ops_t> == 0) {
      auto *VPI = dyn_cast<VPInstruction>(R);
      return VPI && VPI->getOpcode() == Opcode;
    }

    if ((!matchRecipeAndOpcode<RecipeTys>(R) && ...))
      return false;

    if (R->getNumOperands() != std::tuple_size_v<Ops_t>) {
      [[maybe_unused]] auto *RepR = dyn_cast<VPReplicateRecipe>(R);
      assert((Opcode == Instruction::PHI ||
              (RepR && std::tuple_size_v<Ops_t> ==
                           RepR->getNumOperands() - RepR->isPredicated())) &&
             "non-variadic recipe with matched opcode does not have the "
             "expected number of operands");
      return false;
    }

    auto IdxSeq = std::make_index_sequence<std::tuple_size<Ops_t>::value>();
    if (all_of_tuple_elements(IdxSeq, [R](auto Op, unsigned Idx) {
          return Op.match(R->getOperand(Idx));
        }))
      return true;

    return Commutative &&
           all_of_tuple_elements(IdxSeq, [R](auto Op, unsigned Idx) {
             return Op.match(R->getOperand(R->getNumOperands() - Idx - 1));
           });
  }

private:
  template <typename RecipeTy>
  static bool matchRecipeAndOpcode(const VPRecipeBase *R) {
    auto *DefR = dyn_cast<RecipeTy>(R);
    // Check for recipes that do not have opcodes.
    if constexpr (std::is_same_v<RecipeTy, VPScalarIVStepsRecipe> ||
                  std::is_same_v<RecipeTy, VPCanonicalIVPHIRecipe> ||
                  std::is_same_v<RecipeTy, VPDerivedIVRecipe> ||
                  std::is_same_v<RecipeTy, VPVectorEndPointerRecipe>)
      return DefR;
    else
      return DefR && DefR->getOpcode() == Opcode;
  }

  /// Helper to check if predicate \p P holds on all tuple elements in Ops using
  /// the provided index sequence.
  template <typename Fn, std::size_t... Is>
  bool all_of_tuple_elements(std::index_sequence<Is...>, Fn P) const {
    return (P(std::get<Is>(Ops), Is) && ...);
  }
};

template <unsigned Opcode, typename... OpTys>
using AllRecipe_match =
    Recipe_match<std::tuple<OpTys...>, Opcode, /*Commutative*/ false,
                 VPWidenRecipe, VPReplicateRecipe, VPWidenCastRecipe,
                 VPInstruction>;

template <unsigned Opcode, typename... OpTys>
using AllRecipe_commutative_match =
    Recipe_match<std::tuple<OpTys...>, Opcode, /*Commutative*/ true,
                 VPWidenRecipe, VPReplicateRecipe, VPInstruction>;

template <unsigned Opcode, typename... OpTys>
using VPInstruction_match = Recipe_match<std::tuple<OpTys...>, Opcode,
                                         /*Commutative*/ false, VPInstruction>;

template <unsigned Opcode, typename... OpTys>
inline VPInstruction_match<Opcode, OpTys...>
m_VPInstruction(const OpTys &...Ops) {
  return VPInstruction_match<Opcode, OpTys...>(Ops...);
}

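// Example usage (illustrative; `R` is a placeholder recipe or VPValue):
//
//   VPValue *Cond, *TC;
//   if (match(R, m_VPInstruction<VPInstruction::ActiveLaneMask>(
//                    m_VPValue(Cond), m_VPValue(TC), m_VPValue())))
//     ...; // matches an ActiveLaneMask VPInstruction and binds two operands.
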
/// BuildVector matches only its opcode; its operands are not matched, as the
/// number of operands is not fixed.
inline VPInstruction_match<VPInstruction::BuildVector> m_BuildVector() {
  return m_VPInstruction<VPInstruction::BuildVector>();
}

template <typename Op0_t>
inline VPInstruction_match<Instruction::Freeze, Op0_t>
m_Freeze(const Op0_t &Op0) {
  return m_VPInstruction<Instruction::Freeze>(Op0);
}

inline VPInstruction_match<VPInstruction::BranchOnCond> m_BranchOnCond() {
  return m_VPInstruction<VPInstruction::BranchOnCond>();
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::BranchOnCond, Op0_t>
m_BranchOnCond(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::BranchOnCond>(Op0);
}

inline VPInstruction_match<VPInstruction::BranchOnTwoConds>
m_BranchOnTwoConds() {
  return m_VPInstruction<VPInstruction::BranchOnTwoConds>();
}

template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<VPInstruction::BranchOnTwoConds, Op0_t, Op1_t>
m_BranchOnTwoConds(const Op0_t &Op0, const Op1_t &Op1) {
  return m_VPInstruction<VPInstruction::BranchOnTwoConds>(Op0, Op1);
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::Broadcast, Op0_t>
m_Broadcast(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::Broadcast>(Op0);
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExplicitVectorLength, Op0_t>
m_EVL(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::ExplicitVectorLength>(Op0);
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExtractLastLane, Op0_t>
m_ExtractLastLane(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::ExtractLastLane>(Op0);
}

template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<Instruction::ExtractElement, Op0_t, Op1_t>
m_ExtractElement(const Op0_t &Op0, const Op1_t &Op1) {
  return m_VPInstruction<Instruction::ExtractElement>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<VPInstruction::ExtractLane, Op0_t, Op1_t>
m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1) {
  return m_VPInstruction<VPInstruction::ExtractLane>(Op0, Op1);
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExtractLastPart, Op0_t>
m_ExtractLastPart(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::ExtractLastPart>(Op0);
}

template <typename Op0_t>
inline VPInstruction_match<
    VPInstruction::ExtractLastLane,
    VPInstruction_match<VPInstruction::ExtractLastPart, Op0_t>>
m_ExtractLastLaneOfLastPart(const Op0_t &Op0) {
  return m_ExtractLastLane(m_ExtractLastPart(Op0));
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExtractPenultimateElement, Op0_t>
m_ExtractPenultimateElement(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::ExtractPenultimateElement>(Op0);
}

template <typename Op0_t, typename Op1_t, typename Op2_t>
inline VPInstruction_match<VPInstruction::ActiveLaneMask, Op0_t, Op1_t, Op2_t>
m_ActiveLaneMask(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return m_VPInstruction<VPInstruction::ActiveLaneMask>(Op0, Op1, Op2);
}

inline VPInstruction_match<VPInstruction::BranchOnCount> m_BranchOnCount() {
  return m_VPInstruction<VPInstruction::BranchOnCount>();
}

template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<VPInstruction::BranchOnCount, Op0_t, Op1_t>
m_BranchOnCount(const Op0_t &Op0, const Op1_t &Op1) {
  return m_VPInstruction<VPInstruction::BranchOnCount>(Op0, Op1);
}

inline VPInstruction_match<VPInstruction::AnyOf> m_AnyOf() {
  return m_VPInstruction<VPInstruction::AnyOf>();
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::AnyOf, Op0_t>
m_AnyOf(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::AnyOf>(Op0);
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::FirstActiveLane, Op0_t>
m_FirstActiveLane(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::FirstActiveLane>(Op0);
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::LastActiveLane, Op0_t>
m_LastActiveLane(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::LastActiveLane>(Op0);
}

template <typename Op0_t>
inline VPInstruction_match<VPInstruction::Reverse, Op0_t>
m_Reverse(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::Reverse>(Op0);
}

inline VPInstruction_match<VPInstruction::StepVector> m_StepVector() {
  return m_VPInstruction<VPInstruction::StepVector>();
}

template <unsigned Opcode, typename Op0_t>
inline AllRecipe_match<Opcode, Op0_t> m_Unary(const Op0_t &Op0) {
  return AllRecipe_match<Opcode, Op0_t>(Op0);
}

template <typename Op0_t>
inline AllRecipe_match<Instruction::Trunc, Op0_t> m_Trunc(const Op0_t &Op0) {
  return m_Unary<Instruction::Trunc>(Op0);
}

template <typename Op0_t>
inline match_combine_or<AllRecipe_match<Instruction::Trunc, Op0_t>, Op0_t>
m_TruncOrSelf(const Op0_t &Op0) {
  return m_CombineOr(m_Trunc(Op0), Op0);
}

template <typename Op0_t>
inline AllRecipe_match<Instruction::SExt, Op0_t> m_SExt(const Op0_t &Op0) {
  return m_Unary<Instruction::SExt>(Op0);
}

template <typename Op0_t>
inline AllRecipe_match<Instruction::ZExt, Op0_t> m_ZExt(const Op0_t &Op0) {
  return m_Unary<Instruction::ZExt>(Op0);
}

template <typename Op0_t>
inline match_combine_or<AllRecipe_match<Instruction::ZExt, Op0_t>,
                        AllRecipe_match<Instruction::SExt, Op0_t>>
m_ZExtOrSExt(const Op0_t &Op0) {
  return m_CombineOr(m_ZExt(Op0), m_SExt(Op0));
}

template <typename Op0_t>
inline match_combine_or<AllRecipe_match<Instruction::ZExt, Op0_t>, Op0_t>
m_ZExtOrSelf(const Op0_t &Op0) {
  return m_CombineOr(m_ZExt(Op0), Op0);
}

template <unsigned Opcode, typename Op0_t, typename Op1_t>
inline AllRecipe_match<Opcode, Op0_t, Op1_t> m_Binary(const Op0_t &Op0,
                                                      const Op1_t &Op1) {
  return AllRecipe_match<Opcode, Op0_t, Op1_t>(Op0, Op1);
}

template <unsigned Opcode, typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Opcode, Op0_t, Op1_t>
m_c_Binary(const Op0_t &Op0, const Op1_t &Op1) {
  return AllRecipe_commutative_match<Opcode, Op0_t, Op1_t>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Add, Op0_t, Op1_t> m_Add(const Op0_t &Op0,
                                                             const Op1_t &Op1) {
  return m_Binary<Instruction::Add>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::Add, Op0_t, Op1_t>
m_c_Add(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::Add>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Sub, Op0_t, Op1_t> m_Sub(const Op0_t &Op0,
                                                             const Op1_t &Op1) {
  return m_Binary<Instruction::Sub>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Mul, Op0_t, Op1_t> m_Mul(const Op0_t &Op0,
                                                             const Op1_t &Op1) {
  return m_Binary<Instruction::Mul>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::Mul, Op0_t, Op1_t>
m_c_Mul(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::Mul>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::UDiv, Op0_t, Op1_t>
m_UDiv(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Binary<Instruction::UDiv>(Op0, Op1);
}

/// Match a binary AND operation.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::And, Op0_t, Op1_t>
m_c_BinaryAnd(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::And>(Op0, Op1);
}

/// Match a binary OR operation. Note that while conceptually the operands can
/// be matched commutatively, \p Commutative defaults to false in line with the
/// IR-based pattern matching infrastructure. Use m_c_BinaryOr for a commutative
/// version of the matcher.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Or, Op0_t, Op1_t>
m_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Binary<Instruction::Or>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::Or, Op0_t, Op1_t>
m_c_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::Or>(Op0, Op1);
}

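// Example usage (illustrative; `R` is a placeholder recipe):
//
//   VPValue *X;
//   // The m_c_* variants also try the operands in swapped order:
//   if (match(R, m_c_Mul(m_VPValue(X), m_SpecificInt(0))))
//     ...; // matches both X * 0 and 0 * X.
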
/// Cmp_match is a variant of BinaryRecipe_match that also binds the comparison
/// predicate. Opcodes must be Instruction::ICmp, Instruction::FCmp, or both.
template <typename Op0_t, typename Op1_t, unsigned... Opcodes>
struct Cmp_match {
  static_assert((sizeof...(Opcodes) == 1 || sizeof...(Opcodes) == 2) &&
                "Expected one or two opcodes");
  static_assert(
      ((Opcodes == Instruction::ICmp || Opcodes == Instruction::FCmp) && ...) &&
      "Expected a compare instruction opcode");

  CmpPredicate *Predicate = nullptr;
  Op0_t Op0;
  Op1_t Op1;

  Cmp_match(CmpPredicate &Pred, const Op0_t &Op0, const Op1_t &Op1)
      : Predicate(&Pred), Op0(Op0), Op1(Op1) {}
  Cmp_match(const Op0_t &Op0, const Op1_t &Op1) : Op0(Op0), Op1(Op1) {}

  bool match(const VPValue *V) const {
    auto *DefR = V->getDefiningRecipe();
    return DefR && match(DefR);
  }

  bool match(const VPRecipeBase *V) const {
    if ((m_Binary<Opcodes>(Op0, Op1).match(V) || ...)) {
      if (Predicate)
        *Predicate = cast<VPRecipeWithIRFlags>(V)->getPredicate();
      return true;
    }
    return false;
  }
};

/// SpecificCmp_match is a variant of Cmp_match that matches the comparison
/// predicate, instead of binding it.
template <typename Op0_t, typename Op1_t, unsigned... Opcodes>
struct SpecificCmp_match {
  CmpPredicate Predicate;
  Op0_t Op0;
  Op1_t Op1;

  SpecificCmp_match(CmpPredicate Pred, const Op0_t &LHS, const Op1_t &RHS)
      : Predicate(Pred), Op0(LHS), Op1(RHS) {}

  bool match(const VPValue *V) const {
    CmpPredicate CurrentPred;
    return Cmp_match<Op0_t, Op1_t, Opcodes...>(CurrentPred, Op0, Op1)
               .match(V) &&
           CmpPredicate::getMatching(CurrentPred, Predicate);
  }
};

template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp> m_ICmp(const Op0_t &Op0,
                                                         const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp>
m_ICmp(CmpPredicate &Pred, const Op0_t &Op0, const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp>(Pred, Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp>
m_SpecificICmp(CmpPredicate MatchPred, const Op0_t &Op0, const Op1_t &Op1) {
  return SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp>(MatchPred, Op0,
                                                            Op1);
}

template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>
m_Cmp(const Op0_t &Op0, const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>(Op0,
                                                                       Op1);
}

template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>
m_Cmp(CmpPredicate &Pred, const Op0_t &Op0, const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>(
      Pred, Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>
m_SpecificCmp(CmpPredicate MatchPred, const Op0_t &Op0, const Op1_t &Op1) {
  return SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>(
      MatchPred, Op0, Op1);
}

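// Example usage (illustrative; `R` is a placeholder recipe):
//
//   CmpPredicate Pred;
//   VPValue *A, *B;
//   if (match(R, m_ICmp(Pred, m_VPValue(A), m_VPValue(B))))
//     ...; // Pred is bound to the predicate of the matched compare.
//   if (match(R, m_SpecificICmp(CmpInst::ICMP_ULT, m_VPValue(A), m_VPValue(B))))
//     ...; // only matches an unsigned-less-than compare.
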
template <typename Op0_t, typename Op1_t>
using GEPLikeRecipe_match = match_combine_or<
    Recipe_match<std::tuple<Op0_t, Op1_t>, Instruction::GetElementPtr,
                 /*Commutative*/ false, VPReplicateRecipe, VPWidenGEPRecipe>,
    match_combine_or<
        VPInstruction_match<VPInstruction::PtrAdd, Op0_t, Op1_t>,
        VPInstruction_match<VPInstruction::WidePtrAdd, Op0_t, Op1_t>>>;

template <typename Op0_t, typename Op1_t>
inline GEPLikeRecipe_match<Op0_t, Op1_t> m_GetElementPtr(const Op0_t &Op0,
                                                         const Op1_t &Op1) {
  return m_CombineOr(
      Recipe_match<std::tuple<Op0_t, Op1_t>, Instruction::GetElementPtr,
                   /*Commutative*/ false, VPReplicateRecipe, VPWidenGEPRecipe>(
          Op0, Op1),
      m_CombineOr(
          m_VPInstruction<VPInstruction::PtrAdd>(Op0, Op1),
          m_VPInstruction<VPInstruction::WidePtrAdd>(Op0, Op1)));
}

template <typename Op0_t, typename Op1_t, typename Op2_t>
inline AllRecipe_match<Instruction::Select, Op0_t, Op1_t, Op2_t>
m_Select(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return AllRecipe_match<Instruction::Select, Op0_t, Op1_t, Op2_t>(
      {Op0, Op1, Op2});
}

template <typename Op0_t>
inline match_combine_or<VPInstruction_match<VPInstruction::Not, Op0_t>,
                        AllRecipe_commutative_match<
                            Instruction::Xor, int_pred_ty<is_all_ones>, Op0_t>>
m_Not(const Op0_t &Op0) {
  return m_CombineOr(m_VPInstruction<VPInstruction::Not>(Op0),
                     m_c_Binary<Instruction::Xor>(m_AllOnes(), Op0));
}

template <typename Op0_t, typename Op1_t>
inline match_combine_or<
    AllRecipe_commutative_match<Instruction::And, Op0_t, Op1_t>,
    AllRecipe_match<Instruction::Select, Op0_t, Op1_t, specific_intval<1>>>
m_LogicalAnd(const Op0_t &Op0, const Op1_t &Op1) {
  return m_CombineOr(
      m_c_BinaryAnd(Op0, Op1),
      m_Select(Op0, Op1, m_False()));
}

template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Select, Op0_t, specific_intval<1>, Op1_t>
m_LogicalOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Select(Op0, m_True(), Op1);
}

template <typename Op0_t, typename Op1_t, typename Op2_t>
using VPScalarIVSteps_match = Recipe_match<std::tuple<Op0_t, Op1_t, Op2_t>, 0,
                                           false, VPScalarIVStepsRecipe>;

template <typename Op0_t, typename Op1_t, typename Op2_t>
inline VPScalarIVSteps_match<Op0_t, Op1_t, Op2_t>
m_ScalarIVSteps(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return VPScalarIVSteps_match<Op0_t, Op1_t, Op2_t>({Op0, Op1, Op2});
}

template <typename Op0_t, typename Op1_t, typename Op2_t>
using VPDerivedIV_match =
    Recipe_match<std::tuple<Op0_t, Op1_t, Op2_t>, 0, false, VPDerivedIVRecipe>;

template <typename Op0_t, typename Op1_t, typename Op2_t>
inline VPDerivedIV_match<Op0_t, Op1_t, Op2_t>
m_DerivedIV(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return VPDerivedIV_match<Op0_t, Op1_t, Op2_t>({Op0, Op1, Op2});
}

template <typename Addr_t, typename Mask_t> struct Load_match {
  Addr_t Addr;
  Mask_t Mask;

  Load_match(Addr_t Addr, Mask_t Mask) : Addr(Addr), Mask(Mask) {}

  template <typename OpTy> bool match(const OpTy *V) const {
    auto *Load = dyn_cast<VPWidenLoadRecipe>(V);
    if (!Load || !Addr.match(Load->getAddr()) || !Load->isMasked() ||
        !Mask.match(Load->getMask()))
      return false;
    return true;
  }
};

/// Match a (possibly reversed) masked load.
template <typename Addr_t, typename Mask_t>
inline Load_match<Addr_t, Mask_t> m_MaskedLoad(const Addr_t &Addr,
                                               const Mask_t &Mask) {
  return Load_match<Addr_t, Mask_t>(Addr, Mask);
}

template <typename Addr_t, typename Val_t, typename Mask_t> struct Store_match {
  Addr_t Addr;
  Val_t Val;
  Mask_t Mask;

  Store_match(Addr_t Addr, Val_t Val, Mask_t Mask)
      : Addr(Addr), Val(Val), Mask(Mask) {}

  template <typename OpTy> bool match(const OpTy *V) const {
    auto *Store = dyn_cast<VPWidenStoreRecipe>(V);
    if (!Store || !Addr.match(Store->getAddr()) ||
        !Val.match(Store->getStoredValue()) || !Store->isMasked() ||
        !Mask.match(Store->getMask()))
      return false;
    return true;
  }
};

/// Match a (possibly reversed) masked store.
template <typename Addr_t, typename Val_t, typename Mask_t>
inline Store_match<Addr_t, Val_t, Mask_t>
m_MaskedStore(const Addr_t &Addr, const Val_t &Val, const Mask_t &Mask) {
  return Store_match<Addr_t, Val_t, Mask_t>(Addr, Val, Mask);
}

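// Example usage (illustrative; `R` is a placeholder recipe):
//
//   VPValue *Addr, *Mask, *StoredVal;
//   if (match(R, m_MaskedLoad(m_VPValue(Addr), m_VPValue(Mask))))
//     ...; // masked VPWidenLoadRecipe.
//   if (match(R, m_MaskedStore(m_VPValue(Addr), m_VPValue(StoredVal),
//                              m_VPValue(Mask))))
//     ...; // masked VPWidenStoreRecipe.
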
template <typename Op0_t, typename Op1_t>
using VectorEndPointerRecipe_match =
    Recipe_match<std::tuple<Op0_t, Op1_t>, 0,
                 /*Commutative*/ false, VPVectorEndPointerRecipe>;

template <typename Op0_t, typename Op1_t>
inline VectorEndPointerRecipe_match<Op0_t, Op1_t>
m_VecEndPtr(const Op0_t &Op0, const Op1_t &Op1) {
  return VectorEndPointerRecipe_match<Op0_t, Op1_t>({Op0, Op1});
}

/// Match a call argument at a given argument index.
template <typename Opnd_t> struct Argument_match {
  /// Call argument index to match.
  unsigned OpI;
  Opnd_t Val;

  Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}

  template <typename OpTy> bool match(OpTy *V) const {
    if (const auto *R = dyn_cast<VPWidenIntrinsicRecipe>(V))
      return Val.match(R->getOperand(OpI));
    if (const auto *R = dyn_cast<VPWidenCallRecipe>(V))
      return Val.match(R->getOperand(OpI));
    if (const auto *R = dyn_cast<VPReplicateRecipe>(V))
      if (R->getOpcode() == Instruction::Call)
        return Val.match(R->getOperand(OpI));
    if (const auto *R = dyn_cast<VPInstruction>(V))
      if (R->getOpcode() == Instruction::Call)
        return Val.match(R->getOperand(OpI));
    return false;
  }
};

/// Match a call argument.
template <unsigned OpI, typename Opnd_t>
inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
  return Argument_match<Opnd_t>(OpI, Op);
}

/// Intrinsic matchers.
struct IntrinsicID_match {
  unsigned ID;

  IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}

  template <typename OpTy> bool match(OpTy *V) const {
    if (const auto *R = dyn_cast<VPWidenIntrinsicRecipe>(V))
      return R->getVectorIntrinsicID() == ID;
    if (const auto *R = dyn_cast<VPWidenCallRecipe>(V))
      return R->getCalledScalarFunction()->getIntrinsicID() == ID;

    auto MatchCalleeIntrinsic = [&](VPValue *CalleeOp) {
      if (!isa<VPIRValue>(CalleeOp))
        return false;
      auto *F = cast<Function>(CalleeOp->getLiveInIRValue());
      return F->getIntrinsicID() == ID;
    };
    if (const auto *R = dyn_cast<VPReplicateRecipe>(V))
      if (R->getOpcode() == Instruction::Call) {
        // The mask is always the last operand if predicated.
        return MatchCalleeIntrinsic(
            R->getOperand(R->getNumOperands() - 1 - R->isPredicated()));
      }
    if (const auto *R = dyn_cast<VPInstruction>(V))
      if (R->getOpcode() == Instruction::Call)
        return MatchCalleeIntrinsic(R->getOperand(R->getNumOperands() - 1));
    return false;
  }
};

/// Intrinsic matches are combinations of ID matchers, and argument
/// matchers. Higher arity matchers are defined recursively in terms of and-ing
/// them with lower arity matchers. Here are some convenient typedefs for up to
/// several arguments; more can be added as needed.
template <typename T0 = void, typename T1 = void, typename T2 = void,
          typename T3 = void>
struct m_Intrinsic_Ty;
template <typename T0> struct m_Intrinsic_Ty<T0> {
  using Ty = match_combine_and<IntrinsicID_match, Argument_match<T0>>;
};
template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
  using Ty =
      match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>;
};
template <typename T0, typename T1, typename T2>
struct m_Intrinsic_Ty<T0, T1, T2> {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
                               Argument_match<T2>>;
};
template <typename T0, typename T1, typename T2, typename T3>
struct m_Intrinsic_Ty {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
                               Argument_match<T3>>;
};

/// Match intrinsic calls like this:
/// m_Intrinsic<Intrinsic::fabs>(m_VPValue(X), ...)
template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
  return IntrinsicID_match(IntrID);
}

/// Match intrinsic calls with a runtime intrinsic ID.
inline IntrinsicID_match m_Intrinsic(Intrinsic::ID IntrID) {
  return IntrinsicID_match(IntrID);
}

template <Intrinsic::ID IntrID, typename T0>
inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
  return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
}

template <Intrinsic::ID IntrID, typename T0, typename T1>
inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
                                                       const T1 &Op1) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
}

template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
}

template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
          typename T3>
inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
}

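// Example usage (illustrative; `R` is a placeholder recipe):
//
//   VPValue *A, *B;
//   if (match(R, m_Intrinsic<Intrinsic::umax>(m_VPValue(A), m_VPValue(B))))
//     ...; // widened, replicated or scalar call to llvm.umax with both
//          // arguments bound.
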
  template <typename ITy> bool match(ITy *V) const {
  }
};

template <typename SubPattern_t> struct OneUse_match {
  SubPattern_t SubPattern;

  OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {}

  template <typename OpTy> bool match(OpTy *V) {
    return V->hasOneUse() && SubPattern.match(V);
  }
};

template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
  return SubPattern;
}

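// Example usage (illustrative; `V` is a placeholder VPValue):
//
//   VPValue *X;
//   if (match(V, m_OneUse(m_Not(m_VPValue(X)))))
//     ...; // only matches if the negation result has a single user.
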
} // namespace llvm::VPlanPatternMatch

#endif