AArch64TargetTransformInfo.cpp
1//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9 #include "AArch64TargetTransformInfo.h"
10 #include "AArch64ExpandImm.h"
20#include "llvm/IR/Intrinsics.h"
21#include "llvm/IR/IntrinsicsAArch64.h"
23#include "llvm/Support/Debug.h"
26#include <algorithm>
27#include <optional>
28using namespace llvm;
29using namespace llvm::PatternMatch;
30
31#define DEBUG_TYPE "aarch64tti"
32
33static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
34 cl::init(true), cl::Hidden);
35
36 static cl::opt<unsigned> SVEGatherOverhead("sve-gather-overhead", cl::init(10),
37                                            cl::Hidden);
38
39static cl::opt<unsigned> SVEScatterOverhead("sve-scatter-overhead",
40 cl::init(10), cl::Hidden);
41
42static cl::opt<unsigned> SVETailFoldInsnThreshold("sve-tail-folding-insn-threshold",
43 cl::init(15), cl::Hidden);
44
45 static cl::opt<unsigned>
46     NeonNonConstStrideOverhead("neon-nonconst-stride-overhead", cl::init(10),
47                                cl::Hidden);
48
50 "call-penalty-sm-change", cl::init(5), cl::Hidden,
52 "Penalty of calling a function that requires a change to PSTATE.SM"));
53
55 "inline-call-penalty-sm-change", cl::init(10), cl::Hidden,
56 cl::desc("Penalty of inlining a call that requires a change to PSTATE.SM"));
57
58static cl::opt<bool> EnableOrLikeSelectOpt("enable-aarch64-or-like-select",
59 cl::init(true), cl::Hidden);
60
61static cl::opt<bool> EnableLSRCostOpt("enable-aarch64-lsr-cost-opt",
62 cl::init(true), cl::Hidden);
63
64// A complete guess as to a reasonable cost.
65 static cl::opt<unsigned>
66     BaseHistCntCost("aarch64-base-histcnt-cost", cl::init(8), cl::Hidden,
67 cl::desc("The cost of a histcnt instruction"));
68
69namespace {
70class TailFoldingOption {
71 // These bitfields will only ever be set to something non-zero in operator=,
72 // when setting the -sve-tail-folding option. This option should always be of
73 // the form (default|simple|all|disabled)[+(Flag1|Flag2|etc)], where
74 // InitialBits is one of (disabled|all|simple). EnableBits represents
75 // additional flags we're enabling, and DisableBits for those flags we're
76 // disabling. The default flag is tracked in the variable NeedsDefault, since
77 // at the time of setting the option we may not know what the default value
78 // for the CPU is.
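  // For example, "-sve-tail-folding=all+noreverse" sets InitialBits to All and
  // DisableBits to Reverse, while "-sve-tail-folding=default+reductions" keeps
  // NeedsDefault true and adds Reductions to EnableBits (see operator= below).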
79 TailFoldingOpts InitialBits = TailFoldingOpts::Disabled;
80 TailFoldingOpts EnableBits = TailFoldingOpts::Disabled;
81 TailFoldingOpts DisableBits = TailFoldingOpts::Disabled;
82
83 // This value needs to be initialised to true in case the user does not
84 // explicitly set the -sve-tail-folding option.
85 bool NeedsDefault = true;
86
87 void setInitialBits(TailFoldingOpts Bits) { InitialBits = Bits; }
88
89 void setNeedsDefault(bool V) { NeedsDefault = V; }
90
91 void setEnableBit(TailFoldingOpts Bit) {
92 EnableBits |= Bit;
93 DisableBits &= ~Bit;
94 }
95
96 void setDisableBit(TailFoldingOpts Bit) {
97 EnableBits &= ~Bit;
98 DisableBits |= Bit;
99 }
100
101 TailFoldingOpts getBits(TailFoldingOpts DefaultBits) const {
102 TailFoldingOpts Bits = TailFoldingOpts::Disabled;
103
104 assert((InitialBits == TailFoldingOpts::Disabled || !NeedsDefault) &&
105 "Initial bits should only include one of "
106 "(disabled|all|simple|default)");
107 Bits = NeedsDefault ? DefaultBits : InitialBits;
108 Bits |= EnableBits;
109 Bits &= ~DisableBits;
110
111 return Bits;
112 }
113
114 void reportError(std::string Opt) {
115 errs() << "invalid argument '" << Opt
116 << "' to -sve-tail-folding=; the option should be of the form\n"
117 " (disabled|all|default|simple)[+(reductions|recurrences"
118 "|reverse|noreductions|norecurrences|noreverse)]\n";
119 report_fatal_error("Unrecognised tail-folding option");
120 }
121
122public:
123
124 void operator=(const std::string &Val) {
125 // If the user explicitly sets -sve-tail-folding= then treat as an error.
126 if (Val.empty()) {
127 reportError("");
128 return;
129 }
130
131 // Since the user is explicitly setting the option we don't automatically
132 // need the default unless they require it.
133 setNeedsDefault(false);
134
135 SmallVector<StringRef, 4> TailFoldTypes;
136 StringRef(Val).split(TailFoldTypes, '+', -1, false);
137
138 unsigned StartIdx = 1;
139 if (TailFoldTypes[0] == "disabled")
140 setInitialBits(TailFoldingOpts::Disabled);
141 else if (TailFoldTypes[0] == "all")
142 setInitialBits(TailFoldingOpts::All);
143 else if (TailFoldTypes[0] == "default")
144 setNeedsDefault(true);
145 else if (TailFoldTypes[0] == "simple")
146 setInitialBits(TailFoldingOpts::Simple);
147 else {
148 StartIdx = 0;
149 setInitialBits(TailFoldingOpts::Disabled);
150 }
151
152 for (unsigned I = StartIdx; I < TailFoldTypes.size(); I++) {
153 if (TailFoldTypes[I] == "reductions")
154 setEnableBit(TailFoldingOpts::Reductions);
155 else if (TailFoldTypes[I] == "recurrences")
156 setEnableBit(TailFoldingOpts::Recurrences);
157 else if (TailFoldTypes[I] == "reverse")
158 setEnableBit(TailFoldingOpts::Reverse);
159 else if (TailFoldTypes[I] == "noreductions")
160 setDisableBit(TailFoldingOpts::Reductions);
161 else if (TailFoldTypes[I] == "norecurrences")
162 setDisableBit(TailFoldingOpts::Recurrences);
163 else if (TailFoldTypes[I] == "noreverse")
164 setDisableBit(TailFoldingOpts::Reverse);
165 else
166 reportError(Val);
167 }
168 }
169
170 bool satisfies(TailFoldingOpts DefaultBits, TailFoldingOpts Required) const {
171 return (getBits(DefaultBits) & Required) == Required;
172 }
173};
174} // namespace
175
176TailFoldingOption TailFoldingOptionLoc;
177
179 "sve-tail-folding",
180 cl::desc(
181 "Control the use of vectorisation using tail-folding for SVE where the"
182 " option is specified in the form (Initial)[+(Flag1|Flag2|...)]:"
183 "\ndisabled (Initial) No loop types will vectorize using "
184 "tail-folding"
185 "\ndefault (Initial) Uses the default tail-folding settings for "
186 "the target CPU"
187 "\nall (Initial) All legal loop types will vectorize using "
188 "tail-folding"
189 "\nsimple (Initial) Use tail-folding for simple loops (not "
190 "reductions or recurrences)"
191 "\nreductions Use tail-folding for loops containing reductions"
192 "\nnoreductions Inverse of above"
193 "\nrecurrences Use tail-folding for loops containing fixed order "
194 "recurrences"
195 "\nnorecurrences Inverse of above"
196 "\nreverse Use tail-folding for loops requiring reversed "
197 "predicates"
198 "\nnoreverse Inverse of above"),
199     cl::location(TailFoldingOptionLoc));
200
201// Experimental option that will only be fully functional when the
202// code-generator is changed to use SVE instead of NEON for all fixed-width
203// operations.
205 "enable-fixedwidth-autovec-in-streaming-mode", cl::init(false), cl::Hidden);
206
207// Experimental option that will only be fully functional when the cost-model
208// and code-generator have been changed to avoid using scalable vector
209// instructions that are not legal in streaming SVE mode.
211 "enable-scalable-autovec-in-streaming-mode", cl::init(false), cl::Hidden);
212
213static bool isSMEABIRoutineCall(const CallInst &CI) {
214 const auto *F = CI.getCalledFunction();
215 return F && StringSwitch<bool>(F->getName())
216 .Case("__arm_sme_state", true)
217 .Case("__arm_tpidr2_save", true)
218 .Case("__arm_tpidr2_restore", true)
219 .Case("__arm_za_disable", true)
220 .Default(false);
221}
222
223/// Returns true if the function has explicit operations that can only be
224/// lowered using incompatible instructions for the selected mode. This also
225/// returns true if the function F may use or modify ZA state.
226 static bool hasPossibleIncompatibleOps(const Function *F) {
227   for (const BasicBlock &BB : *F) {
228 for (const Instruction &I : BB) {
229 // Be conservative for now and assume that any call to inline asm or to
230       // intrinsics could result in non-streaming ops (e.g. calls to
231 // @llvm.aarch64.* or @llvm.gather/scatter intrinsics). We can assume that
232 // all native LLVM instructions can be lowered to compatible instructions.
233 if (isa<CallInst>(I) && !I.isDebugOrPseudoInst() &&
234 (cast<CallInst>(I).isInlineAsm() || isa<IntrinsicInst>(I) ||
235 isSMEABIRoutineCall(cast<CallInst>(I))))
236 return true;
237 }
238 }
239 return false;
240}
241
242 bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
243                                          const Function *Callee) const {
244 SMEAttrs CallerAttrs(*Caller), CalleeAttrs(*Callee);
245
246 // When inlining, we should consider the body of the function, not the
247 // interface.
248 if (CalleeAttrs.hasStreamingBody()) {
249 CalleeAttrs.set(SMEAttrs::SM_Compatible, false);
250 CalleeAttrs.set(SMEAttrs::SM_Enabled, true);
251 }
252
253 if (CalleeAttrs.isNewZA())
254 return false;
255
256 if (CallerAttrs.requiresLazySave(CalleeAttrs) ||
257 CallerAttrs.requiresSMChange(CalleeAttrs) ||
258 CallerAttrs.requiresPreservingZT0(CalleeAttrs)) {
259 if (hasPossibleIncompatibleOps(Callee))
260 return false;
261 }
262
263 const TargetMachine &TM = getTLI()->getTargetMachine();
264
265 const FeatureBitset &CallerBits =
266 TM.getSubtargetImpl(*Caller)->getFeatureBits();
267 const FeatureBitset &CalleeBits =
268 TM.getSubtargetImpl(*Callee)->getFeatureBits();
269
270   // Inline a callee if its target-features are a subset of the caller's
271 // target-features.
272 return (CallerBits & CalleeBits) == CalleeBits;
273}
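// As a rough illustration of the feature-bit check above: if the callee was
// compiled with an extra feature such as +sve2 that the caller lacks,
// (CallerBits & CalleeBits) != CalleeBits and the callee is not inlined.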
274
275 bool AArch64TTIImpl::areTypesABICompatible(
276     const Function *Caller, const Function *Callee,
277 const ArrayRef<Type *> &Types) const {
278 if (!BaseT::areTypesABICompatible(Caller, Callee, Types))
279 return false;
280
281 // We need to ensure that argument promotion does not attempt to promote
282 // pointers to fixed-length vector types larger than 128 bits like
283 // <8 x float> (and pointers to aggregate types which have such fixed-length
284 // vector type members) into the values of the pointees. Such vector types
285 // are used for SVE VLS but there is no ABI for SVE VLS arguments and the
286 // backend cannot lower such value arguments. The 128-bit fixed-length SVE
287 // types can be safely treated as 128-bit NEON types and they cannot be
288 // distinguished in IR.
289 if (ST->useSVEForFixedLengthVectors() && llvm::any_of(Types, [](Type *Ty) {
290 auto FVTy = dyn_cast<FixedVectorType>(Ty);
291 return FVTy &&
292 FVTy->getScalarSizeInBits() * FVTy->getNumElements() > 128;
293 }))
294 return false;
295
296 return true;
297}
298
299 unsigned
300 AArch64TTIImpl::getInlineCallPenalty(const Function *F, const CallBase &Call,
301                                      unsigned DefaultCallPenalty) const {
302 // This function calculates a penalty for executing Call in F.
303 //
304 // There are two ways this function can be called:
305 // (1) F:
306 // call from F -> G (the call here is Call)
307 //
308 // For (1), Call.getCaller() == F, so it will always return a high cost if
309 // a streaming-mode change is required (thus promoting the need to inline the
310 // function)
311 //
312 // (2) F:
313 // call from F -> G (the call here is not Call)
314 // G:
315 // call from G -> H (the call here is Call)
316 //
317 // For (2), if after inlining the body of G into F the call to H requires a
318 // streaming-mode change, and the call to G from F would also require a
319 // streaming-mode change, then there is benefit to do the streaming-mode
320 // change only once and avoid inlining of G into F.
321 SMEAttrs FAttrs(*F);
322 SMEAttrs CalleeAttrs(Call);
323 if (FAttrs.requiresSMChange(CalleeAttrs)) {
324 if (F == Call.getCaller()) // (1)
325 return CallPenaltyChangeSM * DefaultCallPenalty;
326 if (FAttrs.requiresSMChange(SMEAttrs(*Call.getCaller()))) // (2)
327 return InlineCallPenaltyChangeSM * DefaultCallPenalty;
328 }
329
330 return DefaultCallPenalty;
331}
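// Worked example with the option defaults declared above
// (CallPenaltyChangeSM = 5, InlineCallPenaltyChangeSM = 10): in case (1) the
// call is charged 5 * DefaultCallPenalty, making it attractive to inline the
// callee and avoid the mode change; in case (2) the inner call is charged
// 10 * DefaultCallPenalty, discouraging inlining of G into F so the
// streaming-mode change is only paid once.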
332
333 bool AArch64TTIImpl::shouldMaximizeVectorBandwidth(
334     TargetTransformInfo::RegisterKind K) const {
335   assert(K != TargetTransformInfo::RGK_Scalar);
336   return (K == TargetTransformInfo::RGK_FixedWidthVector &&
337           ST->isNeonAvailable());
338}
339
340/// Calculate the cost of materializing a 64-bit value. This helper
341/// method might only calculate a fraction of a larger immediate. Therefore it
342/// is valid to return a cost of ZERO.
343 InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) {
344   // Check if the immediate can be encoded within an instruction.
345 if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
346 return 0;
347
348 if (Val < 0)
349 Val = ~Val;
350
351 // Calculate how many moves we will need to materialize this constant.
352   SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
353   AArch64_IMM::expandMOVImm(Val, 64, Insn);
354   return Insn.size();
355}
356
357/// Calculate the cost of materializing the given constant.
358 InstructionCost AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
359                                               TTI::TargetCostKind CostKind) {
360   assert(Ty->isIntegerTy());
361
362 unsigned BitSize = Ty->getPrimitiveSizeInBits();
363 if (BitSize == 0)
364 return ~0U;
365
366 // Sign-extend all constants to a multiple of 64-bit.
367 APInt ImmVal = Imm;
368 if (BitSize & 0x3f)
369 ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
370
371 // Split the constant into 64-bit chunks and calculate the cost for each
372 // chunk.
373   InstructionCost Cost = 0;
374   for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
375 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
376 int64_t Val = Tmp.getSExtValue();
377 Cost += getIntImmCost(Val);
378 }
380   // We need at least one instruction to materialize the constant.
380 return std::max<InstructionCost>(1, Cost);
381}
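// For example, a 128-bit immediate is sign-extended to a multiple of 64 bits,
// split into two 64-bit chunks, and each chunk is priced via the helper above;
// if each chunk needs two MOV-immediate instructions the total cost is 4.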
382
383 InstructionCost AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
384                                                   const APInt &Imm, Type *Ty,
385                                                   TTI::TargetCostKind CostKind,
386                                                   Instruction *Inst) {
387 assert(Ty->isIntegerTy());
388
389 unsigned BitSize = Ty->getPrimitiveSizeInBits();
390 // There is no cost model for constants with a bit size of 0. Return TCC_Free
391 // here, so that constant hoisting will ignore this constant.
392 if (BitSize == 0)
393 return TTI::TCC_Free;
394
395 unsigned ImmIdx = ~0U;
396 switch (Opcode) {
397 default:
398 return TTI::TCC_Free;
399 case Instruction::GetElementPtr:
400 // Always hoist the base address of a GetElementPtr.
401 if (Idx == 0)
402 return 2 * TTI::TCC_Basic;
403 return TTI::TCC_Free;
404 case Instruction::Store:
405 ImmIdx = 0;
406 break;
407 case Instruction::Add:
408 case Instruction::Sub:
409 case Instruction::Mul:
410 case Instruction::UDiv:
411 case Instruction::SDiv:
412 case Instruction::URem:
413 case Instruction::SRem:
414 case Instruction::And:
415 case Instruction::Or:
416 case Instruction::Xor:
417 case Instruction::ICmp:
418 ImmIdx = 1;
419 break;
420 // Always return TCC_Free for the shift value of a shift instruction.
421 case Instruction::Shl:
422 case Instruction::LShr:
423 case Instruction::AShr:
424 if (Idx == 1)
425 return TTI::TCC_Free;
426 break;
427 case Instruction::Trunc:
428 case Instruction::ZExt:
429 case Instruction::SExt:
430 case Instruction::IntToPtr:
431 case Instruction::PtrToInt:
432 case Instruction::BitCast:
433 case Instruction::PHI:
434 case Instruction::Call:
435 case Instruction::Select:
436 case Instruction::Ret:
437 case Instruction::Load:
438 break;
439 }
440
441 if (Idx == ImmIdx) {
442 int NumConstants = (BitSize + 63) / 64;
443     InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
444     return (Cost <= NumConstants * TTI::TCC_Basic)
445 ? static_cast<int>(TTI::TCC_Free)
446 : Cost;
447 }
448   return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
449 }
450
451 InstructionCost
452 AArch64TTIImpl::getIntImmCostIntrin(unsigned IID, unsigned Idx,
453                                     const APInt &Imm, Type *Ty,
454                                     TTI::TargetCostKind CostKind) {
455   assert(Ty->isIntegerTy());
456
457 unsigned BitSize = Ty->getPrimitiveSizeInBits();
458 // There is no cost model for constants with a bit size of 0. Return TCC_Free
459 // here, so that constant hoisting will ignore this constant.
460 if (BitSize == 0)
461 return TTI::TCC_Free;
462
463 // Most (all?) AArch64 intrinsics do not support folding immediates into the
464 // selected instruction, so we compute the materialization cost for the
465 // immediate directly.
466 if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv)
467     return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
468
469 switch (IID) {
470 default:
471 return TTI::TCC_Free;
472 case Intrinsic::sadd_with_overflow:
473 case Intrinsic::uadd_with_overflow:
474 case Intrinsic::ssub_with_overflow:
475 case Intrinsic::usub_with_overflow:
476 case Intrinsic::smul_with_overflow:
477 case Intrinsic::umul_with_overflow:
478 if (Idx == 1) {
479 int NumConstants = (BitSize + 63) / 64;
480       InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
481       return (Cost <= NumConstants * TTI::TCC_Basic)
482 ? static_cast<int>(TTI::TCC_Free)
483 : Cost;
484 }
485 break;
486 case Intrinsic::experimental_stackmap:
487 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
488 return TTI::TCC_Free;
489 break;
490 case Intrinsic::experimental_patchpoint_void:
491 case Intrinsic::experimental_patchpoint:
492 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
493 return TTI::TCC_Free;
494 break;
495 case Intrinsic::experimental_gc_statepoint:
496 if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
497 return TTI::TCC_Free;
498 break;
499 }
500   return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
501 }
502
503 TTI::PopcntSupportKind
504 AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
505   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
506   if (TyWidth == 32 || TyWidth == 64)
507     return TTI::PSK_FastHardware;
508   // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
509 return TTI::PSK_Software;
510}
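// In other words, popcount on i32/i64 is reported as fast hardware support,
// while anything wider currently falls back to the generic software expansion.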
511
512static bool isUnpackedVectorVT(EVT VecVT) {
513 return VecVT.isScalableVector() &&
514          VecVT.getSizeInBits().getKnownMinValue() < AArch64::SVEBitsPerBlock;
515 }
516
517 static InstructionCost getHistogramCost(const IntrinsicCostAttributes &ICA) {
518   Type *BucketPtrsTy = ICA.getArgTypes()[0]; // Type of vector of pointers
519 Type *EltTy = ICA.getArgTypes()[1]; // Type of bucket elements
520
521 // Only allow (32b and 64b) integers or pointers for now...
522 if ((!EltTy->isIntegerTy() && !EltTy->isPointerTy()) ||
523 (EltTy->getScalarSizeInBits() != 32 &&
524 EltTy->getScalarSizeInBits() != 64))
525     return InstructionCost::getInvalid();
526
527 // FIXME: Hacky check for legal vector types. We can promote smaller types
528 // but we cannot legalize vectors via splitting for histcnt.
529 // FIXME: We should be able to generate histcnt for fixed-length vectors
530 // using ptrue with a specific VL.
531 if (VectorType *VTy = dyn_cast<VectorType>(BucketPtrsTy))
532 if ((VTy->getElementCount().getKnownMinValue() != 2 &&
533 VTy->getElementCount().getKnownMinValue() != 4) ||
534 VTy->getPrimitiveSizeInBits().getKnownMinValue() > 128 ||
535 !VTy->isScalableTy())
536       return InstructionCost::getInvalid();
537
538   return InstructionCost(BaseHistCntCost);
539 }
540
541 InstructionCost
542 AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
543                                       TTI::TargetCostKind CostKind) {
544   // The code-generator is currently not able to handle scalable vectors
545 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
546 // it. This change will be removed when code-generation for these types is
547 // sufficiently reliable.
548 auto *RetTy = ICA.getReturnType();
549 if (auto *VTy = dyn_cast<ScalableVectorType>(RetTy))
550 if (VTy->getElementCount() == ElementCount::getScalable(1))
551       return InstructionCost::getInvalid();
552
553 switch (ICA.getID()) {
554 case Intrinsic::experimental_vector_histogram_add:
555 if (!ST->hasSVE2())
556       return InstructionCost::getInvalid();
557     return getHistogramCost(ICA);
558 case Intrinsic::umin:
559 case Intrinsic::umax:
560 case Intrinsic::smin:
561 case Intrinsic::smax: {
562 static const auto ValidMinMaxTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16,
563 MVT::v8i16, MVT::v2i32, MVT::v4i32,
564 MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32,
565 MVT::nxv2i64};
566     auto LT = getTypeLegalizationCost(RetTy);
567     // v2i64 types get converted to cmp+bif hence the cost of 2
568 if (LT.second == MVT::v2i64)
569 return LT.first * 2;
570 if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
571 return LT.first;
572 break;
573 }
574 case Intrinsic::sadd_sat:
575 case Intrinsic::ssub_sat:
576 case Intrinsic::uadd_sat:
577 case Intrinsic::usub_sat: {
578 static const auto ValidSatTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16,
579 MVT::v8i16, MVT::v2i32, MVT::v4i32,
580 MVT::v2i64};
581     auto LT = getTypeLegalizationCost(RetTy);
582     // This is a base cost of 1 for the vadd, plus 3 extract shifts if we
583 // need to extend the type, as it uses shr(qadd(shl, shl)).
584 unsigned Instrs =
585 LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 1 : 4;
586 if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
587 return LT.first * Instrs;
588 break;
589 }
590 case Intrinsic::abs: {
591 static const auto ValidAbsTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16,
592 MVT::v8i16, MVT::v2i32, MVT::v4i32,
593 MVT::v2i64};
594     auto LT = getTypeLegalizationCost(RetTy);
595     if (any_of(ValidAbsTys, [&LT](MVT M) { return M == LT.second; }))
596 return LT.first;
597 break;
598 }
599 case Intrinsic::bswap: {
600 static const auto ValidAbsTys = {MVT::v4i16, MVT::v8i16, MVT::v2i32,
601 MVT::v4i32, MVT::v2i64};
602     auto LT = getTypeLegalizationCost(RetTy);
603     if (any_of(ValidAbsTys, [&LT](MVT M) { return M == LT.second; }) &&
604 LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits())
605 return LT.first;
606 break;
607 }
608 case Intrinsic::experimental_stepvector: {
609 InstructionCost Cost = 1; // Cost of the `index' instruction
610     auto LT = getTypeLegalizationCost(RetTy);
611     // Legalisation of illegal vectors involves an `index' instruction plus
612 // (LT.first - 1) vector adds.
613 if (LT.first > 1) {
614 Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext());
615 InstructionCost AddCost =
616 getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind);
617 Cost += AddCost * (LT.first - 1);
618 }
619 return Cost;
620 }
621 case Intrinsic::vector_extract:
622 case Intrinsic::vector_insert: {
623 // If both the vector and subvector types are legal types and the index
624 // is 0, then this should be a no-op or simple operation; return a
625 // relatively low cost.
626
627 // If arguments aren't actually supplied, then we cannot determine the
628 // value of the index. We also want to skip predicate types.
629 if (ICA.getArgs().size() != ICA.getArgTypes().size() ||
630         ICA.getReturnType()->getScalarType()->isIntegerTy(1))
631       break;
632
633 LLVMContext &C = RetTy->getContext();
634 EVT VecVT = getTLI()->getValueType(DL, ICA.getArgTypes()[0]);
635 bool IsExtract = ICA.getID() == Intrinsic::vector_extract;
636 EVT SubVecVT = IsExtract ? getTLI()->getValueType(DL, RetTy)
637 : getTLI()->getValueType(DL, ICA.getArgTypes()[1]);
638 // Skip this if either the vector or subvector types are unpacked
639 // SVE types; they may get lowered to stack stores and loads.
640 if (isUnpackedVectorVT(VecVT) || isUnpackedVectorVT(SubVecVT))
641 break;
642
643     TargetLoweringBase::LegalizeKind SubVecLK =
644         getTLI()->getTypeConversion(C, SubVecVT);
645     TargetLoweringBase::LegalizeKind VecLK =
646         getTLI()->getTypeConversion(C, VecVT);
647 const Value *Idx = IsExtract ? ICA.getArgs()[1] : ICA.getArgs()[2];
648 const ConstantInt *CIdx = cast<ConstantInt>(Idx);
649 if (SubVecLK.first == TargetLoweringBase::TypeLegal &&
650 VecLK.first == TargetLoweringBase::TypeLegal && CIdx->isZero())
651 return TTI::TCC_Free;
652 break;
653 }
654 case Intrinsic::bitreverse: {
655 static const CostTblEntry BitreverseTbl[] = {
656 {Intrinsic::bitreverse, MVT::i32, 1},
657 {Intrinsic::bitreverse, MVT::i64, 1},
658 {Intrinsic::bitreverse, MVT::v8i8, 1},
659 {Intrinsic::bitreverse, MVT::v16i8, 1},
660 {Intrinsic::bitreverse, MVT::v4i16, 2},
661 {Intrinsic::bitreverse, MVT::v8i16, 2},
662 {Intrinsic::bitreverse, MVT::v2i32, 2},
663 {Intrinsic::bitreverse, MVT::v4i32, 2},
664 {Intrinsic::bitreverse, MVT::v1i64, 2},
665 {Intrinsic::bitreverse, MVT::v2i64, 2},
666 };
667 const auto LegalisationCost = getTypeLegalizationCost(RetTy);
668 const auto *Entry =
669 CostTableLookup(BitreverseTbl, ICA.getID(), LegalisationCost.second);
670 if (Entry) {
671       // The cost model uses the legal type (i32) that i8 and i16 are
672       // promoted to, plus 1, so that we match the actual lowering cost.
673 if (TLI->getValueType(DL, RetTy, true) == MVT::i8 ||
674 TLI->getValueType(DL, RetTy, true) == MVT::i16)
675 return LegalisationCost.first * Entry->Cost + 1;
676
677 return LegalisationCost.first * Entry->Cost;
678 }
679 break;
680 }
681 case Intrinsic::ctpop: {
682 if (!ST->hasNEON()) {
683 // 32-bit or 64-bit ctpop without NEON is 12 instructions.
684 return getTypeLegalizationCost(RetTy).first * 12;
685 }
686 static const CostTblEntry CtpopCostTbl[] = {
687 {ISD::CTPOP, MVT::v2i64, 4},
688 {ISD::CTPOP, MVT::v4i32, 3},
689 {ISD::CTPOP, MVT::v8i16, 2},
690 {ISD::CTPOP, MVT::v16i8, 1},
691 {ISD::CTPOP, MVT::i64, 4},
692 {ISD::CTPOP, MVT::v2i32, 3},
693 {ISD::CTPOP, MVT::v4i16, 2},
694 {ISD::CTPOP, MVT::v8i8, 1},
695 {ISD::CTPOP, MVT::i32, 5},
696 };
697     auto LT = getTypeLegalizationCost(RetTy);
698     MVT MTy = LT.second;
699 if (const auto *Entry = CostTableLookup(CtpopCostTbl, ISD::CTPOP, MTy)) {
700 // Extra cost of +1 when illegal vector types are legalized by promoting
701 // the integer type.
702 int ExtraCost = MTy.isVector() && MTy.getScalarSizeInBits() !=
703 RetTy->getScalarSizeInBits()
704 ? 1
705 : 0;
706 return LT.first * Entry->Cost + ExtraCost;
707 }
708 break;
709 }
710 case Intrinsic::sadd_with_overflow:
711 case Intrinsic::uadd_with_overflow:
712 case Intrinsic::ssub_with_overflow:
713 case Intrinsic::usub_with_overflow:
714 case Intrinsic::smul_with_overflow:
715 case Intrinsic::umul_with_overflow: {
716 static const CostTblEntry WithOverflowCostTbl[] = {
717 {Intrinsic::sadd_with_overflow, MVT::i8, 3},
718 {Intrinsic::uadd_with_overflow, MVT::i8, 3},
719 {Intrinsic::sadd_with_overflow, MVT::i16, 3},
720 {Intrinsic::uadd_with_overflow, MVT::i16, 3},
721 {Intrinsic::sadd_with_overflow, MVT::i32, 1},
722 {Intrinsic::uadd_with_overflow, MVT::i32, 1},
723 {Intrinsic::sadd_with_overflow, MVT::i64, 1},
724 {Intrinsic::uadd_with_overflow, MVT::i64, 1},
725 {Intrinsic::ssub_with_overflow, MVT::i8, 3},
726 {Intrinsic::usub_with_overflow, MVT::i8, 3},
727 {Intrinsic::ssub_with_overflow, MVT::i16, 3},
728 {Intrinsic::usub_with_overflow, MVT::i16, 3},
729 {Intrinsic::ssub_with_overflow, MVT::i32, 1},
730 {Intrinsic::usub_with_overflow, MVT::i32, 1},
731 {Intrinsic::ssub_with_overflow, MVT::i64, 1},
732 {Intrinsic::usub_with_overflow, MVT::i64, 1},
733 {Intrinsic::smul_with_overflow, MVT::i8, 5},
734 {Intrinsic::umul_with_overflow, MVT::i8, 4},
735 {Intrinsic::smul_with_overflow, MVT::i16, 5},
736 {Intrinsic::umul_with_overflow, MVT::i16, 4},
737 {Intrinsic::smul_with_overflow, MVT::i32, 2}, // eg umull;tst
738 {Intrinsic::umul_with_overflow, MVT::i32, 2}, // eg umull;cmp sxtw
739 {Intrinsic::smul_with_overflow, MVT::i64, 3}, // eg mul;smulh;cmp
740 {Intrinsic::umul_with_overflow, MVT::i64, 3}, // eg mul;umulh;cmp asr
741 };
742 EVT MTy = TLI->getValueType(DL, RetTy->getContainedType(0), true);
743 if (MTy.isSimple())
744 if (const auto *Entry = CostTableLookup(WithOverflowCostTbl, ICA.getID(),
745 MTy.getSimpleVT()))
746 return Entry->Cost;
747 break;
748 }
749 case Intrinsic::fptosi_sat:
750 case Intrinsic::fptoui_sat: {
751 if (ICA.getArgTypes().empty())
752 break;
753 bool IsSigned = ICA.getID() == Intrinsic::fptosi_sat;
754 auto LT = getTypeLegalizationCost(ICA.getArgTypes()[0]);
755 EVT MTy = TLI->getValueType(DL, RetTy);
756 // Check for the legal types, which are where the size of the input and the
757 // output are the same, or we are using cvt f64->i32 or f32->i64.
758 if ((LT.second == MVT::f32 || LT.second == MVT::f64 ||
759 LT.second == MVT::v2f32 || LT.second == MVT::v4f32 ||
760 LT.second == MVT::v2f64)) {
761 if ((LT.second.getScalarSizeInBits() == MTy.getScalarSizeInBits() ||
762 (LT.second == MVT::f64 && MTy == MVT::i32) ||
763 (LT.second == MVT::f32 && MTy == MVT::i64)))
764 return LT.first;
765 // Extending vector types v2f32->v2i64, fcvtl*2 + fcvt*2
766 if (LT.second.getScalarType() == MVT::f32 && MTy.isFixedLengthVector() &&
767 MTy.getScalarSizeInBits() == 64)
768 return LT.first * (MTy.getVectorNumElements() > 2 ? 4 : 2);
769 }
770 // Similarly for fp16 sizes. Without FullFP16 we generally need to fcvt to
771 // f32.
772 if (LT.second.getScalarType() == MVT::f16 && !ST->hasFullFP16())
773 return LT.first + getIntrinsicInstrCost(
774 {ICA.getID(),
775 RetTy,
776 {ICA.getArgTypes()[0]->getWithNewType(
777 Type::getFloatTy(RetTy->getContext()))}},
778 CostKind);
779 if ((LT.second == MVT::f16 && MTy == MVT::i32) ||
780 (LT.second == MVT::f16 && MTy == MVT::i64) ||
781 ((LT.second == MVT::v4f16 || LT.second == MVT::v8f16) &&
782 (LT.second.getScalarSizeInBits() == MTy.getScalarSizeInBits())))
783 return LT.first;
784 // Extending vector types v8f16->v8i32, fcvtl*2 + fcvt*2
785 if (LT.second.getScalarType() == MVT::f16 && MTy.isFixedLengthVector() &&
786 MTy.getScalarSizeInBits() == 32)
787 return LT.first * (MTy.getVectorNumElements() > 4 ? 4 : 2);
788 // Extending vector types v8f16->v8i32. These current scalarize but the
789 // codegen could be better.
790 if (LT.second.getScalarType() == MVT::f16 && MTy.isFixedLengthVector() &&
791 MTy.getScalarSizeInBits() == 64)
792 return MTy.getVectorNumElements() * 3;
793
794     // If we can, use a legal convert followed by a min+max
795 if ((LT.second.getScalarType() == MVT::f32 ||
796 LT.second.getScalarType() == MVT::f64 ||
797 LT.second.getScalarType() == MVT::f16) &&
798 LT.second.getScalarSizeInBits() >= MTy.getScalarSizeInBits()) {
799 Type *LegalTy =
800 Type::getIntNTy(RetTy->getContext(), LT.second.getScalarSizeInBits());
801 if (LT.second.isVector())
802 LegalTy = VectorType::get(LegalTy, LT.second.getVectorElementCount());
803       InstructionCost Cost = 1;
804       IntrinsicCostAttributes Attrs1(IsSigned ? Intrinsic::smin : Intrinsic::umin,
805                                      LegalTy, {LegalTy, LegalTy});
806       Cost += getIntrinsicInstrCost(Attrs1, CostKind);
807       IntrinsicCostAttributes Attrs2(IsSigned ? Intrinsic::smax : Intrinsic::umax,
808                                      LegalTy, {LegalTy, LegalTy});
809       Cost += getIntrinsicInstrCost(Attrs2, CostKind);
810       return LT.first * Cost +
811 ((LT.second.getScalarType() != MVT::f16 || ST->hasFullFP16()) ? 0
812 : 1);
813 }
814 // Otherwise we need to follow the default expansion that clamps the value
815 // using a float min/max with a fcmp+sel for nan handling when signed.
816 Type *FPTy = ICA.getArgTypes()[0]->getScalarType();
817 RetTy = RetTy->getScalarType();
818 if (LT.second.isVector()) {
819 FPTy = VectorType::get(FPTy, LT.second.getVectorElementCount());
820 RetTy = VectorType::get(RetTy, LT.second.getVectorElementCount());
821 }
822 IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FPTy, {FPTy, FPTy});
823     InstructionCost Cost = getIntrinsicInstrCost(Attrs1, CostKind);
824     IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FPTy, {FPTy, FPTy});
825     Cost += getIntrinsicInstrCost(Attrs2, CostKind);
826     Cost +=
827         getCastInstrCost(IsSigned ? Instruction::FPToSI : Instruction::FPToUI,
828                          RetTy, FPTy, TTI::CastContextHint::None, CostKind);
829     if (IsSigned) {
830       Type *CondTy = RetTy->getWithNewBitWidth(1);
831       Cost += getCmpSelInstrCost(BinaryOperator::FCmp, FPTy, CondTy,
832                                  CmpInst::FCMP_UNO, CostKind);
833       Cost += getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
834                                  CmpInst::FCMP_UNO, CostKind);
835     }
836 return LT.first * Cost;
837 }
838 case Intrinsic::fshl:
839 case Intrinsic::fshr: {
840 if (ICA.getArgs().empty())
841 break;
842
843 // TODO: Add handling for fshl where third argument is not a constant.
844 const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(ICA.getArgs()[2]);
845 if (!OpInfoZ.isConstant())
846 break;
847
848 const auto LegalisationCost = getTypeLegalizationCost(RetTy);
849 if (OpInfoZ.isUniform()) {
850 // FIXME: The costs could be lower if the codegen is better.
851 static const CostTblEntry FshlTbl[] = {
852 {Intrinsic::fshl, MVT::v4i32, 3}, // ushr + shl + orr
853 {Intrinsic::fshl, MVT::v2i64, 3}, {Intrinsic::fshl, MVT::v16i8, 4},
854 {Intrinsic::fshl, MVT::v8i16, 4}, {Intrinsic::fshl, MVT::v2i32, 3},
855 {Intrinsic::fshl, MVT::v8i8, 4}, {Intrinsic::fshl, MVT::v4i16, 4}};
856 // Costs for both fshl & fshr are the same, so just pass Intrinsic::fshl
857 // to avoid having to duplicate the costs.
858 const auto *Entry =
859 CostTableLookup(FshlTbl, Intrinsic::fshl, LegalisationCost.second);
860 if (Entry)
861 return LegalisationCost.first * Entry->Cost;
862 }
863
864 auto TyL = getTypeLegalizationCost(RetTy);
865 if (!RetTy->isIntegerTy())
866 break;
867
868 // Estimate cost manually, as types like i8 and i16 will get promoted to
869 // i32 and CostTableLookup will ignore the extra conversion cost.
870 bool HigherCost = (RetTy->getScalarSizeInBits() != 32 &&
871 RetTy->getScalarSizeInBits() < 64) ||
872 (RetTy->getScalarSizeInBits() % 64 != 0);
873 unsigned ExtraCost = HigherCost ? 1 : 0;
874 if (RetTy->getScalarSizeInBits() == 32 ||
875 RetTy->getScalarSizeInBits() == 64)
876 ExtraCost = 0; // fhsl/fshr for i32 and i64 can be lowered to a single
877 // extr instruction.
878 else if (HigherCost)
879 ExtraCost = 1;
880 else
881 break;
882 return TyL.first + ExtraCost;
883 }
884 case Intrinsic::get_active_lane_mask: {
885 auto *RetTy = dyn_cast<FixedVectorType>(ICA.getReturnType());
886 if (RetTy) {
887 EVT RetVT = getTLI()->getValueType(DL, RetTy);
888 EVT OpVT = getTLI()->getValueType(DL, ICA.getArgTypes()[0]);
889 if (!getTLI()->shouldExpandGetActiveLaneMask(RetVT, OpVT) &&
890 !getTLI()->isTypeLegal(RetVT)) {
891 // We don't have enough context at this point to determine if the mask
892 // is going to be kept live after the block, which will force the vXi1
893 // type to be expanded to legal vectors of integers, e.g. v4i1->v4i32.
894 // For now, we just assume the vectorizer created this intrinsic and
895 // the result will be the input for a PHI. In this case the cost will
896 // be extremely high for fixed-width vectors.
897 // NOTE: getScalarizationOverhead returns a cost that's far too
898 // pessimistic for the actual generated codegen. In reality there are
899 // two instructions generated per lane.
900 return RetTy->getNumElements() * 2;
901 }
902 }
903 break;
904 }
905 default:
906 break;
907 }
908   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
909 }
910
911 /// Remove redundant svbool reinterpret casts when they appear across
912 /// control flow (i.e. through PHI nodes).
913static std::optional<Instruction *> processPhiNode(InstCombiner &IC,
914 IntrinsicInst &II) {
915   SmallVector<Instruction *, 32> Worklist;
916   auto RequiredType = II.getType();
917
918 auto *PN = dyn_cast<PHINode>(II.getArgOperand(0));
919 assert(PN && "Expected Phi Node!");
920
921 // Don't create a new Phi unless we can remove the old one.
922 if (!PN->hasOneUse())
923 return std::nullopt;
924
925 for (Value *IncValPhi : PN->incoming_values()) {
926 auto *Reinterpret = dyn_cast<IntrinsicInst>(IncValPhi);
927 if (!Reinterpret ||
928 Reinterpret->getIntrinsicID() !=
929 Intrinsic::aarch64_sve_convert_to_svbool ||
930 RequiredType != Reinterpret->getArgOperand(0)->getType())
931 return std::nullopt;
932 }
933
934 // Create the new Phi
935 IC.Builder.SetInsertPoint(PN);
936 PHINode *NPN = IC.Builder.CreatePHI(RequiredType, PN->getNumIncomingValues());
937 Worklist.push_back(PN);
938
939 for (unsigned I = 0; I < PN->getNumIncomingValues(); I++) {
940 auto *Reinterpret = cast<Instruction>(PN->getIncomingValue(I));
941 NPN->addIncoming(Reinterpret->getOperand(0), PN->getIncomingBlock(I));
942 Worklist.push_back(Reinterpret);
943 }
944
945 // Cleanup Phi Node and reinterprets
946 return IC.replaceInstUsesWith(II, NPN);
947}
948
949// (from_svbool (binop (to_svbool pred) (svbool_t _) (svbool_t _))))
950// => (binop (pred) (from_svbool _) (from_svbool _))
951//
952// The above transformation eliminates a `to_svbool` in the predicate
953// operand of bitwise operation `binop` by narrowing the vector width of
954// the operation. For example, it would convert a `<vscale x 16 x i1>
955// and` into a `<vscale x 4 x i1> and`. This is profitable because
956// to_svbool must zero the new lanes during widening, whereas
957// from_svbool is free.
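// Schematically (operand lists abbreviated):
//   %wide = to_svbool(<vscale x 4 x i1> %pg)   ; <vscale x 16 x i1>
//   %res  = and_z(%wide, %a, %b)               ; <vscale x 16 x i1>
//   %r    = from_svbool(%res)                  ; <vscale x 4 x i1>
// becomes
//   %r = and_z(%pg, from_svbool(%a), from_svbool(%b))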
958static std::optional<Instruction *>
959 tryCombineFromSVBoolBinOp(InstCombiner &IC, IntrinsicInst &II) {
960   auto BinOp = dyn_cast<IntrinsicInst>(II.getOperand(0));
961 if (!BinOp)
962 return std::nullopt;
963
964 auto IntrinsicID = BinOp->getIntrinsicID();
965 switch (IntrinsicID) {
966 case Intrinsic::aarch64_sve_and_z:
967 case Intrinsic::aarch64_sve_bic_z:
968 case Intrinsic::aarch64_sve_eor_z:
969 case Intrinsic::aarch64_sve_nand_z:
970 case Intrinsic::aarch64_sve_nor_z:
971 case Intrinsic::aarch64_sve_orn_z:
972 case Intrinsic::aarch64_sve_orr_z:
973 break;
974 default:
975 return std::nullopt;
976 }
977
978 auto BinOpPred = BinOp->getOperand(0);
979 auto BinOpOp1 = BinOp->getOperand(1);
980 auto BinOpOp2 = BinOp->getOperand(2);
981
982 auto PredIntr = dyn_cast<IntrinsicInst>(BinOpPred);
983 if (!PredIntr ||
984 PredIntr->getIntrinsicID() != Intrinsic::aarch64_sve_convert_to_svbool)
985 return std::nullopt;
986
987 auto PredOp = PredIntr->getOperand(0);
988 auto PredOpTy = cast<VectorType>(PredOp->getType());
989 if (PredOpTy != II.getType())
990 return std::nullopt;
991
992 SmallVector<Value *> NarrowedBinOpArgs = {PredOp};
993 auto NarrowBinOpOp1 = IC.Builder.CreateIntrinsic(
994 Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp1});
995 NarrowedBinOpArgs.push_back(NarrowBinOpOp1);
996 if (BinOpOp1 == BinOpOp2)
997 NarrowedBinOpArgs.push_back(NarrowBinOpOp1);
998 else
999 NarrowedBinOpArgs.push_back(IC.Builder.CreateIntrinsic(
1000 Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp2}));
1001
1002 auto NarrowedBinOp =
1003 IC.Builder.CreateIntrinsic(IntrinsicID, {PredOpTy}, NarrowedBinOpArgs);
1004 return IC.replaceInstUsesWith(II, NarrowedBinOp);
1005}
1006
1007static std::optional<Instruction *>
1008 instCombineConvertFromSVBool(InstCombiner &IC, IntrinsicInst &II) {
1009   // If the reinterpret instruction operand is a PHI Node
1010 if (isa<PHINode>(II.getArgOperand(0)))
1011 return processPhiNode(IC, II);
1012
1013 if (auto BinOpCombine = tryCombineFromSVBoolBinOp(IC, II))
1014 return BinOpCombine;
1015
1016 // Ignore converts to/from svcount_t.
1017 if (isa<TargetExtType>(II.getArgOperand(0)->getType()) ||
1018 isa<TargetExtType>(II.getType()))
1019 return std::nullopt;
1020
1021 SmallVector<Instruction *, 32> CandidatesForRemoval;
1022 Value *Cursor = II.getOperand(0), *EarliestReplacement = nullptr;
1023
1024 const auto *IVTy = cast<VectorType>(II.getType());
1025
1026 // Walk the chain of conversions.
1027 while (Cursor) {
1028 // If the type of the cursor has fewer lanes than the final result, zeroing
1029 // must take place, which breaks the equivalence chain.
1030 const auto *CursorVTy = cast<VectorType>(Cursor->getType());
1031 if (CursorVTy->getElementCount().getKnownMinValue() <
1032 IVTy->getElementCount().getKnownMinValue())
1033 break;
1034
1035 // If the cursor has the same type as I, it is a viable replacement.
1036 if (Cursor->getType() == IVTy)
1037 EarliestReplacement = Cursor;
1038
1039 auto *IntrinsicCursor = dyn_cast<IntrinsicInst>(Cursor);
1040
1041 // If this is not an SVE conversion intrinsic, this is the end of the chain.
1042 if (!IntrinsicCursor || !(IntrinsicCursor->getIntrinsicID() ==
1043 Intrinsic::aarch64_sve_convert_to_svbool ||
1044 IntrinsicCursor->getIntrinsicID() ==
1045 Intrinsic::aarch64_sve_convert_from_svbool))
1046 break;
1047
1048 CandidatesForRemoval.insert(CandidatesForRemoval.begin(), IntrinsicCursor);
1049 Cursor = IntrinsicCursor->getOperand(0);
1050 }
1051
1052 // If no viable replacement in the conversion chain was found, there is
1053 // nothing to do.
1054 if (!EarliestReplacement)
1055 return std::nullopt;
1056
1057 return IC.replaceInstUsesWith(II, EarliestReplacement);
1058}
1059
1060static bool isAllActivePredicate(Value *Pred) {
1061 // Look through convert.from.svbool(convert.to.svbool(...) chain.
1062 Value *UncastedPred;
1063 if (match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_convert_from_svbool>(
1064 m_Intrinsic<Intrinsic::aarch64_sve_convert_to_svbool>(
1065 m_Value(UncastedPred)))))
1066 // If the predicate has the same or less lanes than the uncasted
1067 // predicate then we know the casting has no effect.
1068 if (cast<ScalableVectorType>(Pred->getType())->getMinNumElements() <=
1069 cast<ScalableVectorType>(UncastedPred->getType())->getMinNumElements())
1070 Pred = UncastedPred;
1071
1072 return match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
1073 m_ConstantInt<AArch64SVEPredPattern::all>()));
1074}
1075
1076// Erase unary operation where predicate has all inactive lanes
1077 static std::optional<Instruction *>
1078 instCombineSVENoActiveUnaryErase(InstCombiner &IC, IntrinsicInst &II,
1079                                  int PredPos) {
1080 if (match(II.getOperand(PredPos), m_ZeroInt())) {
1081 return IC.eraseInstFromFunction(II);
1082 }
1083 return std::nullopt;
1084}
1085
1086// Simplify unary operation where predicate has all inactive lanes by replacing
1087// instruction with zeroed object
1088static std::optional<Instruction *>
1089 instCombineSVENoActiveUnaryZero(InstCombiner &IC, IntrinsicInst &II) {
1090   if (match(II.getOperand(0), m_ZeroInt())) {
1091 Constant *Node;
1092 Type *RetTy = II.getType();
1093 if (RetTy->isStructTy()) {
1094 auto StructT = cast<StructType>(RetTy);
1095 auto VecT = StructT->getElementType(0);
1096       SmallVector<Constant *, 4> ZerVec;
1097       for (unsigned i = 0; i < StructT->getNumElements(); i++) {
1098 ZerVec.push_back(VecT->isFPOrFPVectorTy() ? ConstantFP::get(VecT, 0.0)
1099 : ConstantInt::get(VecT, 0));
1100 }
1101 Node = ConstantStruct::get(StructT, ZerVec);
1102 } else if (RetTy->isFPOrFPVectorTy())
1103 Node = ConstantFP::get(RetTy, 0.0);
1104 else
1105 Node = ConstantInt::get(II.getType(), 0);
1106
1107     IC.replaceInstUsesWith(II, Node);
1108     return IC.eraseInstFromFunction(II);
1109 }
1110 return std::nullopt;
1111}
1112
1113static std::optional<Instruction *> instCombineSVESel(InstCombiner &IC,
1114 IntrinsicInst &II) {
1115 // svsel(ptrue, x, y) => x
1116 auto *OpPredicate = II.getOperand(0);
1117 if (isAllActivePredicate(OpPredicate))
1118 return IC.replaceInstUsesWith(II, II.getOperand(1));
1119
1120 auto Select =
1121 IC.Builder.CreateSelect(OpPredicate, II.getOperand(1), II.getOperand(2));
1122 return IC.replaceInstUsesWith(II, Select);
1123}
1124
1125static std::optional<Instruction *> instCombineSVEDup(InstCombiner &IC,
1126 IntrinsicInst &II) {
1127 IntrinsicInst *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
1128 if (!Pg)
1129 return std::nullopt;
1130
1131 if (Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
1132 return std::nullopt;
1133
1134 const auto PTruePattern =
1135 cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
1136 if (PTruePattern != AArch64SVEPredPattern::vl1)
1137 return std::nullopt;
1138
1139 // The intrinsic is inserting into lane zero so use an insert instead.
1140 auto *IdxTy = Type::getInt64Ty(II.getContext());
1141 auto *Insert = InsertElementInst::Create(
1142 II.getArgOperand(0), II.getArgOperand(2), ConstantInt::get(IdxTy, 0));
1143 Insert->insertBefore(&II);
1144 Insert->takeName(&II);
1145
1146 return IC.replaceInstUsesWith(II, Insert);
1147}
1148
1149static std::optional<Instruction *> instCombineSVEDupX(InstCombiner &IC,
1150 IntrinsicInst &II) {
1151 // Replace DupX with a regular IR splat.
1152 auto *RetTy = cast<ScalableVectorType>(II.getType());
1153 Value *Splat = IC.Builder.CreateVectorSplat(RetTy->getElementCount(),
1154 II.getArgOperand(0));
1155 Splat->takeName(&II);
1156 return IC.replaceInstUsesWith(II, Splat);
1157}
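// For example, a dup.x call producing <vscale x 4 x i32> from an i32 scalar is
// replaced by the generic splat sequence that CreateVectorSplat emits, which
// later passes already know how to fold and cost.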
1158
1159static std::optional<Instruction *> instCombineSVECmpNE(InstCombiner &IC,
1160 IntrinsicInst &II) {
1161 LLVMContext &Ctx = II.getContext();
1162
1163 // Check that the predicate is all active
1164 auto *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(0));
1165 if (!Pg || Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
1166 return std::nullopt;
1167
1168 const auto PTruePattern =
1169 cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
1170 if (PTruePattern != AArch64SVEPredPattern::all)
1171 return std::nullopt;
1172
1173 // Check that we have a compare of zero..
1174 auto *SplatValue =
1175 dyn_cast_or_null<ConstantInt>(getSplatValue(II.getArgOperand(2)));
1176 if (!SplatValue || !SplatValue->isZero())
1177 return std::nullopt;
1178
1179 // ..against a dupq
1180 auto *DupQLane = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
1181 if (!DupQLane ||
1182 DupQLane->getIntrinsicID() != Intrinsic::aarch64_sve_dupq_lane)
1183 return std::nullopt;
1184
1185 // Where the dupq is a lane 0 replicate of a vector insert
1186 if (!cast<ConstantInt>(DupQLane->getArgOperand(1))->isZero())
1187 return std::nullopt;
1188
1189 auto *VecIns = dyn_cast<IntrinsicInst>(DupQLane->getArgOperand(0));
1190 if (!VecIns || VecIns->getIntrinsicID() != Intrinsic::vector_insert)
1191 return std::nullopt;
1192
1193 // Where the vector insert is a fixed constant vector insert into undef at
1194 // index zero
1195 if (!isa<UndefValue>(VecIns->getArgOperand(0)))
1196 return std::nullopt;
1197
1198 if (!cast<ConstantInt>(VecIns->getArgOperand(2))->isZero())
1199 return std::nullopt;
1200
1201 auto *ConstVec = dyn_cast<Constant>(VecIns->getArgOperand(1));
1202 if (!ConstVec)
1203 return std::nullopt;
1204
1205 auto *VecTy = dyn_cast<FixedVectorType>(ConstVec->getType());
1206 auto *OutTy = dyn_cast<ScalableVectorType>(II.getType());
1207 if (!VecTy || !OutTy || VecTy->getNumElements() != OutTy->getMinNumElements())
1208 return std::nullopt;
1209
1210 unsigned NumElts = VecTy->getNumElements();
1211 unsigned PredicateBits = 0;
1212
1213 // Expand intrinsic operands to a 16-bit byte level predicate
1214 for (unsigned I = 0; I < NumElts; ++I) {
1215 auto *Arg = dyn_cast<ConstantInt>(ConstVec->getAggregateElement(I));
1216 if (!Arg)
1217 return std::nullopt;
1218 if (!Arg->isZero())
1219 PredicateBits |= 1 << (I * (16 / NumElts));
1220 }
1221
1222 // If all bits are zero bail early with an empty predicate
1223 if (PredicateBits == 0) {
1224 auto *PFalse = Constant::getNullValue(II.getType());
1225 PFalse->takeName(&II);
1226 return IC.replaceInstUsesWith(II, PFalse);
1227 }
1228
1229 // Calculate largest predicate type used (where byte predicate is largest)
1230 unsigned Mask = 8;
1231 for (unsigned I = 0; I < 16; ++I)
1232 if ((PredicateBits & (1 << I)) != 0)
1233 Mask |= (I % 8);
1234
1235 unsigned PredSize = Mask & -Mask;
1236 auto *PredType = ScalableVectorType::get(
1237 Type::getInt1Ty(Ctx), AArch64::SVEBitsPerBlock / (PredSize * 8));
1238
1239 // Ensure all relevant bits are set
1240 for (unsigned I = 0; I < 16; I += PredSize)
1241 if ((PredicateBits & (1 << I)) == 0)
1242 return std::nullopt;
1243
1244 auto *PTruePat =
1245 ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
1246 auto *PTrue = IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
1247 {PredType}, {PTruePat});
1248 auto *ConvertToSVBool = IC.Builder.CreateIntrinsic(
1249 Intrinsic::aarch64_sve_convert_to_svbool, {PredType}, {PTrue});
1250 auto *ConvertFromSVBool =
1251 IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool,
1252 {II.getType()}, {ConvertToSVBool});
1253
1254 ConvertFromSVBool->takeName(&II);
1255 return IC.replaceInstUsesWith(II, ConvertFromSVBool);
1256}
1257
1258static std::optional<Instruction *> instCombineSVELast(InstCombiner &IC,
1259 IntrinsicInst &II) {
1260 Value *Pg = II.getArgOperand(0);
1261 Value *Vec = II.getArgOperand(1);
1262 auto IntrinsicID = II.getIntrinsicID();
1263 bool IsAfter = IntrinsicID == Intrinsic::aarch64_sve_lasta;
1264
1265 // lastX(splat(X)) --> X
1266 if (auto *SplatVal = getSplatValue(Vec))
1267 return IC.replaceInstUsesWith(II, SplatVal);
1268
1269 // If x and/or y is a splat value then:
1270 // lastX (binop (x, y)) --> binop(lastX(x), lastX(y))
1271 Value *LHS, *RHS;
1272 if (match(Vec, m_OneUse(m_BinOp(m_Value(LHS), m_Value(RHS))))) {
1273 if (isSplatValue(LHS) || isSplatValue(RHS)) {
1274 auto *OldBinOp = cast<BinaryOperator>(Vec);
1275 auto OpC = OldBinOp->getOpcode();
1276 auto *NewLHS =
1277 IC.Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, LHS});
1278 auto *NewRHS =
1279 IC.Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, RHS});
1280       auto *NewBinOp = BinaryOperator::CreateWithCopiedFlags(
1281           OpC, NewLHS, NewRHS, OldBinOp, OldBinOp->getName(), II.getIterator());
1282 return IC.replaceInstUsesWith(II, NewBinOp);
1283 }
1284 }
1285
1286 auto *C = dyn_cast<Constant>(Pg);
1287 if (IsAfter && C && C->isNullValue()) {
1288 // The intrinsic is extracting lane 0 so use an extract instead.
1289 auto *IdxTy = Type::getInt64Ty(II.getContext());
1290 auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, 0));
1291 Extract->insertBefore(&II);
1292 Extract->takeName(&II);
1293 return IC.replaceInstUsesWith(II, Extract);
1294 }
1295
1296 auto *IntrPG = dyn_cast<IntrinsicInst>(Pg);
1297 if (!IntrPG)
1298 return std::nullopt;
1299
1300 if (IntrPG->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
1301 return std::nullopt;
1302
1303 const auto PTruePattern =
1304 cast<ConstantInt>(IntrPG->getOperand(0))->getZExtValue();
1305
1306 // Can the intrinsic's predicate be converted to a known constant index?
1307 unsigned MinNumElts = getNumElementsFromSVEPredPattern(PTruePattern);
1308 if (!MinNumElts)
1309 return std::nullopt;
1310
1311 unsigned Idx = MinNumElts - 1;
1312 // Increment the index if extracting the element after the last active
1313 // predicate element.
1314 if (IsAfter)
1315 ++Idx;
1316
1317 // Ignore extracts whose index is larger than the known minimum vector
1318 // length. NOTE: This is an artificial constraint where we prefer to
1319 // maintain what the user asked for until an alternative is proven faster.
1320 auto *PgVTy = cast<ScalableVectorType>(Pg->getType());
1321 if (Idx >= PgVTy->getMinNumElements())
1322 return std::nullopt;
1323
1324 // The intrinsic is extracting a fixed lane so use an extract instead.
1325 auto *IdxTy = Type::getInt64Ty(II.getContext());
1326 auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, Idx));
1327 Extract->insertBefore(&II);
1328 Extract->takeName(&II);
1329 return IC.replaceInstUsesWith(II, Extract);
1330}
1331
1332static std::optional<Instruction *> instCombineSVECondLast(InstCombiner &IC,
1333 IntrinsicInst &II) {
1334 // The SIMD&FP variant of CLAST[AB] is significantly faster than the scalar
1335 // integer variant across a variety of micro-architectures. Replace scalar
1336 // integer CLAST[AB] intrinsic with optimal SIMD&FP variant. A simple
1337 // bitcast-to-fp + clast[ab] + bitcast-to-int will cost a cycle or two more
1338 // depending on the micro-architecture, but has been observed as generally
1339 // being faster, particularly when the CLAST[AB] op is a loop-carried
1340 // dependency.
1341 Value *Pg = II.getArgOperand(0);
1342 Value *Fallback = II.getArgOperand(1);
1343 Value *Vec = II.getArgOperand(2);
1344 Type *Ty = II.getType();
1345
1346 if (!Ty->isIntegerTy())
1347 return std::nullopt;
1348
1349 Type *FPTy;
1350 switch (cast<IntegerType>(Ty)->getBitWidth()) {
1351 default:
1352 return std::nullopt;
1353 case 16:
1354 FPTy = IC.Builder.getHalfTy();
1355 break;
1356 case 32:
1357 FPTy = IC.Builder.getFloatTy();
1358 break;
1359 case 64:
1360 FPTy = IC.Builder.getDoubleTy();
1361 break;
1362 }
1363
1364 Value *FPFallBack = IC.Builder.CreateBitCast(Fallback, FPTy);
1365 auto *FPVTy = VectorType::get(
1366 FPTy, cast<VectorType>(Vec->getType())->getElementCount());
1367 Value *FPVec = IC.Builder.CreateBitCast(Vec, FPVTy);
1368 auto *FPII = IC.Builder.CreateIntrinsic(
1369 II.getIntrinsicID(), {FPVec->getType()}, {Pg, FPFallBack, FPVec});
1370 Value *FPIItoInt = IC.Builder.CreateBitCast(FPII, II.getType());
1371 return IC.replaceInstUsesWith(II, FPIItoInt);
1372}
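// For example, an i32 clastb is rewritten to: bitcast the i32 fallback to
// float, bitcast the <vscale x 4 x i32> vector to <vscale x 4 x float>, call
// the same clastb intrinsic on the FP types, then bitcast the result back to
// i32.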
1373
1374static std::optional<Instruction *> instCombineRDFFR(InstCombiner &IC,
1375 IntrinsicInst &II) {
1376 LLVMContext &Ctx = II.getContext();
1377 // Replace rdffr with predicated rdffr.z intrinsic, so that optimizePTestInstr
1378 // can work with RDFFR_PP for ptest elimination.
1379 auto *AllPat =
1380 ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
1381 auto *PTrue = IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
1382 {II.getType()}, {AllPat});
1383 auto *RDFFR =
1384 IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_rdffr_z, {}, {PTrue});
1385 RDFFR->takeName(&II);
1386 return IC.replaceInstUsesWith(II, RDFFR);
1387}
1388
1389static std::optional<Instruction *>
1390 instCombineSVECntElts(InstCombiner &IC, IntrinsicInst &II, unsigned NumElts) {
1391   const auto Pattern = cast<ConstantInt>(II.getArgOperand(0))->getZExtValue();
1392
1393 if (Pattern == AArch64SVEPredPattern::all) {
1394 Constant *StepVal = ConstantInt::get(II.getType(), NumElts);
1395 auto *VScale = IC.Builder.CreateVScale(StepVal);
1396 VScale->takeName(&II);
1397 return IC.replaceInstUsesWith(II, VScale);
1398 }
1399
1400 unsigned MinNumElts = getNumElementsFromSVEPredPattern(Pattern);
1401
1402 return MinNumElts && NumElts >= MinNumElts
1403 ? std::optional<Instruction *>(IC.replaceInstUsesWith(
1404 II, ConstantInt::get(II.getType(), MinNumElts)))
1405 : std::nullopt;
1406}
1407
1408static std::optional<Instruction *> instCombineSVEPTest(InstCombiner &IC,
1409 IntrinsicInst &II) {
1410 Value *PgVal = II.getArgOperand(0);
1411 Value *OpVal = II.getArgOperand(1);
1412
1413 // PTEST_<FIRST|LAST>(X, X) is equivalent to PTEST_ANY(X, X).
1414 // Later optimizations prefer this form.
1415 if (PgVal == OpVal &&
1416 (II.getIntrinsicID() == Intrinsic::aarch64_sve_ptest_first ||
1417 II.getIntrinsicID() == Intrinsic::aarch64_sve_ptest_last)) {
1418 Value *Ops[] = {PgVal, OpVal};
1419 Type *Tys[] = {PgVal->getType()};
1420
1421 auto *PTest =
1422 IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptest_any, Tys, Ops);
1423 PTest->takeName(&II);
1424
1425 return IC.replaceInstUsesWith(II, PTest);
1426 }
1427
1428 IntrinsicInst *Pg = dyn_cast<IntrinsicInst>(PgVal);
1429 IntrinsicInst *Op = dyn_cast<IntrinsicInst>(OpVal);
1430
1431 if (!Pg || !Op)
1432 return std::nullopt;
1433
1434 Intrinsic::ID OpIID = Op->getIntrinsicID();
1435
1436 if (Pg->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool &&
1437 OpIID == Intrinsic::aarch64_sve_convert_to_svbool &&
1438 Pg->getArgOperand(0)->getType() == Op->getArgOperand(0)->getType()) {
1439 Value *Ops[] = {Pg->getArgOperand(0), Op->getArgOperand(0)};
1440 Type *Tys[] = {Pg->getArgOperand(0)->getType()};
1441
1442 auto *PTest = IC.Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops);
1443
1444 PTest->takeName(&II);
1445 return IC.replaceInstUsesWith(II, PTest);
1446 }
1447
1448 // Transform PTEST_ANY(X=OP(PG,...), X) -> PTEST_ANY(PG, X)).
1449 // Later optimizations may rewrite sequence to use the flag-setting variant
1450 // of instruction X to remove PTEST.
1451 if ((Pg == Op) && (II.getIntrinsicID() == Intrinsic::aarch64_sve_ptest_any) &&
1452 ((OpIID == Intrinsic::aarch64_sve_brka_z) ||
1453 (OpIID == Intrinsic::aarch64_sve_brkb_z) ||
1454 (OpIID == Intrinsic::aarch64_sve_brkpa_z) ||
1455 (OpIID == Intrinsic::aarch64_sve_brkpb_z) ||
1456 (OpIID == Intrinsic::aarch64_sve_rdffr_z) ||
1457 (OpIID == Intrinsic::aarch64_sve_and_z) ||
1458 (OpIID == Intrinsic::aarch64_sve_bic_z) ||
1459 (OpIID == Intrinsic::aarch64_sve_eor_z) ||
1460 (OpIID == Intrinsic::aarch64_sve_nand_z) ||
1461 (OpIID == Intrinsic::aarch64_sve_nor_z) ||
1462 (OpIID == Intrinsic::aarch64_sve_orn_z) ||
1463 (OpIID == Intrinsic::aarch64_sve_orr_z))) {
1464 Value *Ops[] = {Pg->getArgOperand(0), Pg};
1465 Type *Tys[] = {Pg->getType()};
1466
1467 auto *PTest = IC.Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops);
1468 PTest->takeName(&II);
1469
1470 return IC.replaceInstUsesWith(II, PTest);
1471 }
1472
1473 return std::nullopt;
1474}
1475
1476template <Intrinsic::ID MulOpc, typename Intrinsic::ID FuseOpc>
1477static std::optional<Instruction *>
1478 instCombineSVEVectorFuseMulAddSub(InstCombiner &IC, IntrinsicInst &II,
1479                                   bool MergeIntoAddendOp) {
1480 Value *P = II.getOperand(0);
1481 Value *MulOp0, *MulOp1, *AddendOp, *Mul;
1482 if (MergeIntoAddendOp) {
1483 AddendOp = II.getOperand(1);
1484 Mul = II.getOperand(2);
1485 } else {
1486 AddendOp = II.getOperand(2);
1487 Mul = II.getOperand(1);
1488 }
1489
1490 if (!match(Mul, m_Intrinsic<MulOpc>(m_Specific(P), m_Value(MulOp0),
1491 m_Value(MulOp1))))
1492 return std::nullopt;
1493
1494 if (!Mul->hasOneUse())
1495 return std::nullopt;
1496
1497 Instruction *FMFSource = nullptr;
1498 if (II.getType()->isFPOrFPVectorTy()) {
1499 llvm::FastMathFlags FAddFlags = II.getFastMathFlags();
1500 // Stop the combine when the flags on the inputs differ in case dropping
1501 // flags would lead to us missing out on more beneficial optimizations.
1502 if (FAddFlags != cast<CallInst>(Mul)->getFastMathFlags())
1503 return std::nullopt;
1504 if (!FAddFlags.allowContract())
1505 return std::nullopt;
1506 FMFSource = &II;
1507 }
1508
1509 CallInst *Res;
1510 if (MergeIntoAddendOp)
1511 Res = IC.Builder.CreateIntrinsic(FuseOpc, {II.getType()},
1512 {P, AddendOp, MulOp0, MulOp1}, FMFSource);
1513 else
1514 Res = IC.Builder.CreateIntrinsic(FuseOpc, {II.getType()},
1515 {P, MulOp0, MulOp1, AddendOp}, FMFSource);
1516
1517 return IC.replaceInstUsesWith(II, Res);
1518}
1519
1520static std::optional<Instruction *>
1521 instCombineSVELD1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) {
1522   Value *Pred = II.getOperand(0);
1523 Value *PtrOp = II.getOperand(1);
1524 Type *VecTy = II.getType();
1525
1526 // Replace by zero constant when all lanes are inactive
1527 if (auto II_NA = instCombineSVENoActiveUnaryZero(IC, II))
1528 return II_NA;
1529
1530 if (isAllActivePredicate(Pred)) {
1531 LoadInst *Load = IC.Builder.CreateLoad(VecTy, PtrOp);
1532 Load->copyMetadata(II);
1533 return IC.replaceInstUsesWith(II, Load);
1534 }
1535
1536 CallInst *MaskedLoad =
1537 IC.Builder.CreateMaskedLoad(VecTy, PtrOp, PtrOp->getPointerAlignment(DL),
1538 Pred, ConstantAggregateZero::get(VecTy));
1539 MaskedLoad->copyMetadata(II);
1540 return IC.replaceInstUsesWith(II, MaskedLoad);
1541}
1542
1543static std::optional<Instruction *>
1544 instCombineSVEST1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) {
1545   Value *VecOp = II.getOperand(0);
1546 Value *Pred = II.getOperand(1);
1547 Value *PtrOp = II.getOperand(2);
1548
1549 if (isAllActivePredicate(Pred)) {
1550 StoreInst *Store = IC.Builder.CreateStore(VecOp, PtrOp);
1551 Store->copyMetadata(II);
1552 return IC.eraseInstFromFunction(II);
1553 }
1554
1555 CallInst *MaskedStore = IC.Builder.CreateMaskedStore(
1556 VecOp, PtrOp, PtrOp->getPointerAlignment(DL), Pred);
1557 MaskedStore->copyMetadata(II);
1558 return IC.eraseInstFromFunction(II);
1559}
1560
1561 static Instruction::BinaryOps intrinsicIDToBinOpCode(unsigned Intrinsic) {
1562   switch (Intrinsic) {
1563 case Intrinsic::aarch64_sve_fmul_u:
1564 return Instruction::BinaryOps::FMul;
1565 case Intrinsic::aarch64_sve_fadd_u:
1566 return Instruction::BinaryOps::FAdd;
1567 case Intrinsic::aarch64_sve_fsub_u:
1568 return Instruction::BinaryOps::FSub;
1569 default:
1570 return Instruction::BinaryOpsEnd;
1571 }
1572}
1573
1574static std::optional<Instruction *>
1575 instCombineSVEVectorBinOp(InstCombiner &IC, IntrinsicInst &II) {
1576   // Bail due to missing support for ISD::STRICT_ scalable vector operations.
1577 if (II.isStrictFP())
1578 return std::nullopt;
1579
1580 auto *OpPredicate = II.getOperand(0);
1581 auto BinOpCode = intrinsicIDToBinOpCode(II.getIntrinsicID());
1582 if (BinOpCode == Instruction::BinaryOpsEnd ||
1583 !match(OpPredicate, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
1584 m_ConstantInt<AArch64SVEPredPattern::all>())))
1585 return std::nullopt;
1586   IRBuilderBase::FastMathFlagGuard FMFGuard(IC.Builder);
1587   IC.Builder.setFastMathFlags(II.getFastMathFlags());
1588 auto BinOp =
1589 IC.Builder.CreateBinOp(BinOpCode, II.getOperand(1), II.getOperand(2));
1590 return IC.replaceInstUsesWith(II, BinOp);
1591}
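// For illustration, the kind of rewrite performed above (intrinsic name
// mangling abbreviated): given an all-active governing predicate,
//   %pg = aarch64.sve.ptrue(all)
//   %r  = aarch64.sve.fadd_u(%pg, %a, %b)
// is replaced by the plain IR instruction
//   %r  = fadd %a, %b
// with the call's fast-math flags carried over.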
1592
1593// Canonicalise operations that take an all active predicate (e.g. sve.add ->
1594// sve.add_u).
1595static std::optional<Instruction *> instCombineSVEAllActive(IntrinsicInst &II,
1596 Intrinsic::ID IID) {
1597 auto *OpPredicate = II.getOperand(0);
1598 if (!match(OpPredicate, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
1599 m_ConstantInt<AArch64SVEPredPattern::all>())))
1600 return std::nullopt;
1601
1602 auto *Mod = II.getModule();
1603 auto *NewDecl = Intrinsic::getDeclaration(Mod, IID, {II.getType()});
1604 II.setCalledFunction(NewDecl);
1605
1606 return &II;
1607}
1608
1609// Simplify operations where predicate has all inactive lanes or try to replace
1610// with _u form when all lanes are active
1611static std::optional<Instruction *>
1613 Intrinsic::ID IID) {
1614 if (match(II.getOperand(0), m_ZeroInt())) {
1615 // llvm_ir, pred(0), op1, op2 - Spec says to return op1 when all lanes are
1616 // inactive for sv[func]_m
1617 return IC.replaceInstUsesWith(II, II.getOperand(1));
1618 }
1619 return instCombineSVEAllActive(II, IID);
1620}
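// Roughly, the two cases handled here (the predicate is operand 0):
//   all lanes inactive:  sve.fmla(zeroinitializer, %a, %b, %c) --> %a
//   all lanes active:    sve.fmla(ptrue(all), %a, %b, %c)      --> sve.fmla_u(...)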
1621
1622static std::optional<Instruction *> instCombineSVEVectorAdd(InstCombiner &IC,
1623 IntrinsicInst &II) {
1624 if (auto II_U =
1625 instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_add_u))
1626 return II_U;
1627 if (auto MLA = instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul,
1628 Intrinsic::aarch64_sve_mla>(
1629 IC, II, true))
1630 return MLA;
1631 if (auto MAD = instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul,
1632 Intrinsic::aarch64_sve_mad>(
1633 IC, II, false))
1634 return MAD;
1635 return std::nullopt;
1636}
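// Sketch of the fusion attempted above: when the mul shares the add's
// predicate and has a single use,
//   %m = sve.mul(%pg, %b, %c)
//   %r = sve.add(%pg, %a, %m)   -->   %r = sve.mla(%pg, %a, %b, %c)
// and with the mul feeding operand 1 instead, sve.mad is formed.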
1637
1638static std::optional<Instruction *>
1639instCombineSVEVectorFAdd(InstCombiner &IC, IntrinsicInst &II) {
1640 if (auto II_U =
1641 instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fadd_u))
1642 return II_U;
1643 if (auto FMLA =
1644 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1645 Intrinsic::aarch64_sve_fmla>(IC, II,
1646 true))
1647 return FMLA;
1648 if (auto FMAD =
1649 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1650 Intrinsic::aarch64_sve_fmad>(IC, II,
1651 false))
1652 return FMAD;
1653 if (auto FMLA =
1654 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
1655 Intrinsic::aarch64_sve_fmla>(IC, II,
1656 true))
1657 return FMLA;
1658 return std::nullopt;
1659}
1660
1661static std::optional<Instruction *>
1662instCombineSVEVectorFAddU(InstCombiner &IC, IntrinsicInst &II) {
1663 if (auto FMLA =
1664 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1665 Intrinsic::aarch64_sve_fmla>(IC, II,
1666 true))
1667 return FMLA;
1668 if (auto FMAD =
1669 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1670 Intrinsic::aarch64_sve_fmad>(IC, II,
1671 false))
1672 return FMAD;
1673 if (auto FMLA_U =
1674 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
1675 Intrinsic::aarch64_sve_fmla_u>(
1676 IC, II, true))
1677 return FMLA_U;
1678 return instCombineSVEVectorBinOp(IC, II);
1679}
1680
1681static std::optional<Instruction *>
1682instCombineSVEVectorFSub(InstCombiner &IC, IntrinsicInst &II) {
1683 if (auto II_U =
1684 instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fsub_u))
1685 return II_U;
1686 if (auto FMLS =
1687 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1688 Intrinsic::aarch64_sve_fmls>(IC, II,
1689 true))
1690 return FMLS;
1691 if (auto FMSB =
1692 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1693 Intrinsic::aarch64_sve_fnmsb>(
1694 IC, II, false))
1695 return FMSB;
1696 if (auto FMLS =
1697 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
1698 Intrinsic::aarch64_sve_fmls>(IC, II,
1699 true))
1700 return FMLS;
1701 return std::nullopt;
1702}
1703
1704static std::optional<Instruction *>
1705instCombineSVEVectorFSubU(InstCombiner &IC, IntrinsicInst &II) {
1706 if (auto FMLS =
1707 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1708 Intrinsic::aarch64_sve_fmls>(IC, II,
1709 true))
1710 return FMLS;
1711 if (auto FMSB =
1712 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1713 Intrinsic::aarch64_sve_fnmsb>(
1714 IC, II, false))
1715 return FMSB;
1716 if (auto FMLS_U =
1717 instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
1718 Intrinsic::aarch64_sve_fmls_u>(
1719 IC, II, true))
1720 return FMLS_U;
1721 return instCombineSVEVectorBinOp(IC, II);
1722}
1723
1724static std::optional<Instruction *> instCombineSVEVectorSub(InstCombiner &IC,
1725 IntrinsicInst &II) {
1726 if (auto II_U =
1727 instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_sub_u))
1728 return II_U;
1729 if (auto MLS = instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul,
1730 Intrinsic::aarch64_sve_mls>(
1731 IC, II, true))
1732 return MLS;
1733 return std::nullopt;
1734}
1735
1736static std::optional<Instruction *> instCombineSVEVectorMul(InstCombiner &IC,
1737 IntrinsicInst &II,
1738 Intrinsic::ID IID) {
1739 auto *OpPredicate = II.getOperand(0);
1740 auto *OpMultiplicand = II.getOperand(1);
1741 auto *OpMultiplier = II.getOperand(2);
1742
1743 // Return true if a given instruction is a unit splat value, false otherwise.
1744 auto IsUnitSplat = [](auto *I) {
1745 auto *SplatValue = getSplatValue(I);
1746 if (!SplatValue)
1747 return false;
1748 return match(SplatValue, m_FPOne()) || match(SplatValue, m_One());
1749 };
1750
1751 // Return true if a given instruction is an aarch64_sve_dup intrinsic call
1752 // with a unit splat value, false otherwise.
1753 auto IsUnitDup = [](auto *I) {
1754 auto *IntrI = dyn_cast<IntrinsicInst>(I);
1755 if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup)
1756 return false;
1757
1758 auto *SplatValue = IntrI->getOperand(2);
1759 return match(SplatValue, m_FPOne()) || match(SplatValue, m_One());
1760 };
1761
1762 if (IsUnitSplat(OpMultiplier)) {
1763 // [f]mul pg %n, (dupx 1) => %n
1764 OpMultiplicand->takeName(&II);
1765 return IC.replaceInstUsesWith(II, OpMultiplicand);
1766 } else if (IsUnitDup(OpMultiplier)) {
1767 // [f]mul pg %n, (dup pg 1) => %n
1768 auto *DupInst = cast<IntrinsicInst>(OpMultiplier);
1769 auto *DupPg = DupInst->getOperand(1);
1770 // TODO: this is naive. The optimization is still valid if DupPg
1771 // 'encompasses' OpPredicate, not only if they're the same predicate.
1772 if (OpPredicate == DupPg) {
1773 OpMultiplicand->takeName(&II);
1774 return IC.replaceInstUsesWith(II, OpMultiplicand);
1775 }
1776 }
1777
1778 return instCombineSVEVectorBinOp(IC, II);
1779}
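// Examples of the unit-splat folds above (illustrative):
//   sve.fmul(%pg, %n, sve.dup_x(1.0))        --> %n
//   sve.mul(%pg, %n, sve.dup(undef, %pg, 1)) --> %n   ; only when the dup uses
//                                                     ; the same predicate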
1780
1781static std::optional<Instruction *> instCombineSVEUnpack(InstCombiner &IC,
1782 IntrinsicInst &II) {
1783 Value *UnpackArg = II.getArgOperand(0);
1784 auto *RetTy = cast<ScalableVectorType>(II.getType());
1785 bool IsSigned = II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpkhi ||
1786 II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpklo;
1787
1788 // Hi = uunpkhi(splat(X)) --> Hi = splat(extend(X))
1789 // Lo = uunpklo(splat(X)) --> Lo = splat(extend(X))
1790 if (auto *ScalarArg = getSplatValue(UnpackArg)) {
1791 ScalarArg =
1792 IC.Builder.CreateIntCast(ScalarArg, RetTy->getScalarType(), IsSigned);
1793 Value *NewVal =
1794 IC.Builder.CreateVectorSplat(RetTy->getElementCount(), ScalarArg);
1795 NewVal->takeName(&II);
1796 return IC.replaceInstUsesWith(II, NewVal);
1797 }
1798
1799 return std::nullopt;
1800}
1801static std::optional<Instruction *> instCombineSVETBL(InstCombiner &IC,
1802 IntrinsicInst &II) {
1803 auto *OpVal = II.getOperand(0);
1804 auto *OpIndices = II.getOperand(1);
1805 VectorType *VTy = cast<VectorType>(II.getType());
1806
1807 // Check whether OpIndices is a constant splat value < minimal element count
1808 // of result.
1809 auto *SplatValue = dyn_cast_or_null<ConstantInt>(getSplatValue(OpIndices));
1810 if (!SplatValue ||
1811 SplatValue->getValue().uge(VTy->getElementCount().getKnownMinValue()))
1812 return std::nullopt;
1813
1814 // Convert sve_tbl(OpVal sve_dup_x(SplatValue)) to
1815 // splat_vector(extractelement(OpVal, SplatValue)) for further optimization.
1816 auto *Extract = IC.Builder.CreateExtractElement(OpVal, SplatValue);
1817 auto *VectorSplat =
1818 IC.Builder.CreateVectorSplat(VTy->getElementCount(), Extract);
1819
1820 VectorSplat->takeName(&II);
1821 return IC.replaceInstUsesWith(II, VectorSplat);
1822}
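// Example: a table lookup whose index vector is a constant splat smaller than
// the minimum element count reduces to an extract plus splat (illustrative):
//   sve.tbl(%v, sve.dup_x(2))  -->  splat(extractelement %v, 2)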
1823
1824static std::optional<Instruction *> instCombineSVEUzp1(InstCombiner &IC,
1825 IntrinsicInst &II) {
1826 Value *A, *B;
1827 Type *RetTy = II.getType();
1828 constexpr Intrinsic::ID FromSVB = Intrinsic::aarch64_sve_convert_from_svbool;
1829 constexpr Intrinsic::ID ToSVB = Intrinsic::aarch64_sve_convert_to_svbool;
1830
1831 // uzp1(to_svbool(A), to_svbool(B)) --> <A, B>
1832 // uzp1(from_svbool(to_svbool(A)), from_svbool(to_svbool(B))) --> <A, B>
1833 if ((match(II.getArgOperand(0),
1834 m_Intrinsic<FromSVB>(m_Intrinsic<ToSVB>(m_Value(A)))) &&
1835 match(II.getArgOperand(1),
1836 m_Intrinsic<FromSVB>(m_Intrinsic<ToSVB>(m_Value(B))))) ||
1837 (match(II.getArgOperand(0), m_Intrinsic<ToSVB>(m_Value(A))) &&
1838 match(II.getArgOperand(1), m_Intrinsic<ToSVB>(m_Value(B))))) {
1839 auto *TyA = cast<ScalableVectorType>(A->getType());
1840 if (TyA == B->getType() &&
1841 RetTy == ScalableVectorType::getDoubleElementsVectorType(TyA)) {
1842 auto *SubVec = IC.Builder.CreateInsertVector(
1843 RetTy, PoisonValue::get(RetTy), A, IC.Builder.getInt64(0));
1844 auto *ConcatVec = IC.Builder.CreateInsertVector(
1845 RetTy, SubVec, B, IC.Builder.getInt64(TyA->getMinNumElements()));
1846 ConcatVec->takeName(&II);
1847 return IC.replaceInstUsesWith(II, ConcatVec);
1848 }
1849 }
1850
1851 return std::nullopt;
1852}
1853
1854static std::optional<Instruction *> instCombineSVEZip(InstCombiner &IC,
1855 IntrinsicInst &II) {
1856 // zip1(uzp1(A, B), uzp2(A, B)) --> A
1857 // zip2(uzp1(A, B), uzp2(A, B)) --> B
1858 Value *A, *B;
1859 if (match(II.getArgOperand(0),
1860 m_Intrinsic<Intrinsic::aarch64_sve_uzp1>(m_Value(A), m_Value(B))) &&
1861 match(II.getArgOperand(1), m_Intrinsic<Intrinsic::aarch64_sve_uzp2>(
1862 m_Specific(A), m_Specific(B))))
1863 return IC.replaceInstUsesWith(
1864 II, (II.getIntrinsicID() == Intrinsic::aarch64_sve_zip1 ? A : B));
1865
1866 return std::nullopt;
1867}
1868
1869static std::optional<Instruction *>
1870instCombineLD1GatherIndex(InstCombiner &IC, IntrinsicInst &II) {
1871 Value *Mask = II.getOperand(0);
1872 Value *BasePtr = II.getOperand(1);
1873 Value *Index = II.getOperand(2);
1874 Type *Ty = II.getType();
1875 Value *PassThru = ConstantAggregateZero::get(Ty);
1876
1877 // Replace by zero constant when all lanes are inactive
1878 if (auto II_NA = instCombineSVENoActiveUnaryZero(IC, II))
1879 return II_NA;
1880
1881 // Contiguous gather => masked load.
1882 // (sve.ld1.gather.index Mask BasePtr (sve.index IndexBase 1))
1883 // => (masked.load (gep BasePtr IndexBase) Align Mask zeroinitializer)
1884 Value *IndexBase;
1885 if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>(
1886 m_Value(IndexBase), m_SpecificInt(1)))) {
1887 Align Alignment =
1888 BasePtr->getPointerAlignment(II.getDataLayout());
1889
1890 Type *VecPtrTy = PointerType::getUnqual(Ty);
1891 Value *Ptr = IC.Builder.CreateGEP(cast<VectorType>(Ty)->getElementType(),
1892 BasePtr, IndexBase);
1893 Ptr = IC.Builder.CreateBitCast(Ptr, VecPtrTy);
1894 CallInst *MaskedLoad =
1895 IC.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, Mask, PassThru);
1896 MaskedLoad->takeName(&II);
1897 return IC.replaceInstUsesWith(II, MaskedLoad);
1898 }
1899
1900 return std::nullopt;
1901}
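// For example, when the index vector is sve.index(%i, 1) the gather walks a
// contiguous block starting at %base + %i, so it becomes a masked.load of the
// whole vector with a zeroinitializer passthru, using whatever alignment can
// be inferred from the base pointer (illustrative).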
1902
1903static std::optional<Instruction *>
1904instCombineST1ScatterIndex(InstCombiner &IC, IntrinsicInst &II) {
1905 Value *Val = II.getOperand(0);
1906 Value *Mask = II.getOperand(1);
1907 Value *BasePtr = II.getOperand(2);
1908 Value *Index = II.getOperand(3);
1909 Type *Ty = Val->getType();
1910
1911 // Contiguous scatter => masked store.
1912 // (sve.st1.scatter.index Value Mask BasePtr (sve.index IndexBase 1))
1913 // => (masked.store Value (gep BasePtr IndexBase) Align Mask)
1914 Value *IndexBase;
1915 if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>(
1916 m_Value(IndexBase), m_SpecificInt(1)))) {
1917 Align Alignment =
1918 BasePtr->getPointerAlignment(II.getDataLayout());
1919
1920 Value *Ptr = IC.Builder.CreateGEP(cast<VectorType>(Ty)->getElementType(),
1921 BasePtr, IndexBase);
1922 Type *VecPtrTy = PointerType::getUnqual(Ty);
1923 Ptr = IC.Builder.CreateBitCast(Ptr, VecPtrTy);
1924
1925 (void)IC.Builder.CreateMaskedStore(Val, Ptr, Alignment, Mask);
1926
1927 return IC.eraseInstFromFunction(II);
1928 }
1929
1930 return std::nullopt;
1931}
1932
1933static std::optional<Instruction *> instCombineSVESDIV(InstCombiner &IC,
1934 IntrinsicInst &II) {
1935 Type *Int32Ty = IC.Builder.getInt32Ty();
1936 Value *Pred = II.getOperand(0);
1937 Value *Vec = II.getOperand(1);
1938 Value *DivVec = II.getOperand(2);
1939
1940 Value *SplatValue = getSplatValue(DivVec);
1941 ConstantInt *SplatConstantInt = dyn_cast_or_null<ConstantInt>(SplatValue);
1942 if (!SplatConstantInt)
1943 return std::nullopt;
1944 APInt Divisor = SplatConstantInt->getValue();
1945
1946 if (Divisor.isPowerOf2()) {
1947 Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2());
1948 auto ASRD = IC.Builder.CreateIntrinsic(
1949 Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2});
1950 return IC.replaceInstUsesWith(II, ASRD);
1951 }
1952 if (Divisor.isNegatedPowerOf2()) {
1953 Divisor.negate();
1954 Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2());
1955 auto ASRD = IC.Builder.CreateIntrinsic(
1956 Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2});
1957 auto NEG = IC.Builder.CreateIntrinsic(
1958 Intrinsic::aarch64_sve_neg, {ASRD->getType()}, {ASRD, Pred, ASRD});
1959 return IC.replaceInstUsesWith(II, NEG);
1960 }
1961
1962 return std::nullopt;
1963}
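// Worked example: a divide by splat(8) becomes sve.asrd(%pg, %x, 3), since
// log2(8) == 3; a divide by splat(-8) produces the same asrd followed by
// sve.neg under %pg.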
1964
1965bool SimplifyValuePattern(SmallVector<Value *> &Vec, bool AllowPoison) {
1966 size_t VecSize = Vec.size();
1967 if (VecSize == 1)
1968 return true;
1969 if (!isPowerOf2_64(VecSize))
1970 return false;
1971 size_t HalfVecSize = VecSize / 2;
1972
1973 for (auto LHS = Vec.begin(), RHS = Vec.begin() + HalfVecSize;
1974 RHS != Vec.end(); LHS++, RHS++) {
1975 if (*LHS != nullptr && *RHS != nullptr) {
1976 if (*LHS == *RHS)
1977 continue;
1978 else
1979 return false;
1980 }
1981 if (!AllowPoison)
1982 return false;
1983 if (*LHS == nullptr && *RHS != nullptr)
1984 *LHS = *RHS;
1985 }
1986
1987 Vec.resize(HalfVecSize);
1988 SimplifyValuePattern(Vec, AllowPoison);
1989 return true;
1990}
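// For example, {a, b, a, b} simplifies to {a, b}; with AllowPoison,
// {a, null, a, b} also simplifies to {a, b}; {a, b, b, a} is rejected because
// the two halves differ.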
1991
1992// Try to simplify dupqlane patterns like dupqlane(f32 A, f32 B, f32 A, f32 B)
1993// to dupqlane(f64(C)) where C is A concatenated with B
1994static std::optional<Instruction *> instCombineSVEDupqLane(InstCombiner &IC,
1995 IntrinsicInst &II) {
1996 Value *CurrentInsertElt = nullptr, *Default = nullptr;
1997 if (!match(II.getOperand(0),
1998 m_Intrinsic<Intrinsic::vector_insert>(
1999 m_Value(Default), m_Value(CurrentInsertElt), m_Value())) ||
2000 !isa<FixedVectorType>(CurrentInsertElt->getType()))
2001 return std::nullopt;
2002 auto IIScalableTy = cast<ScalableVectorType>(II.getType());
2003
2004 // Insert the scalars into a container ordered by InsertElement index
2005 SmallVector<Value *> Elts(IIScalableTy->getMinNumElements(), nullptr);
2006 while (auto InsertElt = dyn_cast<InsertElementInst>(CurrentInsertElt)) {
2007 auto Idx = cast<ConstantInt>(InsertElt->getOperand(2));
2008 Elts[Idx->getValue().getZExtValue()] = InsertElt->getOperand(1);
2009 CurrentInsertElt = InsertElt->getOperand(0);
2010 }
2011
2012 bool AllowPoison =
2013 isa<PoisonValue>(CurrentInsertElt) && isa<PoisonValue>(Default);
2014 if (!SimplifyValuePattern(Elts, AllowPoison))
2015 return std::nullopt;
2016
2017 // Rebuild the simplified chain of InsertElements. e.g. (a, b, a, b) as (a, b)
2018 Value *InsertEltChain = PoisonValue::get(CurrentInsertElt->getType());
2019 for (size_t I = 0; I < Elts.size(); I++) {
2020 if (Elts[I] == nullptr)
2021 continue;
2022 InsertEltChain = IC.Builder.CreateInsertElement(InsertEltChain, Elts[I],
2023 IC.Builder.getInt64(I));
2024 }
2025 if (InsertEltChain == nullptr)
2026 return std::nullopt;
2027
2028 // Splat the simplified sequence, e.g. (f16 a, f16 b, f16 c, f16 d) as one i64
2029 // value or (f16 a, f16 b) as one i32 value. This requires an InsertSubvector
2030 // be bitcast to a type wide enough to fit the sequence, be splatted, and then
2031 // be narrowed back to the original type.
2032 unsigned PatternWidth = IIScalableTy->getScalarSizeInBits() * Elts.size();
2033 unsigned PatternElementCount = IIScalableTy->getScalarSizeInBits() *
2034 IIScalableTy->getMinNumElements() /
2035 PatternWidth;
2036
2037 IntegerType *WideTy = IC.Builder.getIntNTy(PatternWidth);
2038 auto *WideScalableTy = ScalableVectorType::get(WideTy, PatternElementCount);
2039 auto *WideShuffleMaskTy =
2040 ScalableVectorType::get(IC.Builder.getInt32Ty(), PatternElementCount);
2041
2042 auto ZeroIdx = ConstantInt::get(IC.Builder.getInt64Ty(), APInt(64, 0));
2043 auto InsertSubvector = IC.Builder.CreateInsertVector(
2044 II.getType(), PoisonValue::get(II.getType()), InsertEltChain, ZeroIdx);
2045 auto WideBitcast =
2046 IC.Builder.CreateBitOrPointerCast(InsertSubvector, WideScalableTy);
2047 auto WideShuffleMask = ConstantAggregateZero::get(WideShuffleMaskTy);
2048 auto WideShuffle = IC.Builder.CreateShuffleVector(
2049 WideBitcast, PoisonValue::get(WideScalableTy), WideShuffleMask);
2050 auto NarrowBitcast =
2051 IC.Builder.CreateBitOrPointerCast(WideShuffle, II.getType());
2052
2053 return IC.replaceInstUsesWith(II, NarrowBitcast);
2054}
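// E.g. for an <vscale x 8 x half> dupqlane of the pattern (a, b, a, b, ...),
// the chain is rebuilt from the pair (a, b), viewed as a single i32 element,
// splatted across an <vscale x 4 x i32>, and bitcast back to the original
// type (illustrative).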
2055
2056static std::optional<Instruction *> instCombineMaxMinNM(InstCombiner &IC,
2057 IntrinsicInst &II) {
2058 Value *A = II.getArgOperand(0);
2059 Value *B = II.getArgOperand(1);
2060 if (A == B)
2061 return IC.replaceInstUsesWith(II, A);
2062
2063 return std::nullopt;
2064}
2065
2066static std::optional<Instruction *> instCombineSVESrshl(InstCombiner &IC,
2067 IntrinsicInst &II) {
2068 Value *Pred = II.getOperand(0);
2069 Value *Vec = II.getOperand(1);
2070 Value *Shift = II.getOperand(2);
2071
2072 // Convert SRSHL into the simpler LSL intrinsic when fed by an ABS intrinsic.
2073 Value *AbsPred, *MergedValue;
2074 if (!match(Vec, m_Intrinsic<Intrinsic::aarch64_sve_sqabs>(
2075 m_Value(MergedValue), m_Value(AbsPred), m_Value())) &&
2076 !match(Vec, m_Intrinsic<Intrinsic::aarch64_sve_abs>(
2077 m_Value(MergedValue), m_Value(AbsPred), m_Value())))
2078
2079 return std::nullopt;
2080
2081 // Transform is valid if any of the following are true:
2082 // * The ABS merge value is an undef or non-negative
2083 // * The ABS predicate is all active
2084 // * The ABS predicate and the SRSHL predicates are the same
2085 if (!isa<UndefValue>(MergedValue) && !match(MergedValue, m_NonNegative()) &&
2086 AbsPred != Pred && !isAllActivePredicate(AbsPred))
2087 return std::nullopt;
2088
2089 // Only valid when the shift amount is non-negative, otherwise the rounding
2090 // behaviour of SRSHL cannot be ignored.
2091 if (!match(Shift, m_NonNegative()))
2092 return std::nullopt;
2093
2094 auto LSL = IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_lsl,
2095 {II.getType()}, {Pred, Vec, Shift});
2096
2097 return IC.replaceInstUsesWith(II, LSL);
2098}
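// Example: with %abs = sve.abs(undef, %pg, %x), the shifted value is known
// non-negative and the shift amount below is a non-negative splat, so
//   sve.srshl(%pg, %abs, splat(2))  -->  sve.lsl(%pg, %abs, splat(2))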
2099
2100std::optional<Instruction *>
2101AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
2102 IntrinsicInst &II) const {
2103 Intrinsic::ID IID = II.getIntrinsicID();
2104 switch (IID) {
2105 default:
2106 break;
2107
2108 case Intrinsic::aarch64_sve_st1_scatter:
2109 case Intrinsic::aarch64_sve_st1_scatter_scalar_offset:
2110 case Intrinsic::aarch64_sve_st1_scatter_sxtw:
2111 case Intrinsic::aarch64_sve_st1_scatter_sxtw_index:
2112 case Intrinsic::aarch64_sve_st1_scatter_uxtw:
2113 case Intrinsic::aarch64_sve_st1_scatter_uxtw_index:
2114 case Intrinsic::aarch64_sve_st1dq:
2115 case Intrinsic::aarch64_sve_st1q_scatter_index:
2116 case Intrinsic::aarch64_sve_st1q_scatter_scalar_offset:
2117 case Intrinsic::aarch64_sve_st1q_scatter_vector_offset:
2118 case Intrinsic::aarch64_sve_st1wq:
2119 case Intrinsic::aarch64_sve_stnt1:
2120 case Intrinsic::aarch64_sve_stnt1_scatter:
2121 case Intrinsic::aarch64_sve_stnt1_scatter_index:
2122 case Intrinsic::aarch64_sve_stnt1_scatter_scalar_offset:
2123 case Intrinsic::aarch64_sve_stnt1_scatter_uxtw:
2124 return instCombineSVENoActiveUnaryErase(IC, II, 1);
2125 case Intrinsic::aarch64_sve_st2:
2126 case Intrinsic::aarch64_sve_st2q:
2127 return instCombineSVENoActiveUnaryErase(IC, II, 2);
2128 case Intrinsic::aarch64_sve_st3:
2129 case Intrinsic::aarch64_sve_st3q:
2130 return instCombineSVENoActiveUnaryErase(IC, II, 3);
2131 case Intrinsic::aarch64_sve_st4:
2132 case Intrinsic::aarch64_sve_st4q:
2133 return instCombineSVENoActiveUnaryErase(IC, II, 4);
2134 case Intrinsic::aarch64_sve_ld1_gather:
2135 case Intrinsic::aarch64_sve_ld1_gather_scalar_offset:
2136 case Intrinsic::aarch64_sve_ld1_gather_sxtw:
2137 case Intrinsic::aarch64_sve_ld1_gather_sxtw_index:
2138 case Intrinsic::aarch64_sve_ld1_gather_uxtw:
2139 case Intrinsic::aarch64_sve_ld1_gather_uxtw_index:
2140 case Intrinsic::aarch64_sve_ld1q_gather_index:
2141 case Intrinsic::aarch64_sve_ld1q_gather_scalar_offset:
2142 case Intrinsic::aarch64_sve_ld1q_gather_vector_offset:
2143 case Intrinsic::aarch64_sve_ld1ro:
2144 case Intrinsic::aarch64_sve_ld1rq:
2145 case Intrinsic::aarch64_sve_ld1udq:
2146 case Intrinsic::aarch64_sve_ld1uwq:
2147 case Intrinsic::aarch64_sve_ld2_sret:
2148 case Intrinsic::aarch64_sve_ld2q_sret:
2149 case Intrinsic::aarch64_sve_ld3_sret:
2150 case Intrinsic::aarch64_sve_ld3q_sret:
2151 case Intrinsic::aarch64_sve_ld4_sret:
2152 case Intrinsic::aarch64_sve_ld4q_sret:
2153 case Intrinsic::aarch64_sve_ldff1:
2154 case Intrinsic::aarch64_sve_ldff1_gather:
2155 case Intrinsic::aarch64_sve_ldff1_gather_index:
2156 case Intrinsic::aarch64_sve_ldff1_gather_scalar_offset:
2157 case Intrinsic::aarch64_sve_ldff1_gather_sxtw:
2158 case Intrinsic::aarch64_sve_ldff1_gather_sxtw_index:
2159 case Intrinsic::aarch64_sve_ldff1_gather_uxtw:
2160 case Intrinsic::aarch64_sve_ldff1_gather_uxtw_index:
2161 case Intrinsic::aarch64_sve_ldnf1:
2162 case Intrinsic::aarch64_sve_ldnt1:
2163 case Intrinsic::aarch64_sve_ldnt1_gather:
2164 case Intrinsic::aarch64_sve_ldnt1_gather_index:
2165 case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset:
2166 case Intrinsic::aarch64_sve_ldnt1_gather_uxtw:
2167 return instCombineSVENoActiveUnaryZero(IC, II);
2168 case Intrinsic::aarch64_neon_fmaxnm:
2169 case Intrinsic::aarch64_neon_fminnm:
2170 return instCombineMaxMinNM(IC, II);
2171 case Intrinsic::aarch64_sve_convert_from_svbool:
2172 return instCombineConvertFromSVBool(IC, II);
2173 case Intrinsic::aarch64_sve_dup:
2174 return instCombineSVEDup(IC, II);
2175 case Intrinsic::aarch64_sve_dup_x:
2176 return instCombineSVEDupX(IC, II);
2177 case Intrinsic::aarch64_sve_cmpne:
2178 case Intrinsic::aarch64_sve_cmpne_wide:
2179 return instCombineSVECmpNE(IC, II);
2180 case Intrinsic::aarch64_sve_rdffr:
2181 return instCombineRDFFR(IC, II);
2182 case Intrinsic::aarch64_sve_lasta:
2183 case Intrinsic::aarch64_sve_lastb:
2184 return instCombineSVELast(IC, II);
2185 case Intrinsic::aarch64_sve_clasta_n:
2186 case Intrinsic::aarch64_sve_clastb_n:
2187 return instCombineSVECondLast(IC, II);
2188 case Intrinsic::aarch64_sve_cntd:
2189 return instCombineSVECntElts(IC, II, 2);
2190 case Intrinsic::aarch64_sve_cntw:
2191 return instCombineSVECntElts(IC, II, 4);
2192 case Intrinsic::aarch64_sve_cnth:
2193 return instCombineSVECntElts(IC, II, 8);
2194 case Intrinsic::aarch64_sve_cntb:
2195 return instCombineSVECntElts(IC, II, 16);
2196 case Intrinsic::aarch64_sve_ptest_any:
2197 case Intrinsic::aarch64_sve_ptest_first:
2198 case Intrinsic::aarch64_sve_ptest_last:
2199 return instCombineSVEPTest(IC, II);
2200 case Intrinsic::aarch64_sve_fabd:
2201 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fabd_u);
2202 case Intrinsic::aarch64_sve_fadd:
2203 return instCombineSVEVectorFAdd(IC, II);
2204 case Intrinsic::aarch64_sve_fadd_u:
2205 return instCombineSVEVectorFAddU(IC, II);
2206 case Intrinsic::aarch64_sve_fdiv:
2207 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fdiv_u);
2208 case Intrinsic::aarch64_sve_fmax:
2209 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmax_u);
2210 case Intrinsic::aarch64_sve_fmaxnm:
2211 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmaxnm_u);
2212 case Intrinsic::aarch64_sve_fmin:
2213 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmin_u);
2214 case Intrinsic::aarch64_sve_fminnm:
2215 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fminnm_u);
2216 case Intrinsic::aarch64_sve_fmla:
2217 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmla_u);
2218 case Intrinsic::aarch64_sve_fmls:
2219 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmls_u);
2220 case Intrinsic::aarch64_sve_fmul:
2221 if (auto II_U =
2222 instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmul_u))
2223 return II_U;
2224 return instCombineSVEVectorMul(IC, II, Intrinsic::aarch64_sve_fmul_u);
2225 case Intrinsic::aarch64_sve_fmul_u:
2226 return instCombineSVEVectorMul(IC, II, Intrinsic::aarch64_sve_fmul_u);
2227 case Intrinsic::aarch64_sve_fmulx:
2228 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmulx_u);
2229 case Intrinsic::aarch64_sve_fnmla:
2230 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fnmla_u);
2231 case Intrinsic::aarch64_sve_fnmls:
2232 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fnmls_u);
2233 case Intrinsic::aarch64_sve_fsub:
2234 return instCombineSVEVectorFSub(IC, II);
2235 case Intrinsic::aarch64_sve_fsub_u:
2236 return instCombineSVEVectorFSubU(IC, II);
2237 case Intrinsic::aarch64_sve_add:
2238 return instCombineSVEVectorAdd(IC, II);
2239 case Intrinsic::aarch64_sve_add_u:
2240 return instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul_u,
2241 Intrinsic::aarch64_sve_mla_u>(
2242 IC, II, true);
2243 case Intrinsic::aarch64_sve_mla:
2244 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_mla_u);
2245 case Intrinsic::aarch64_sve_mls:
2246 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_mls_u);
2247 case Intrinsic::aarch64_sve_mul:
2248 if (auto II_U =
2249 instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_mul_u))
2250 return II_U;
2251 return instCombineSVEVectorMul(IC, II, Intrinsic::aarch64_sve_mul_u);
2252 case Intrinsic::aarch64_sve_mul_u:
2253 return instCombineSVEVectorMul(IC, II, Intrinsic::aarch64_sve_mul_u);
2254 case Intrinsic::aarch64_sve_sabd:
2255 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_sabd_u);
2256 case Intrinsic::aarch64_sve_smax:
2257 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_smax_u);
2258 case Intrinsic::aarch64_sve_smin:
2259 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_smin_u);
2260 case Intrinsic::aarch64_sve_smulh:
2261 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_smulh_u);
2262 case Intrinsic::aarch64_sve_sub:
2263 return instCombineSVEVectorSub(IC, II);
2264 case Intrinsic::aarch64_sve_sub_u:
2265 return instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul_u,
2266 Intrinsic::aarch64_sve_mls_u>(
2267 IC, II, true);
2268 case Intrinsic::aarch64_sve_uabd:
2269 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_uabd_u);
2270 case Intrinsic::aarch64_sve_umax:
2271 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_umax_u);
2272 case Intrinsic::aarch64_sve_umin:
2273 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_umin_u);
2274 case Intrinsic::aarch64_sve_umulh:
2275 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_umulh_u);
2276 case Intrinsic::aarch64_sve_asr:
2277 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_asr_u);
2278 case Intrinsic::aarch64_sve_lsl:
2279 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_lsl_u);
2280 case Intrinsic::aarch64_sve_lsr:
2281 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_lsr_u);
2282 case Intrinsic::aarch64_sve_and:
2283 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_and_u);
2284 case Intrinsic::aarch64_sve_bic:
2285 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_bic_u);
2286 case Intrinsic::aarch64_sve_eor:
2287 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_eor_u);
2288 case Intrinsic::aarch64_sve_orr:
2289 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_orr_u);
2290 case Intrinsic::aarch64_sve_sqsub:
2291 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_sqsub_u);
2292 case Intrinsic::aarch64_sve_uqsub:
2293 return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_uqsub_u);
2294 case Intrinsic::aarch64_sve_tbl:
2295 return instCombineSVETBL(IC, II);
2296 case Intrinsic::aarch64_sve_uunpkhi:
2297 case Intrinsic::aarch64_sve_uunpklo:
2298 case Intrinsic::aarch64_sve_sunpkhi:
2299 case Intrinsic::aarch64_sve_sunpklo:
2300 return instCombineSVEUnpack(IC, II);
2301 case Intrinsic::aarch64_sve_uzp1:
2302 return instCombineSVEUzp1(IC, II);
2303 case Intrinsic::aarch64_sve_zip1:
2304 case Intrinsic::aarch64_sve_zip2:
2305 return instCombineSVEZip(IC, II);
2306 case Intrinsic::aarch64_sve_ld1_gather_index:
2307 return instCombineLD1GatherIndex(IC, II);
2308 case Intrinsic::aarch64_sve_st1_scatter_index:
2309 return instCombineST1ScatterIndex(IC, II);
2310 case Intrinsic::aarch64_sve_ld1:
2311 return instCombineSVELD1(IC, II, DL);
2312 case Intrinsic::aarch64_sve_st1:
2313 return instCombineSVEST1(IC, II, DL);
2314 case Intrinsic::aarch64_sve_sdiv:
2315 return instCombineSVESDIV(IC, II);
2316 case Intrinsic::aarch64_sve_sel:
2317 return instCombineSVESel(IC, II);
2318 case Intrinsic::aarch64_sve_srshl:
2319 return instCombineSVESrshl(IC, II);
2320 case Intrinsic::aarch64_sve_dupq_lane:
2321 return instCombineSVEDupqLane(IC, II);
2322 }
2323
2324 return std::nullopt;
2325}
2326
2327std::optional<Value *> AArch64TTIImpl::simplifyDemandedVectorEltsIntrinsic(
2328 InstCombiner &IC, IntrinsicInst &II, APInt OrigDemandedElts,
2329 APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
2330 std::function<void(Instruction *, unsigned, APInt, APInt &)>
2331 SimplifyAndSetOp) const {
2332 switch (II.getIntrinsicID()) {
2333 default:
2334 break;
2335 case Intrinsic::aarch64_neon_fcvtxn:
2336 case Intrinsic::aarch64_neon_rshrn:
2337 case Intrinsic::aarch64_neon_sqrshrn:
2338 case Intrinsic::aarch64_neon_sqrshrun:
2339 case Intrinsic::aarch64_neon_sqshrn:
2340 case Intrinsic::aarch64_neon_sqshrun:
2341 case Intrinsic::aarch64_neon_sqxtn:
2342 case Intrinsic::aarch64_neon_sqxtun:
2343 case Intrinsic::aarch64_neon_uqrshrn:
2344 case Intrinsic::aarch64_neon_uqshrn:
2345 case Intrinsic::aarch64_neon_uqxtn:
2346 SimplifyAndSetOp(&II, 0, OrigDemandedElts, UndefElts);
2347 break;
2348 }
2349
2350 return std::nullopt;
2351}
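// The narrowing conversions above are lane-wise, so only the source lanes that
// feed demanded result lanes are needed; e.g. if just lane 3 of a sqxtn result
// is demanded, SimplifyAndSetOp narrows the demand on operand 0 to lane 3 as
// well (illustrative).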
2352
2353bool AArch64TTIImpl::enableScalableVectorization() const {
2354 return ST->isSVEAvailable() || (ST->isSVEorStreamingSVEAvailable() &&
2355 EnableScalableAutovecInStreamingMode);
2356}
2357
2358TypeSize
2359AArch64TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
2360 switch (K) {
2361 case TargetTransformInfo::RGK_Scalar:
2362 return TypeSize::getFixed(64);
2363 case TargetTransformInfo::RGK_FixedWidthVector:
2364 if (ST->useSVEForFixedLengthVectors() &&
2365 (ST->isSVEAvailable() || EnableFixedwidthAutovecInStreamingMode))
2366 return TypeSize::getFixed(
2367 std::max(ST->getMinSVEVectorSizeInBits(), 128u));
2368 else if (ST->isNeonAvailable())
2369 return TypeSize::getFixed(128);
2370 else
2371 return TypeSize::getFixed(0);
2372 case TargetTransformInfo::RGK_ScalableVector:
2373 if (ST->isSVEAvailable() || (ST->isSVEorStreamingSVEAvailable() &&
2374 EnableScalableAutovecInStreamingMode))
2375 return TypeSize::getScalable(128);
2376 else
2377 return TypeSize::getScalable(0);
2378 }
2379 llvm_unreachable("Unsupported register kind");
2380}
2381
2382bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
2383 ArrayRef<const Value *> Args,
2384 Type *SrcOverrideTy) {
2385 // A helper that returns a vector type from the given type. The number of
2386 // elements in type Ty determines the vector width.
2387 auto toVectorTy = [&](Type *ArgTy) {
2388 return VectorType::get(ArgTy->getScalarType(),
2389 cast<VectorType>(DstTy)->getElementCount());
2390 };
2391
2392 // Exit early if DstTy is not a vector type whose elements are one of [i16,
2393 // i32, i64]. SVE doesn't generally have the same set of instructions to
2394 // perform an extend with the add/sub/mul. There are SMULLB style
2395 // instructions, but they operate on top/bottom, requiring some sort of lane
2396 // interleaving to be used with zext/sext.
2397 unsigned DstEltSize = DstTy->getScalarSizeInBits();
2398 if (!useNeonVector(DstTy) || Args.size() != 2 ||
2399 (DstEltSize != 16 && DstEltSize != 32 && DstEltSize != 64))
2400 return false;
2401
2402 // Determine if the operation has a widening variant. We consider both the
2403 // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the
2404 // instructions.
2405 //
2406 // TODO: Add additional widening operations (e.g., shl, etc.) once we
2407 // verify that their extending operands are eliminated during code
2408 // generation.
2409 Type *SrcTy = SrcOverrideTy;
2410 switch (Opcode) {
2411 case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2).
2412 case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2).
2413 // The second operand needs to be an extend
2414 if (isa<SExtInst>(Args[1]) || isa<ZExtInst>(Args[1])) {
2415 if (!SrcTy)
2416 SrcTy =
2417 toVectorTy(cast<Instruction>(Args[1])->getOperand(0)->getType());
2418 } else
2419 return false;
2420 break;
2421 case Instruction::Mul: { // SMULL(2), UMULL(2)
2422 // Both operands need to be extends of the same type.
2423 if ((isa<SExtInst>(Args[0]) && isa<SExtInst>(Args[1])) ||
2424 (isa<ZExtInst>(Args[0]) && isa<ZExtInst>(Args[1]))) {
2425 if (!SrcTy)
2426 SrcTy =
2427 toVectorTy(cast<Instruction>(Args[0])->getOperand(0)->getType());
2428 } else if (isa<ZExtInst>(Args[0]) || isa<ZExtInst>(Args[1])) {
2429 // If one of the operands is a Zext and the other has enough zero bits to
2430 // be treated as unsigned, we can still generate a umull, meaning the zext
2431 // is free.
2432 KnownBits Known =
2433 computeKnownBits(isa<ZExtInst>(Args[0]) ? Args[1] : Args[0], DL);
2434 if (Args[0]->getType()->getScalarSizeInBits() -
2435 Known.Zero.countLeadingOnes() >
2436 DstTy->getScalarSizeInBits() / 2)
2437 return false;
2438 if (!SrcTy)
2439 SrcTy = toVectorTy(Type::getIntNTy(DstTy->getContext(),
2440 DstTy->getScalarSizeInBits() / 2));
2441 } else
2442 return false;
2443 break;
2444 }
2445 default:
2446 return false;
2447 }
2448
2449 // Legalize the destination type and ensure it can be used in a widening
2450 // operation.
2451 auto DstTyL = getTypeLegalizationCost(DstTy);
2452 if (!DstTyL.second.isVector() || DstEltSize != DstTy->getScalarSizeInBits())
2453 return false;
2454
2455 // Legalize the source type and ensure it can be used in a widening
2456 // operation.
2457 assert(SrcTy && "Expected some SrcTy");
2458 auto SrcTyL = getTypeLegalizationCost(SrcTy);
2459 unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
2460 if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
2461 return false;
2462
2463 // Get the total number of vector elements in the legalized types.
2464 InstructionCost NumDstEls =
2465 DstTyL.first * DstTyL.second.getVectorMinNumElements();
2466 InstructionCost NumSrcEls =
2467 SrcTyL.first * SrcTyL.second.getVectorMinNumElements();
2468
2469 // Return true if the legalized types have the same number of vector elements
2470 // and the destination element type size is twice that of the source type.
2471 return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstEltSize;
2472}
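// For instance, an add of two zexts from <8 x i8> to <8 x i16> can be selected
// as a single uaddl, so the extend feeding its second operand is treated as
// free by getCastInstrCost below (illustrative).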
2473
2474// s/urhadd instructions implement the following pattern, making the
2475// extends free:
2476// %x = add ((zext i8 -> i16), 1)
2477// %y = (zext i8 -> i16)
2478// trunc i16 (lshr (add %x, %y), 1) -> i8
2479//
2480static bool isExtPartOfAvgExpr(const Instruction *ExtUser, Type *Dst,
2481 Type *Src) {
2482 // The source should be a legal vector type.
2483 if (!Src->isVectorTy() || !TLI->isTypeLegal(TLI->getValueType(DL, Src)) ||
2484 (Src->isScalableTy() && !ST->hasSVE2()))
2485 return false;
2486
2487 if (ExtUser->getOpcode() != Instruction::Add || !ExtUser->hasOneUse())
2488 return false;
2489
2490 // Look for trunc/shl/add before trying to match the pattern.
2491 const Instruction *Add = ExtUser;
2492 auto *AddUser =
2493 dyn_cast_or_null<Instruction>(Add->getUniqueUndroppableUser());
2494 if (AddUser && AddUser->getOpcode() == Instruction::Add)
2495 Add = AddUser;
2496
2497 auto *Shr = dyn_cast_or_null<Instruction>(Add->getUniqueUndroppableUser());
2498 if (!Shr || Shr->getOpcode() != Instruction::LShr)
2499 return false;
2500
2501 auto *Trunc = dyn_cast_or_null<Instruction>(Shr->getUniqueUndroppableUser());
2502 if (!Trunc || Trunc->getOpcode() != Instruction::Trunc ||
2503 Src->getScalarSizeInBits() !=
2504 cast<CastInst>(Trunc)->getDestTy()->getScalarSizeInBits())
2505 return false;
2506
2507 // Try to match the whole pattern. Ext could be either the first or second
2508 // m_ZExtOrSExt matched.
2509 Instruction *Ex1, *Ex2;
2510 if (!(match(Add, m_c_Add(m_Instruction(Ex1),
2511 m_c_Add(m_Instruction(Ex2), m_SpecificInt(1))))))
2512 return false;
2513
2514 // Ensure both extends are of the same type
2515 if (match(Ex1, m_ZExtOrSExt(m_Value())) &&
2516 Ex1->getOpcode() == Ex2->getOpcode())
2517 return true;
2518
2519 return false;
2520}
2521
2522InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
2523 Type *Src,
2524 TTI::CastContextHint CCH,
2525 TTI::TargetCostKind CostKind,
2526 const Instruction *I) {
2527 int ISD = TLI->InstructionOpcodeToISD(Opcode);
2528 assert(ISD && "Invalid opcode");
2529 // If the cast is observable, and it is used by a widening instruction (e.g.,
2530 // uaddl, saddw, etc.), it may be free.
2531 if (I && I->hasOneUser()) {
2532 auto *SingleUser = cast<Instruction>(*I->user_begin());
2533 SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
2534 if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands, Src)) {
2535 // For adds only count the second operand as free if both operands are
2536 // extends but not the same operation. (i.e both operands are not free in
2537 // add(sext, zext)).
2538 if (SingleUser->getOpcode() == Instruction::Add) {
2539 if (I == SingleUser->getOperand(1) ||
2540 (isa<CastInst>(SingleUser->getOperand(1)) &&
2541 cast<CastInst>(SingleUser->getOperand(1))->getOpcode() == Opcode))
2542 return 0;
2543 } else // Others are free so long as isWideningInstruction returned true.
2544 return 0;
2545 }
2546
2547 // The cast will be free for the s/urhadd instructions
2548 if ((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
2549 isExtPartOfAvgExpr(SingleUser, Dst, Src))
2550 return 0;
2551 }
2552
2553 // TODO: Allow non-throughput costs that aren't binary.
2554 auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
2555 if (CostKind != TTI::TCK_RecipThroughput)
2556 return Cost == 0 ? 0 : 1;
2557 return Cost;
2558 };
2559
2560 EVT SrcTy = TLI->getValueType(DL, Src);
2561 EVT DstTy = TLI->getValueType(DL, Dst);
2562
2563 if (!SrcTy.isSimple() || !DstTy.isSimple())
2564 return AdjustCost(
2565 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
2566
2567 static const TypeConversionCostTblEntry
2568 ConversionTbl[] = {
2569 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 1}, // xtn
2570 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 1}, // xtn
2571 { ISD::TRUNCATE, MVT::v2i32, MVT::v2i64, 1}, // xtn
2572 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1}, // xtn
2573 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 3}, // 2 xtn + 1 uzp1
2574 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1}, // xtn
2575 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2}, // 1 uzp1 + 1 xtn
2576 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1}, // 1 uzp1
2577 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1}, // 1 xtn
2578 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2}, // 1 uzp1 + 1 xtn
2579 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 4}, // 3 x uzp1 + xtn
2580 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 1}, // 1 uzp1
2581 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 3}, // 3 x uzp1
2582 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 2}, // 2 x uzp1
2583 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 1}, // uzp1
2584 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 3}, // (2 + 1) x uzp1
2585 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 7}, // (4 + 2 + 1) x uzp1
2586 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2}, // 2 x uzp1
2587 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i64, 6}, // (4 + 2) x uzp1
2588 { ISD::TRUNCATE, MVT::v16i32, MVT::v16i64, 4}, // 4 x uzp1
2589
2590 // Truncations on nxvmiN
2591 { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i16, 1 },
2592 { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i32, 1 },
2593 { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i64, 1 },
2594 { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i16, 1 },
2595 { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i32, 1 },
2596 { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i64, 2 },
2597 { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i16, 1 },
2598 { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i32, 3 },
2599 { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i64, 5 },
2600 { ISD::TRUNCATE, MVT::nxv16i1, MVT::nxv16i8, 1 },
2601 { ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 1 },
2602 { ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 1 },
2603 { ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 1 },
2604 { ISD::TRUNCATE, MVT::nxv4i32, MVT::nxv4i64, 2 },
2605 { ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 3 },
2606 { ISD::TRUNCATE, MVT::nxv8i32, MVT::nxv8i64, 6 },
2607
2608 // The number of shll instructions for the extension.
2609 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
2610 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
2611 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
2612 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
2613 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
2614 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
2615 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
2616 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
2617 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
2618 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
2619 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
2620 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
2621 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
2622 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
2623 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
2624 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
2625
2626 // LowerVectorINT_TO_FP:
2627 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
2628 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
2629 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
2630 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
2631 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
2632 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
2633
2634 // Complex: to v2f32
2635 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
2636 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
2637 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
2638 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
2639 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
2640 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
2641
2642 // Complex: to v4f32
2643 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 4 },
2644 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
2645 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
2646 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
2647
2648 // Complex: to v8f32
2649 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 },
2650 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
2651 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 },
2652 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
2653
2654 // Complex: to v16f32
2655 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
2656 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
2657
2658 // Complex: to v2f64
2659 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
2660 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
2661 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
2662 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
2663 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
2664 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
2665
2666 // Complex: to v4f64
2667 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 4 },
2668 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 4 },
2669
2670 // LowerVectorFP_TO_INT
2671 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
2672 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
2673 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
2674 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
2675 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
2676 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
2677
2678 // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
2679 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
2680 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
2681 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 1 },
2682 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
2683 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
2684 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 1 },
2685
2686 // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
2687 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
2688 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 2 },
2689 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
2690 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 2 },
2691
2692 // Complex, from nxv2f32.
2693 { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
2694 { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
2695 { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
2696 { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f32, 1 },
2697 { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
2698 { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
2699 { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
2700 { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f32, 1 },
2701
2702 // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
2703 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
2704 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
2705 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 2 },
2706 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
2707 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
2708 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 2 },
2709
2710 // Complex, from nxv2f64.
2711 { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
2712 { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
2713 { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
2714 { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f64, 1 },
2715 { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
2716 { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
2717 { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
2718 { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f64, 1 },
2719
2720 // Complex, from nxv4f32.
2721 { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
2722 { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
2723 { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
2724 { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f32, 1 },
2725 { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
2726 { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
2727 { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
2728 { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f32, 1 },
2729
2730 // Complex, from nxv8f64. Illegal -> illegal conversions not required.
2731 { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
2732 { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f64, 7 },
2733 { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
2734 { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f64, 7 },
2735
2736 // Complex, from nxv4f64. Illegal -> illegal conversions not required.
2737 { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
2738 { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
2739 { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f64, 3 },
2740 { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
2741 { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
2742 { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f64, 3 },
2743
2744 // Complex, from nxv8f32. Illegal -> illegal conversions not required.
2745 { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
2746 { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f32, 3 },
2747 { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
2748 { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f32, 3 },
2749
2750 // Complex, from nxv8f16.
2751 { ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
2752 { ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
2753 { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
2754 { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f16, 1 },
2755 { ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
2756 { ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
2757 { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
2758 { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f16, 1 },
2759
2760 // Complex, from nxv4f16.
2761 { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
2762 { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
2763 { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
2764 { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f16, 1 },
2765 { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
2766 { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
2767 { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
2768 { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f16, 1 },
2769
2770 // Complex, from nxv2f16.
2771 { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
2772 { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
2773 { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
2774 { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f16, 1 },
2775 { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
2776 { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
2777 { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
2778 { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f16, 1 },
2779
2780 // Truncate from nxvmf32 to nxvmf16.
2781 { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1 },
2782 { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1 },
2783 { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3 },
2784
2785 // Truncate from nxvmf64 to nxvmf16.
2786 { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1 },
2787 { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3 },
2788 { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7 },
2789
2790 // Truncate from nxvmf64 to nxvmf32.
2791 { ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1 },
2792 { ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3 },
2793 { ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6 },
2794
2795 // Extend from nxvmf16 to nxvmf32.
2796 { ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1},
2797 { ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1},
2798 { ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2},
2799
2800 // Extend from nxvmf16 to nxvmf64.
2801 { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1},
2802 { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2},
2803 { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4},
2804
2805 // Extend from nxvmf32 to nxvmf64.
2806 { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1},
2807 { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2},
2808 { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6},
2809
2810 // Bitcasts from float to integer
2811 { ISD::BITCAST, MVT::nxv2f16, MVT::nxv2i16, 0 },
2812 { ISD::BITCAST, MVT::nxv4f16, MVT::nxv4i16, 0 },
2813 { ISD::BITCAST, MVT::nxv2f32, MVT::nxv2i32, 0 },
2814
2815 // Bitcasts from integer to float
2816 { ISD::BITCAST, MVT::nxv2i16, MVT::nxv2f16, 0 },
2817 { ISD::BITCAST, MVT::nxv4i16, MVT::nxv4f16, 0 },
2818 { ISD::BITCAST, MVT::nxv2i32, MVT::nxv2f32, 0 },
2819
2820 // Add cost for extending to illegal -too wide- scalable vectors.
2821 // zero/sign extend are implemented by multiple unpack operations,
2822 // where each operation has a cost of 1.
2823 { ISD::ZERO_EXTEND, MVT::nxv16i16, MVT::nxv16i8, 2},
2824 { ISD::ZERO_EXTEND, MVT::nxv16i32, MVT::nxv16i8, 6},
2825 { ISD::ZERO_EXTEND, MVT::nxv16i64, MVT::nxv16i8, 14},
2826 { ISD::ZERO_EXTEND, MVT::nxv8i32, MVT::nxv8i16, 2},
2827 { ISD::ZERO_EXTEND, MVT::nxv8i64, MVT::nxv8i16, 6},
2828 { ISD::ZERO_EXTEND, MVT::nxv4i64, MVT::nxv4i32, 2},
2829
2830 { ISD::SIGN_EXTEND, MVT::nxv16i16, MVT::nxv16i8, 2},
2831 { ISD::SIGN_EXTEND, MVT::nxv16i32, MVT::nxv16i8, 6},
2832 { ISD::SIGN_EXTEND, MVT::nxv16i64, MVT::nxv16i8, 14},
2833 { ISD::SIGN_EXTEND, MVT::nxv8i32, MVT::nxv8i16, 2},
2834 { ISD::SIGN_EXTEND, MVT::nxv8i64, MVT::nxv8i16, 6},
2835 { ISD::SIGN_EXTEND, MVT::nxv4i64, MVT::nxv4i32, 2},
2836 };
2837
2838 // We have to estimate the cost of a fixed-length operation carried out on
2839 // SVE registers by the number of SVE registers required to represent the
2840 // fixed-width type.
2841 EVT WiderTy = SrcTy.bitsGT(DstTy) ? SrcTy : DstTy;
2842 if (SrcTy.isFixedLengthVector() && DstTy.isFixedLengthVector() &&
2843 SrcTy.getVectorNumElements() == DstTy.getVectorNumElements() &&
2844 ST->useSVEForFixedLengthVectors(WiderTy)) {
2845 std::pair<InstructionCost, MVT> LT =
2846 getTypeLegalizationCost(WiderTy.getTypeForEVT(Dst->getContext()));
2847 unsigned NumElements = AArch64::SVEBitsPerBlock /
2848 LT.second.getScalarSizeInBits();
2849 return AdjustCost(
2850 LT.first *
2851 getCastInstrCost(
2852 Opcode, ScalableVectorType::get(Dst->getScalarType(), NumElements),
2853 ScalableVectorType::get(Src->getScalarType(), NumElements), CCH,
2854 CostKind, I));
2855 }
2856
2857 if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
2858 DstTy.getSimpleVT(),
2859 SrcTy.getSimpleVT()))
2860 return AdjustCost(Entry->Cost);
2861
2862 static const TypeConversionCostTblEntry FP16Tbl[] = {
2863 {ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f16, 1}, // fcvtzs
2864 {ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f16, 1},
2865 {ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f16, 1}, // fcvtzs
2866 {ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f16, 1},
2867 {ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f16, 2}, // fcvtl+fcvtzs
2868 {ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f16, 2},
2869 {ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f16, 2}, // fcvtzs+xtn
2870 {ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f16, 2},
2871 {ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f16, 1}, // fcvtzs
2872 {ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f16, 1},
2873 {ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f16, 4}, // 2*fcvtl+2*fcvtzs
2874 {ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f16, 4},
2875 {ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f16, 3}, // 2*fcvtzs+xtn
2876 {ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f16, 3},
2877 {ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f16, 2}, // 2*fcvtzs
2878 {ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f16, 2},
2879 {ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f16, 8}, // 4*fcvtl+4*fcvtzs
2880 {ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f16, 8},
2881 {ISD::UINT_TO_FP, MVT::v8f16, MVT::v8i8, 2}, // ushll + ucvtf
2882 {ISD::SINT_TO_FP, MVT::v8f16, MVT::v8i8, 2}, // sshll + scvtf
2883 {ISD::UINT_TO_FP, MVT::v16f16, MVT::v16i8, 4}, // 2 * ushl(2) + 2 * ucvtf
2884 {ISD::SINT_TO_FP, MVT::v16f16, MVT::v16i8, 4}, // 2 * sshl(2) + 2 * scvtf
2885 };
2886
2887 if (ST->hasFullFP16())
2888 if (const auto *Entry = ConvertCostTableLookup(
2889 FP16Tbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
2890 return AdjustCost(Entry->Cost);
2891
2892 if ((ISD == ISD::ZERO_EXTEND || ISD == ISD::SIGN_EXTEND) &&
2893 CCH == TTI::CastContextHint::Masked &&
2894 ST->isSVEorStreamingSVEAvailable() &&
2895 TLI->getTypeAction(Src->getContext(), SrcTy) ==
2896 TargetLowering::TypePromoteInteger &&
2897 TLI->getTypeAction(Dst->getContext(), DstTy) ==
2898 TargetLowering::TypeSplitVector) {
2899 // The standard behaviour in the backend for these cases is to split the
2900 // extend up into two parts:
2901 // 1. Perform an extending load or masked load up to the legal type.
2902 // 2. Extend the loaded data to the final type.
2903 std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
2904 Type *LegalTy = EVT(SrcLT.second).getTypeForEVT(Src->getContext());
2905 InstructionCost Part1 = AArch64TTIImpl::getCastInstrCost(
2906 Opcode, LegalTy, Src, CCH, CostKind, I);
2907 InstructionCost Part2 = AArch64TTIImpl::getCastInstrCost(
2908 Opcode, Dst, LegalTy, TTI::CastContextHint::None, CostKind, I);
2909 return Part1 + Part2;
2910 }
2911
2912 // The BasicTTIImpl version only deals with CCH==TTI::CastContextHint::Normal,
2913 // but we also want to include the TTI::CastContextHint::Masked case too.
2914 if ((ISD == ISD::ZERO_EXTEND || ISD == ISD::SIGN_EXTEND) &&
2915 CCH == TTI::CastContextHint::Masked &&
2916 ST->isSVEorStreamingSVEAvailable() && TLI->isTypeLegal(DstTy))
2917 CCH = TTI::CastContextHint::Normal;
2918
2919 return AdjustCost(
2920 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
2921}
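// Reading the tables above: a zext from v8i8 to v8i32 is costed at 3 (shll
// steps), a trunc from v4i64 to v4i32 at 1 (a single uzp1), and with full fp16
// an fptosi from v8f16 to v8i16 at 1 (a single fcvtzs).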
2922
2923InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode,
2924 Type *Dst,
2925 VectorType *VecTy,
2926 unsigned Index) {
2927
2928 // Make sure we were given a valid extend opcode.
2929 assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
2930 "Invalid opcode");
2931
2932 // We are extending an element we extract from a vector, so the source type
2933 // of the extend is the element type of the vector.
2934 auto *Src = VecTy->getElementType();
2935
2936 // Sign- and zero-extends are for integer types only.
2937 assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");
2938
2939 // Get the cost for the extract. We compute the cost (if any) for the extend
2940 // below.
2941 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
2942 InstructionCost Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy,
2943 CostKind, Index, nullptr, nullptr);
2944
2945 // Legalize the types.
2946 auto VecLT = getTypeLegalizationCost(VecTy);
2947 auto DstVT = TLI->getValueType(DL, Dst);
2948 auto SrcVT = TLI->getValueType(DL, Src);
2949
2950 // If the resulting type is still a vector and the destination type is legal,
2951 // we may get the extension for free. If not, get the default cost for the
2952 // extend.
2953 if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
2954 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
2955 CostKind);
2956
2957 // The destination type should be larger than the element type. If not, get
2958 // the default cost for the extend.
2959 if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits())
2960 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
2961 CostKind);
2962
2963 switch (Opcode) {
2964 default:
2965 llvm_unreachable("Opcode should be either SExt or ZExt");
2966
2967 // For sign-extends, we only need a smov, which performs the extension
2968 // automatically.
2969 case Instruction::SExt:
2970 return Cost;
2971
2972 // For zero-extends, the extend is performed automatically by a umov unless
2973 // the destination type is i64 and the element type is i8 or i16.
2974 case Instruction::ZExt:
2975 if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
2976 return Cost;
2977 }
2978
2979 // If we are unable to perform the extend for free, get the default cost.
2980 return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
2981 CostKind);
2982}
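// Illustrative example (not part of the original source): extending an
// extracted lane. For
//   %e = extractelement <4 x i32> %v, i32 1
//   %s = sext i32 %e to i64
// the extension is folded into the lane move (smov), so only the extract cost
// is charged. Zero-extending an extracted i8 or i16 lane all the way to i64,
// by contrast, cannot be done by umov alone, so the default extend cost is
// added on top.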
2983
2984 InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
2985 TTI::TargetCostKind CostKind,
2986 const Instruction *I) {
2987 if (CostKind != TTI::TCK_RecipThroughput)
2988 return Opcode == Instruction::PHI ? 0 : 1;
2989 assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
2990 // Branches are assumed to be predicted.
2991 return 0;
2992}
2993
2994InstructionCost AArch64TTIImpl::getVectorInstrCostHelper(const Instruction *I,
2995 Type *Val,
2996 unsigned Index,
2997 bool HasRealUse) {
2998 assert(Val->isVectorTy() && "This must be a vector type");
2999
3000 if (Index != -1U) {
3001 // Legalize the type.
3002 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Val);
3003
3004 // This type is legalized to a scalar type.
3005 if (!LT.second.isVector())
3006 return 0;
3007
3008 // The type may be split. For fixed-width vectors we can normalize the
3009 // index to the new type.
3010 if (LT.second.isFixedLengthVector()) {
3011 unsigned Width = LT.second.getVectorNumElements();
3012 Index = Index % Width;
3013 }
3014
3015 // The element at index zero is already inside the vector.
3016 // - For a physical (HasRealUse==true) insert-element or extract-element
3017 // instruction that extracts integers, an explicit FPR -> GPR move is
3018 // needed. So it has non-zero cost.
3019 // - For the remaining cases (a virtual instruction, or a float element
3020 // type), consider the instruction free.
3021 if (Index == 0 && (!HasRealUse || !Val->getScalarType()->isIntegerTy()))
3022 return 0;
3023
3024 // This recognises an LD1 (single-element structure to one lane of one
3025 // register) instruction: if this is an `insertelement` instruction and
3026 // its second operand is a load, then we will generate an LD1, which is
3027 // an expensive instruction.
3028 if (I && dyn_cast<LoadInst>(I->getOperand(1)))
3029 return ST->getVectorInsertExtractBaseCost() + 1;
3030
3031 // i1 inserts and extracts include an extra cset or cmp of the vector
3032 // value. Increase the cost by 1 to account for this.
3033 if (Val->getScalarSizeInBits() == 1)
3034 return ST->getVectorInsertExtractBaseCost() + 1;
3035
3036 // FIXME:
3037 // If the extract-element and insert-element instructions could be
3038 // simplified away (e.g., could be combined into users by looking at use-def
3039 // context), they have no cost. This is not done in the first place for
3040 // compile-time considerations.
3041 }
3042
3043 // All other insert/extracts cost this much.
3044 return ST->getVectorInsertExtractBaseCost();
3045}
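// Illustrative example (not part of the original source): lane 0 of an
// integer vector is only free when no real FPR -> GPR transfer is needed.
//   %a = extractelement <2 x i64> %vi, i64 0    ; real use of an integer lane:
//                                               ; still needs a lane move
//   %b = extractelement <2 x double> %vf, i64 0 ; FP lane 0: free
// Extracts or inserts on any other lane pay the base insert/extract cost, and
// i1 element types pay one extra unit for the cmp/cset.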
3046
3047 InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
3048 TTI::TargetCostKind CostKind,
3049 unsigned Index, Value *Op0,
3050 Value *Op1) {
3051 bool HasRealUse =
3052 Opcode == Instruction::InsertElement && Op0 && !isa<UndefValue>(Op0);
3053 return getVectorInstrCostHelper(nullptr, Val, Index, HasRealUse);
3054}
3055
3056 InstructionCost AArch64TTIImpl::getVectorInstrCost(const Instruction &I,
3057 Type *Val,
3058 TTI::TargetCostKind CostKind,
3059 unsigned Index) {
3060 return getVectorInstrCostHelper(&I, Val, Index, true /* HasRealUse */);
3061}
3062
3063 InstructionCost AArch64TTIImpl::getScalarizationOverhead(
3064 VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
3065 TTI::TargetCostKind CostKind) {
3066 if (isa<ScalableVectorType>(Ty))
3067 return InstructionCost::getInvalid();
3068 if (Ty->getElementType()->isFloatingPointTy())
3069 return BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
3070 CostKind);
3071 return DemandedElts.popcount() * (Insert + Extract) *
3072 ST->getVectorInsertExtractBaseCost();
3073 }
3074
3076 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
3079 const Instruction *CxtI) {
3080
3081 // The code-generator is currently not able to handle scalable vectors
3082 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
3083 // it. This change will be removed when code-generation for these types is
3084 // sufficiently reliable.
3085 if (auto *VTy = dyn_cast<ScalableVectorType>(Ty))
3086 if (VTy->getElementCount() == ElementCount::getScalable(1))
3087 return InstructionCost::getInvalid();
3088
3089 // TODO: Handle more cost kinds.
3090 if (CostKind != TTI::TCK_RecipThroughput)
3091 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
3092 Op2Info, Args, CxtI);
3093
3094 // Legalize the type.
3095 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
3096 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3097
3098 switch (ISD) {
3099 default:
3100 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
3101 Op2Info);
3102 case ISD::SDIV:
3103 if (Op2Info.isConstant() && Op2Info.isUniform() && Op2Info.isPowerOf2()) {
3104 // On AArch64, scalar signed division by a power-of-two constant is
3105 // normally expanded to the sequence ADD + CMP + SELECT + SRA.
3106 // The OperandValue properties may not be the same as those of the
3107 // previous operation; conservatively assume OP_None.
3108 InstructionCost Cost = getArithmeticInstrCost(
3109 Instruction::Add, Ty, CostKind,
3110 Op1Info.getNoProps(), Op2Info.getNoProps());
3111 Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind,
3112 Op1Info.getNoProps(), Op2Info.getNoProps());
3113 Cost += getArithmeticInstrCost(
3114 Instruction::Select, Ty, CostKind,
3115 Op1Info.getNoProps(), Op2Info.getNoProps());
3116 Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
3117 Op1Info.getNoProps(), Op2Info.getNoProps());
3118 return Cost;
3119 }
3120 [[fallthrough]];
3121 case ISD::UDIV: {
3122 if (Op2Info.isConstant() && Op2Info.isUniform()) {
3123 auto VT = TLI->getValueType(DL, Ty);
3124 if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
3125 // Vector signed division by a constant is expanded to the
3126 // sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division
3127 // to MULHU + SUB + SRL + ADD + SRL.
3128 InstructionCost MulCost = getArithmeticInstrCost(
3129 Instruction::Mul, Ty, CostKind, Op1Info.getNoProps(), Op2Info.getNoProps());
3130 InstructionCost AddCost = getArithmeticInstrCost(
3131 Instruction::Add, Ty, CostKind, Op1Info.getNoProps(), Op2Info.getNoProps());
3132 InstructionCost ShrCost = getArithmeticInstrCost(
3133 Instruction::AShr, Ty, CostKind, Op1Info.getNoProps(), Op2Info.getNoProps());
3134 return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
3135 }
3136 }
3137
3138 InstructionCost Cost = BaseT::getArithmeticInstrCost(
3139 Opcode, Ty, CostKind, Op1Info, Op2Info);
3140 if (Ty->isVectorTy()) {
3141 if (TLI->isOperationLegalOrCustom(ISD, LT.second) && ST->hasSVE()) {
3142 // If SDIV/UDIV operations are lowered using SVE, then we can have
3143 // lower costs.
3144 if (isa<FixedVectorType>(Ty) && cast<FixedVectorType>(Ty)
3145 ->getPrimitiveSizeInBits()
3146 .getFixedValue() < 128) {
3147 EVT VT = TLI->getValueType(DL, Ty);
3148 static const CostTblEntry DivTbl[]{
3149 {ISD::SDIV, MVT::v2i8, 5}, {ISD::SDIV, MVT::v4i8, 8},
3150 {ISD::SDIV, MVT::v8i8, 8}, {ISD::SDIV, MVT::v2i16, 5},
3151 {ISD::SDIV, MVT::v4i16, 5}, {ISD::SDIV, MVT::v2i32, 1},
3152 {ISD::UDIV, MVT::v2i8, 5}, {ISD::UDIV, MVT::v4i8, 8},
3153 {ISD::UDIV, MVT::v8i8, 8}, {ISD::UDIV, MVT::v2i16, 5},
3154 {ISD::UDIV, MVT::v4i16, 5}, {ISD::UDIV, MVT::v2i32, 1}};
3155
3156 const auto *Entry = CostTableLookup(DivTbl, ISD, VT.getSimpleVT());
3157 if (nullptr != Entry)
3158 return Entry->Cost;
3159 }
3160 // For 8/16-bit elements, the cost is higher because the type
3161 // requires promotion and possibly splitting:
3162 if (LT.second.getScalarType() == MVT::i8)
3163 Cost *= 8;
3164 else if (LT.second.getScalarType() == MVT::i16)
3165 Cost *= 4;
3166 return Cost;
3167 } else {
3168 // If one of the operands is a uniform constant then the cost for each
3169 // element is the cost of insertion, extraction and the scalar division:
3170 // insertion cost = 2, extraction cost = 2, division = cost of the
3171 // operation on the scalar type.
3172 if ((Op1Info.isConstant() && Op1Info.isUniform()) ||
3173 (Op2Info.isConstant() && Op2Info.isUniform())) {
3174 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
3175 InstructionCost DivCost = BaseT::getArithmeticInstrCost(
3176 Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info);
3177 return (4 + DivCost) * VTy->getNumElements();
3178 }
3179 }
3180 // On AArch64, without SVE, vector divisions are expanded
3181 // into scalar divisions of each pair of elements.
3182 Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty,
3183 CostKind, Op1Info, Op2Info);
3184 Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind,
3185 Op1Info, Op2Info);
3186 }
3187
3188 // TODO: if one of the arguments is scalar, then it's not necessary to
3189 // double the cost of handling the vector elements.
3190 Cost += Cost;
3191 }
3192 return Cost;
3193 }
3194 case ISD::MUL:
3195 // When SVE is available, we can lower the v2i64 operation using the
3196 // SVE mul instruction, which has a lower cost.
3197 if (LT.second == MVT::v2i64 && ST->hasSVE())
3198 return LT.first;
3199
3200 // When SVE is not available, there is no MUL.2d instruction,
3201 // which means mul <2 x i64> is expensive as elements are extracted
3202 // from the vectors and the muls scalarized.
3203 // As getScalarizationOverhead is a bit too pessimistic, we
3204 // estimate the cost for a i64 vector directly here, which is:
3205 // - four 2-cost i64 extracts,
3206 // - two 2-cost i64 inserts, and
3207 // - two 1-cost muls.
3208 // So, for a v2i64 with LT.first = 1 the cost is 14, and for a v4i64 with
3209 // LT.first = 2 the cost is 28. If both operands are extensions it will not
3210 // need to scalarize, so the cost can be cheaper (smull or umull).
3212 if (LT.second != MVT::v2i64 || isWideningInstruction(Ty, Opcode, Args))
3213 return LT.first;
3214 return LT.first * 14;
3215 case ISD::ADD:
3216 case ISD::XOR:
3217 case ISD::OR:
3218 case ISD::AND:
3219 case ISD::SRL:
3220 case ISD::SRA:
3221 case ISD::SHL:
3222 // These nodes are marked as 'custom' for combining purposes only.
3223 // We know that they are legal. See LowerAdd in ISelLowering.
3224 return LT.first;
3225
3226 case ISD::FNEG:
3227 case ISD::FADD:
3228 case ISD::FSUB:
3229 // Increase the cost for half and bfloat types if not architecturally
3230 // supported.
3231 if ((Ty->getScalarType()->isHalfTy() && !ST->hasFullFP16()) ||
3232 (Ty->getScalarType()->isBFloatTy() && !ST->hasBF16()))
3233 return 2 * LT.first;
3234 if (!Ty->getScalarType()->isFP128Ty())
3235 return LT.first;
3236 [[fallthrough]];
3237 case ISD::FMUL:
3238 case ISD::FDIV:
3239 // These nodes are marked as 'custom' just to lower them to SVE.
3240 // We know said lowering will incur no additional cost.
3241 if (!Ty->getScalarType()->isFP128Ty())
3242 return 2 * LT.first;
3243
3244 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
3245 Op2Info);
3246 case ISD::FREM:
3247 // Pass nullptr as fmod/fmodf calls are emitted by the backend even when
3248 // those functions are not declared in the module.
3249 if (!Ty->isVectorTy())
3250 return getCallInstrCost(/*Function*/ nullptr, Ty, {Ty, Ty}, CostKind);
3251 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
3252 Op2Info);
3253 }
3254}
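// Worked example (illustrative, not part of the original source): a plain
//   %m = mul <2 x i64> %a, %b
// on a NEON-only subtarget has no 2x64-bit vector multiply, so the model
// above charges LT.first * 14: four extracts at cost 2, two inserts at cost 2
// and two scalar muls at cost 1 (8 + 4 + 2). If the operands are matching
// sign/zero extends, isWideningInstruction() recognises a smull/umull pattern
// and the cost drops back to LT.first.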
3255
3256 InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty,
3257 ScalarEvolution *SE,
3258 const SCEV *Ptr) {
3259 // Address computations in vectorized code with non-consecutive addresses will
3260 // likely result in more instructions compared to scalar code where the
3261 // computation can more often be merged into the index mode. The resulting
3262 // extra micro-ops can significantly decrease throughput.
3263 unsigned NumVectorInstToHideOverhead = NeonNonConstStrideOverhead;
3264 int MaxMergeDistance = 64;
3265
3266 if (Ty->isVectorTy() && SE &&
3267 !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
3268 return NumVectorInstToHideOverhead;
3269
3270 // In many cases the address computation is not merged into the instruction
3271 // addressing mode.
3272 return 1;
3273}
3274
3275 InstructionCost AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
3276 Type *CondTy,
3277 CmpInst::Predicate VecPred,
3278 TTI::TargetCostKind CostKind,
3279 const Instruction *I) {
3280 // TODO: Handle other cost kinds.
3281 if (CostKind != TTI::TCK_RecipThroughput)
3282 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
3283 I);
3284
3285 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3286 // We don't lower vector selects that are wider than the register width
3287 // well.
3288 if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) {
3289 // We would need this many instructions to hide the scalarization happening.
3290 const int AmortizationCost = 20;
3291
3292 // If VecPred is not set, check if we can get a predicate from the context
3293 // instruction, if its type matches the requested ValTy.
3294 if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) {
3295 CmpInst::Predicate CurrentPred;
3296 if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(),
3297 m_Value())))
3298 VecPred = CurrentPred;
3299 }
3300 // Check if we have a compare/select chain that can be lowered using
3301 // a (F)CMxx & BFI pair.
3302 if (CmpInst::isIntPredicate(VecPred) || VecPred == CmpInst::FCMP_OLE ||
3303 VecPred == CmpInst::FCMP_OLT || VecPred == CmpInst::FCMP_OGT ||
3304 VecPred == CmpInst::FCMP_OGE || VecPred == CmpInst::FCMP_OEQ ||
3305 VecPred == CmpInst::FCMP_UNE) {
3306 static const auto ValidMinMaxTys = {
3307 MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32,
3308 MVT::v4i32, MVT::v2i64, MVT::v2f32, MVT::v4f32, MVT::v2f64};
3309 static const auto ValidFP16MinMaxTys = {MVT::v4f16, MVT::v8f16};
3310
3311 auto LT = getTypeLegalizationCost(ValTy);
3312 if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }) ||
3313 (ST->hasFullFP16() &&
3314 any_of(ValidFP16MinMaxTys, [&LT](MVT M) { return M == LT.second; })))
3315 return LT.first;
3316 }
3317
3318 static const TypeConversionCostTblEntry
3319 VectorSelectTbl[] = {
3320 { ISD::SELECT, MVT::v2i1, MVT::v2f32, 2 },
3321 { ISD::SELECT, MVT::v2i1, MVT::v2f64, 2 },
3322 { ISD::SELECT, MVT::v4i1, MVT::v4f32, 2 },
3323 { ISD::SELECT, MVT::v4i1, MVT::v4f16, 2 },
3324 { ISD::SELECT, MVT::v8i1, MVT::v8f16, 2 },
3325 { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
3326 { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 },
3327 { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
3328 { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
3329 { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
3330 { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
3331 };
3332
3333 EVT SelCondTy = TLI->getValueType(DL, CondTy);
3334 EVT SelValTy = TLI->getValueType(DL, ValTy);
3335 if (SelCondTy.isSimple() && SelValTy.isSimple()) {
3336 if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
3337 SelCondTy.getSimpleVT(),
3338 SelValTy.getSimpleVT()))
3339 return Entry->Cost;
3340 }
3341 }
3342
3343 if (isa<FixedVectorType>(ValTy) && ISD == ISD::SETCC) {
3344 auto LT = getTypeLegalizationCost(ValTy);
3345 // Cost v4f16 FCmp without FP16 support via converting to v4f32 and back.
3346 if (LT.second == MVT::v4f16 && !ST->hasFullFP16())
3347 return LT.first * 4; // fcvtl + fcvtl + fcmp + xtn
3348 }
3349
3350 // Treat the icmp in icmp(and, 0) as free, as we can make use of ands.
3351 // FIXME: This can apply to more conditions and add/sub if it can be shown to
3352 // be profitable.
3353 if (ValTy->isIntegerTy() && ISD == ISD::SETCC && I &&
3354 ICmpInst::isEquality(VecPred) &&
3355 TLI->isTypeLegal(TLI->getValueType(DL, ValTy)) &&
3356 match(I->getOperand(1), m_Zero()) &&
3357 match(I->getOperand(0), m_And(m_Value(), m_Value())))
3358 return 0;
3359
3360 // The base case handles scalable vectors fine for now, since it treats the
3361 // cost as 1 * legalization cost.
3362 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
3363}
3364
3365 TTI::MemCmpExpansionOptions
3366 AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
3367 TTI::MemCmpExpansionOptions Options;
3368 if (ST->requiresStrictAlign()) {
3369 // TODO: Add cost modeling for strict align. Misaligned loads expand to
3370 // a bunch of instructions when strict align is enabled.
3371 return Options;
3372 }
3373 Options.AllowOverlappingLoads = true;
3374 Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
3375 Options.NumLoadsPerBlock = Options.MaxNumLoads;
3376 // TODO: Though vector loads usually perform well on AArch64, in some targets
3377 // they may wake up the FP unit, which raises the power consumption. Perhaps
3378 // they could be used with no holds barred (-O3).
3379 Options.LoadSizes = {8, 4, 2, 1};
3380 Options.AllowedTailExpansions = {3, 5, 6};
3381 return Options;
3382}
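// Illustrative example (not part of the original source): with the options
// above, a call such as memcmp(a, b, 15) can be expanded inline using
// overlapping loads (e.g. two 8-byte loads per buffer, at offsets 0 and 7)
// instead of a libcall, since AllowOverlappingLoads is set and 8 is the
// largest entry in LoadSizes. AllowedTailExpansions additionally lets
// odd-sized tails of 3, 5 or 6 bytes be expanded with two narrower loads.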
3383
3384 bool AArch64TTIImpl::prefersVectorizedAddressing() const {
3385 return ST->hasSVE();
3386}
3387
3388 InstructionCost
3389 AArch64TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
3390 Align Alignment, unsigned AddressSpace,
3391 TTI::TargetCostKind CostKind) {
3392 if (useNeonVector(Src))
3393 return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3394 CostKind);
3395 auto LT = getTypeLegalizationCost(Src);
3396 if (!LT.first.isValid())
3397 return InstructionCost::getInvalid();
3398
3399 // Return an invalid cost for element types that we are unable to lower.
3400 auto *VT = cast<VectorType>(Src);
3401 if (VT->getElementType()->isIntegerTy(1))
3402 return InstructionCost::getInvalid();
3403
3404 // The code-generator is currently not able to handle scalable vectors
3405 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
3406 // it. This change will be removed when code-generation for these types is
3407 // sufficiently reliable.
3408 if (VT->getElementCount() == ElementCount::getScalable(1))
3409 return InstructionCost::getInvalid();
3410
3411 return LT.first;
3412}
3413
3414 // This function returns the gather/scatter overhead, either from the
3415 // user-provided value or from per-target specialized values in \p ST.
3416static unsigned getSVEGatherScatterOverhead(unsigned Opcode,
3417 const AArch64Subtarget *ST) {
3418 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
3419 "Should only be called on loads or stores.");
3420 switch (Opcode) {
3421 case Instruction::Load:
3422 if (SVEGatherOverhead.getNumOccurrences() > 0)
3423 return SVEGatherOverhead;
3424 return ST->getGatherOverhead();
3425 break;
3426 case Instruction::Store:
3427 if (SVEScatterOverhead.getNumOccurrences() > 0)
3428 return SVEScatterOverhead;
3429 return ST->getScatterOverhead();
3430 break;
3431 default:
3432 llvm_unreachable("Shouldn't have reached here");
3433 }
3434}
3435
3436 InstructionCost AArch64TTIImpl::getGatherScatterOpCost(
3437 unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
3438 Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
3439 if (useNeonVector(DataTy) || !isLegalMaskedGatherScatter(DataTy))
3440 return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
3441 Alignment, CostKind, I);
3442 auto *VT = cast<VectorType>(DataTy);
3443 auto LT = getTypeLegalizationCost(DataTy);
3444 if (!LT.first.isValid())
3445 return InstructionCost::getInvalid();
3446
3447 // Return an invalid cost for element types that we are unable to lower.
3448 if (!LT.second.isVector() ||
3449 !isElementTypeLegalForScalableVector(VT->getElementType()) ||
3450 VT->getElementType()->isIntegerTy(1))
3451 return InstructionCost::getInvalid();
3452
3453 // The code-generator is currently not able to handle scalable vectors
3454 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
3455 // it. This change will be removed when code-generation for these types is
3456 // sufficiently reliable.
3457 if (VT->getElementCount() == ElementCount::getScalable(1))
3458 return InstructionCost::getInvalid();
3459
3460 ElementCount LegalVF = LT.second.getVectorElementCount();
3461 InstructionCost MemOpCost =
3462 getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind,
3463 {TTI::OK_AnyValue, TTI::OP_None}, I);
3464 // Add on an overhead cost for using gathers/scatters.
3465 MemOpCost *= getSVEGatherScatterOverhead(Opcode, ST);
3466 return LT.first * MemOpCost * getMaxNumElements(LegalVF);
3467}
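// Worked example (illustrative, not part of the original source): a gather of
// <vscale x 4 x i32> is priced as
//   LT.first * (scalar i32 load cost * gather overhead) * max element count
// where the overhead defaults to 10 (sve-gather-overhead) and the final
// factor is the maximum number of elements the legalized type can hold. The
// intent is that gathers/scatters end up far more expensive than contiguous
// accesses, so the vectorizer only picks them when they clearly pay off.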
3468
3469 bool AArch64TTIImpl::useNeonVector(const Type *Ty) const {
3470 return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors();
3471}
3472
3473 InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
3474 MaybeAlign Alignment,
3475 unsigned AddressSpace,
3476 TTI::TargetCostKind CostKind,
3477 TTI::OperandValueInfo OpInfo,
3478 const Instruction *I) {
3479 EVT VT = TLI->getValueType(DL, Ty, true);
3480 // Type legalization can't handle structs
3481 if (VT == MVT::Other)
3482 return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace,
3483 CostKind);
3484
3485 auto LT = getTypeLegalizationCost(Ty);
3486 if (!LT.first.isValid())
3487 return InstructionCost::getInvalid();
3488
3489 // The code-generator is currently not able to handle scalable vectors
3490 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
3491 // it. This change will be removed when code-generation for these types is
3492 // sufficiently reliable.
3493 // We also only support full register predicate loads and stores.
3494 if (auto *VTy = dyn_cast<ScalableVectorType>(Ty))
3495 if (VTy->getElementCount() == ElementCount::getScalable(1) ||
3496 (VTy->getElementType()->isIntegerTy(1) &&
3497 !VTy->getElementCount().isKnownMultipleOf(
3498 ElementCount::getScalable(16))))
3499 return InstructionCost::getInvalid();
3500
3501 // TODO: consider latency as well for TCK_SizeAndLatency.
3502 if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
3503 return LT.first;
3504
3505 if (CostKind != TTI::TCK_RecipThroughput)
3506 return 1;
3507
3508 if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
3509 LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) {
3510 // Unaligned stores are extremely inefficient. We don't split all
3511 // unaligned 128-bit stores because of the negative impact that has been
3512 // shown in practice on inlined block copy code.
3513 // We make such stores expensive so that we will only vectorize if there
3514 // are 6 other instructions getting vectorized.
3515 const int AmortizationCost = 6;
3516
3517 return LT.first * 2 * AmortizationCost;
3518 }
3519
3520 // Opaque ptr or ptr vector types are i64s and can be lowered to STP/LDPs.
3521 if (Ty->isPtrOrPtrVectorTy())
3522 return LT.first;
3523
3524 if (useNeonVector(Ty)) {
3525 // Check truncating stores and extending loads.
3526 if (Ty->getScalarSizeInBits() != LT.second.getScalarSizeInBits()) {
3527 // v4i8 types are lowered to a scalar load/store and sshll/xtn.
3528 if (VT == MVT::v4i8)
3529 return 2;
3530 // Otherwise we need to scalarize.
3531 return cast<FixedVectorType>(Ty)->getNumElements() * 2;
3532 }
3533 EVT EltVT = VT.getVectorElementType();
3534 unsigned EltSize = EltVT.getScalarSizeInBits();
3535 if (!isPowerOf2_32(EltSize) || EltSize < 8 || EltSize > 64 ||
3536 VT.getVectorNumElements() >= (128 / EltSize) || !Alignment ||
3537 *Alignment != Align(1))
3538 return LT.first;
3539 // FIXME: v3i8 lowering currently is very inefficient, due to automatic
3540 // widening to v4i8, which produces suboptimal results.
3541 if (VT.getVectorNumElements() == 3 && EltVT == MVT::i8)
3542 return LT.first;
3543
3544 // Check non-power-of-2 loads/stores for legal vector element types with
3545 // NEON. Non-power-of-2 memory ops will get broken down to a set of
3546 // operations on smaller power-of-2 ops, including ld1/st1.
3547 LLVMContext &C = Ty->getContext();
3548 InstructionCost Cost = 0;
3549 SmallVector<EVT> TypeWorklist;
3550 TypeWorklist.push_back(VT);
3551 while (!TypeWorklist.empty()) {
3552 EVT CurrVT = TypeWorklist.pop_back_val();
3553 unsigned CurrNumElements = CurrVT.getVectorNumElements();
3554 if (isPowerOf2_32(CurrNumElements)) {
3555 Cost += 1;
3556 continue;
3557 }
3558
3559 unsigned PrevPow2 = NextPowerOf2(CurrNumElements) / 2;
3560 TypeWorklist.push_back(EVT::getVectorVT(C, EltVT, PrevPow2));
3561 TypeWorklist.push_back(
3562 EVT::getVectorVT(C, EltVT, CurrNumElements - PrevPow2));
3563 }
3564 return Cost;
3565 }
3566
3567 return LT.first;
3568}
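// Worked example (illustrative, not part of the original source): the NEON
// worklist loop above decomposes non-power-of-2 element counts, so an align-1
// store of <3 x i32> is costed as a <2 x i32> piece plus a single-element
// piece, i.e. a cost of 2, while a power-of-2 vector such as <4 x i32> simply
// returns LT.first.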
3569
3570 InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost(
3571 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
3572 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
3573 bool UseMaskForCond, bool UseMaskForGaps) {
3574 assert(Factor >= 2 && "Invalid interleave factor");
3575 auto *VecVTy = cast<VectorType>(VecTy);
3576
3577 if (VecTy->isScalableTy() && (!ST->hasSVE() || Factor != 2))
3578 return InstructionCost::getInvalid();
3579
3580 // Vectorization for masked interleaved accesses is only enabled for scalable
3581 // VF.
3582 if (!VecTy->isScalableTy() && (UseMaskForCond || UseMaskForGaps))
3583 return InstructionCost::getInvalid();
3584
3585 if (!UseMaskForGaps && Factor <= TLI->getMaxSupportedInterleaveFactor()) {
3586 unsigned MinElts = VecVTy->getElementCount().getKnownMinValue();
3587 auto *SubVecTy =
3588 VectorType::get(VecVTy->getElementType(),
3589 VecVTy->getElementCount().divideCoefficientBy(Factor));
3590
3591 // ldN/stN only support legal vector types of size 64 or 128 in bits.
3592 // Accesses having vector types that are a multiple of 128 bits can be
3593 // matched to more than one ldN/stN instruction.
3594 bool UseScalable;
3595 if (MinElts % Factor == 0 &&
3596 TLI->isLegalInterleavedAccessType(SubVecTy, DL, UseScalable))
3597 return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL, UseScalable);
3598 }
3599
3600 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
3601 Alignment, AddressSpace, CostKind,
3602 UseMaskForCond, UseMaskForGaps);
3603}
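// Illustrative example (not part of the original source): a de-interleaved
// load group such as
//   %wide = load <8 x i32>, ptr %p
//   %even = shufflevector <8 x i32> %wide, <8 x i32> poison, <0, 2, 4, 6>
//   %odd  = shufflevector <8 x i32> %wide, <8 x i32> poison, <1, 3, 5, 7>
// has Factor = 2 and SubVecTy = <4 x i32>, a legal 128-bit ld2 target, so the
// returned cost is Factor * NumInterleavedAccesses = 2 * 1 = 2.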
3604
3605 InstructionCost
3606 AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
3607 InstructionCost Cost = 0;
3608 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
3609 for (auto *I : Tys) {
3610 if (!I->isVectorTy())
3611 continue;
3612 if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() ==
3613 128)
3614 Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
3615 getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
3616 }
3617 return Cost;
3618}
3619
3620 unsigned AArch64TTIImpl::getMaxInterleaveFactor(ElementCount VF) {
3621 return ST->getMaxInterleaveFactor();
3622}
3623
3624// For Falkor, we want to avoid having too many strided loads in a loop since
3625// that can exhaust the HW prefetcher resources. We adjust the unroller
3626// MaxCount preference below to attempt to ensure unrolling doesn't create too
3627// many strided loads.
3628 static void
3629 getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
3630 TargetTransformInfo::UnrollingPreferences &UP) {
3631 enum { MaxStridedLoads = 7 };
3632 auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
3633 int StridedLoads = 0;
3634 // FIXME? We could make this more precise by looking at the CFG and
3635 // e.g. not counting loads in each side of an if-then-else diamond.
3636 for (const auto BB : L->blocks()) {
3637 for (auto &I : *BB) {
3638 LoadInst *LMemI = dyn_cast<LoadInst>(&I);
3639 if (!LMemI)
3640 continue;
3641
3642 Value *PtrValue = LMemI->getPointerOperand();
3643 if (L->isLoopInvariant(PtrValue))
3644 continue;
3645
3646 const SCEV *LSCEV = SE.getSCEV(PtrValue);
3647 const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
3648 if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
3649 continue;
3650
3651 // FIXME? We could take pairing of unrolled load copies into account
3652 // by looking at the AddRec, but we would probably have to limit this
3653 // to loops with no stores or other memory optimization barriers.
3654 ++StridedLoads;
3655 // We've seen enough strided loads that seeing more won't make a
3656 // difference.
3657 if (StridedLoads > MaxStridedLoads / 2)
3658 return StridedLoads;
3659 }
3660 }
3661 return StridedLoads;
3662 };
3663
3664 int StridedLoads = countStridedLoads(L, SE);
3665 LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
3666 << " strided loads\n");
3667 // Pick the largest power of 2 unroll count that won't result in too many
3668 // strided loads.
3669 if (StridedLoads) {
3670 UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
3671 LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
3672 << UP.MaxCount << '\n');
3673 }
3674}
3675
3676 void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
3677 TTI::UnrollingPreferences &UP,
3678 OptimizationRemarkEmitter *ORE) {
3679 // Enable partial unrolling and runtime unrolling.
3680 BaseT::getUnrollingPreferences(L, SE, UP, ORE);
3681
3682 UP.UpperBound = true;
3683
3684 // An inner loop is more likely to be hot, and the runtime check can be
3685 // hoisted out by the LICM pass, so the overhead is lower; try a larger
3686 // threshold to unroll more loops.
3687 if (L->getLoopDepth() > 1)
3688 UP.PartialThreshold *= 2;
3689
3690 // Disable partial & runtime unrolling on -Os.
3691 UP.PartialOptSizeThreshold = 0;
3692
3693 if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
3694 EnableFalkorHWPFUnrollFix)
3695 getFalkorUnrollingPreferences(L, SE, UP);
3696
3697 // Scan the loop: don't unroll loops with calls as this could prevent
3698 // inlining. Don't unroll vector loops either, as they don't benefit much from
3699 // unrolling.
3700 for (auto *BB : L->getBlocks()) {
3701 for (auto &I : *BB) {
3702 // Don't unroll vectorised loop.
3703 if (I.getType()->isVectorTy())
3704 return;
3705
3706 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
3707 if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
3708 if (!isLoweredToCall(F))
3709 continue;
3710 }
3711 return;
3712 }
3713 }
3714 }
3715
3716 // Enable runtime unrolling for in-order models.
3717 // If mcpu is omitted, getProcFamily() returns AArch64Subtarget::Others, so
3718 // by checking for that case we can ensure that the default behaviour is
3719 // unchanged.
3720 if (ST->getProcFamily() != AArch64Subtarget::Others &&
3721 !ST->getSchedModel().isOutOfOrder()) {
3722 UP.Runtime = true;
3723 UP.Partial = true;
3724 UP.UnrollRemainder = true;
3725 UP.DefaultUnrollRuntimeCount = 4;
3726
3727 UP.UnrollAndJam = true;
3728 UP.UnrollAndJamInnerLoopThreshold = 60;
3729 }
3730}
3731
3735}
3736
3737 Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
3738 Type *ExpectedType) {
3739 switch (Inst->getIntrinsicID()) {
3740 default:
3741 return nullptr;
3742 case Intrinsic::aarch64_neon_st2:
3743 case Intrinsic::aarch64_neon_st3:
3744 case Intrinsic::aarch64_neon_st4: {
3745 // Create a struct type
3746 StructType *ST = dyn_cast<StructType>(ExpectedType);
3747 if (!ST)
3748 return nullptr;
3749 unsigned NumElts = Inst->arg_size() - 1;
3750 if (ST->getNumElements() != NumElts)
3751 return nullptr;
3752 for (unsigned i = 0, e = NumElts; i != e; ++i) {
3753 if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
3754 return nullptr;
3755 }
3756 Value *Res = PoisonValue::get(ExpectedType);
3757 IRBuilder<> Builder(Inst);
3758 for (unsigned i = 0, e = NumElts; i != e; ++i) {
3759 Value *L = Inst->getArgOperand(i);
3760 Res = Builder.CreateInsertValue(Res, L, i);
3761 }
3762 return Res;
3763 }
3764 case Intrinsic::aarch64_neon_ld2:
3765 case Intrinsic::aarch64_neon_ld3:
3766 case Intrinsic::aarch64_neon_ld4:
3767 if (Inst->getType() == ExpectedType)
3768 return Inst;
3769 return nullptr;
3770 }
3771}
3772
3773 bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
3774 MemIntrinsicInfo &Info) {
3775 switch (Inst->getIntrinsicID()) {
3776 default:
3777 break;
3778 case Intrinsic::aarch64_neon_ld2:
3779 case Intrinsic::aarch64_neon_ld3:
3780 case Intrinsic::aarch64_neon_ld4:
3781 Info.ReadMem = true;
3782 Info.WriteMem = false;
3783 Info.PtrVal = Inst->getArgOperand(0);
3784 break;
3785 case Intrinsic::aarch64_neon_st2:
3786 case Intrinsic::aarch64_neon_st3:
3787 case Intrinsic::aarch64_neon_st4:
3788 Info.ReadMem = false;
3789 Info.WriteMem = true;
3790 Info.PtrVal = Inst->getArgOperand(Inst->arg_size() - 1);
3791 break;
3792 }
3793
3794 switch (Inst->getIntrinsicID()) {
3795 default:
3796 return false;
3797 case Intrinsic::aarch64_neon_ld2:
3798 case Intrinsic::aarch64_neon_st2:
3799 Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
3800 break;
3801 case Intrinsic::aarch64_neon_ld3:
3802 case Intrinsic::aarch64_neon_st3:
3803 Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
3804 break;
3805 case Intrinsic::aarch64_neon_ld4:
3806 case Intrinsic::aarch64_neon_st4:
3807 Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
3808 break;
3809 }
3810 return true;
3811}
3812
3813/// See if \p I should be considered for address type promotion. We check if
3814/// \p I is a sext with the right type and used in memory accesses. If it is
3815/// used in a "complex" getelementptr, we allow it to be promoted without
3816/// finding other sext instructions that sign-extended the same initial value.
3817/// A getelementptr is considered "complex" if it has more than 2 operands.
3818 bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
3819 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
3820 bool Considerable = false;
3821 AllowPromotionWithoutCommonHeader = false;
3822 if (!isa<SExtInst>(&I))
3823 return false;
3824 Type *ConsideredSExtType =
3825 Type::getInt64Ty(I.getParent()->getParent()->getContext());
3826 if (I.getType() != ConsideredSExtType)
3827 return false;
3828 // See if the sext is the one with the right type and used in at least one
3829 // GetElementPtrInst.
3830 for (const User *U : I.users()) {
3831 if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
3832 Considerable = true;
3833 // A getelementptr is considered as "complex" if it has more than 2
3834 // operands. We will promote a SExt used in such complex GEP as we
3835 // expect some computation to be merged if they are done on 64 bits.
3836 if (GEPInst->getNumOperands() > 2) {
3837 AllowPromotionWithoutCommonHeader = true;
3838 break;
3839 }
3840 }
3841 }
3842 return Considerable;
3843}
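// Illustrative example (not part of the original source; names are made up):
// the pattern targeted above is a sext to i64 feeding a multi-operand GEP:
//   %idx64 = sext i32 %idx to i64
//   %p = getelementptr [64 x [64 x i32]], ptr %base, i64 0, i64 %idx64, i64 %j
// The GEP has more than 2 operands, so promotion is allowed without having to
// find a common-header sext, letting the 64-bit address arithmetic fold into
// the addressing computation.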
3844
3845 bool AArch64TTIImpl::isLegalToVectorizeReduction(
3846 const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
3847 if (!VF.isScalable())
3848 return true;
3849
3850 Type *Ty = RdxDesc.getRecurrenceType();
3851 if (Ty->isBFloatTy() || !isElementTypeLegalForScalableVector(Ty))
3852 return false;
3853
3854 switch (RdxDesc.getRecurrenceKind()) {
3855 case RecurKind::Add:
3856 case RecurKind::FAdd:
3857 case RecurKind::And:
3858 case RecurKind::Or:
3859 case RecurKind::Xor:
3860 case RecurKind::SMin:
3861 case RecurKind::SMax:
3862 case RecurKind::UMin:
3863 case RecurKind::UMax:
3864 case RecurKind::FMin:
3865 case RecurKind::FMax:
3866 case RecurKind::FMulAdd:
3867 case RecurKind::IAnyOf:
3868 case RecurKind::FAnyOf:
3869 return true;
3870 default:
3871 return false;
3872 }
3873}
3874
3875 InstructionCost
3876 AArch64TTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
3877 FastMathFlags FMF,
3878 TTI::TargetCostKind CostKind) {
3879 // The code-generator is currently not able to handle scalable vectors
3880 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
3881 // it. This change will be removed when code-generation for these types is
3882 // sufficiently reliable.
3883 if (auto *VTy = dyn_cast<ScalableVectorType>(Ty))
3884 if (VTy->getElementCount() == ElementCount::getScalable(1))
3885 return InstructionCost::getInvalid();
3886
3887 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
3888
3889 if (LT.second.getScalarType() == MVT::f16 && !ST->hasFullFP16())
3890 return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind);
3891
3892 InstructionCost LegalizationCost = 0;
3893 if (LT.first > 1) {
3894 Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext());
3895 IntrinsicCostAttributes Attrs(IID, LegalVTy, {LegalVTy, LegalVTy}, FMF);
3896 LegalizationCost = getIntrinsicInstrCost(Attrs, CostKind) * (LT.first - 1);
3897 }
3898
3899 return LegalizationCost + /*Cost of horizontal reduction*/ 2;
3900}
3901
3902 InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
3903 unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) {
3904 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
3905 InstructionCost LegalizationCost = 0;
3906 if (LT.first > 1) {
3907 Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext());
3908 LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind);
3909 LegalizationCost *= LT.first - 1;
3910 }
3911
3912 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3913 assert(ISD && "Invalid opcode");
3914 // Add the final reduction cost for the legal horizontal reduction
3915 switch (ISD) {
3916 case ISD::ADD:
3917 case ISD::AND:
3918 case ISD::OR:
3919 case ISD::XOR:
3920 case ISD::FADD:
3921 return LegalizationCost + 2;
3922 default:
3923 return InstructionCost::getInvalid();
3924 }
3925}
3926
3927 InstructionCost
3928 AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
3929 std::optional<FastMathFlags> FMF,
3930 TTI::TargetCostKind CostKind) {
3931 // The code-generator is currently not able to handle scalable vectors
3932 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
3933 // it. This change will be removed when code-generation for these types is
3934 // sufficiently reliable.
3935 if (auto *VTy = dyn_cast<ScalableVectorType>(ValTy))
3936 if (VTy->getElementCount() == ElementCount::getScalable(1))
3937 return InstructionCost::getInvalid();
3938
3939 if (TTI::requiresOrderedReduction(FMF)) {
3940 if (auto *FixedVTy = dyn_cast<FixedVectorType>(ValTy)) {
3941 InstructionCost BaseCost =
3942 BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
3943 // Add on extra cost to reflect the extra overhead on some CPUs. We still
3944 // end up vectorizing for more computationally intensive loops.
3945 return BaseCost + FixedVTy->getNumElements();
3946 }
3947
3948 if (Opcode != Instruction::FAdd)
3949 return InstructionCost::getInvalid();
3950
3951 auto *VTy = cast<ScalableVectorType>(ValTy);
3952 InstructionCost Cost =
3953 getArithmeticInstrCost(Opcode, VTy->getScalarType(), CostKind);
3954 Cost *= getMaxNumElements(VTy->getElementCount());
3955 return Cost;
3956 }
3957
3958 if (isa<ScalableVectorType>(ValTy))
3959 return getArithmeticReductionCostSVE(Opcode, ValTy, CostKind);
3960
3961 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
3962 MVT MTy = LT.second;
3963 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3964 assert(ISD && "Invalid opcode");
3965
3966 // Horizontal adds can use the 'addv' instruction. We model the cost of these
3967 // instructions as twice a normal vector add, plus 1 for each legalization
3968 // step (LT.first). This is the only arithmetic vector reduction operation for
3969 // which we have an instruction.
3970 // OR, XOR and AND costs should match the codegen from:
3971 // OR: llvm/test/CodeGen/AArch64/reduce-or.ll
3972 // XOR: llvm/test/CodeGen/AArch64/reduce-xor.ll
3973 // AND: llvm/test/CodeGen/AArch64/reduce-and.ll
3974 static const CostTblEntry CostTblNoPairwise[]{
3975 {ISD::ADD, MVT::v8i8, 2},
3976 {ISD::ADD, MVT::v16i8, 2},
3977 {ISD::ADD, MVT::v4i16, 2},
3978 {ISD::ADD, MVT::v8i16, 2},
3979 {ISD::ADD, MVT::v4i32, 2},
3980 {ISD::ADD, MVT::v2i64, 2},
3981 {ISD::OR, MVT::v8i8, 15},
3982 {ISD::OR, MVT::v16i8, 17},
3983 {ISD::OR, MVT::v4i16, 7},
3984 {ISD::OR, MVT::v8i16, 9},
3985 {ISD::OR, MVT::v2i32, 3},
3986 {ISD::OR, MVT::v4i32, 5},
3987 {ISD::OR, MVT::v2i64, 3},
3988 {ISD::XOR, MVT::v8i8, 15},
3989 {ISD::XOR, MVT::v16i8, 17},
3990 {ISD::XOR, MVT::v4i16, 7},
3991 {ISD::XOR, MVT::v8i16, 9},
3992 {ISD::XOR, MVT::v2i32, 3},
3993 {ISD::XOR, MVT::v4i32, 5},
3994 {ISD::XOR, MVT::v2i64, 3},
3995 {ISD::AND, MVT::v8i8, 15},
3996 {ISD::AND, MVT::v16i8, 17},
3997 {ISD::AND, MVT::v4i16, 7},
3998 {ISD::AND, MVT::v8i16, 9},
3999 {ISD::AND, MVT::v2i32, 3},
4000 {ISD::AND, MVT::v4i32, 5},
4001 {ISD::AND, MVT::v2i64, 3},
4002 };
4003 switch (ISD) {
4004 default:
4005 break;
4006 case ISD::ADD:
4007 if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
4008 return (LT.first - 1) + Entry->Cost;
4009 break;
4010 case ISD::XOR:
4011 case ISD::AND:
4012 case ISD::OR:
4013 const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy);
4014 if (!Entry)
4015 break;
4016 auto *ValVTy = cast<FixedVectorType>(ValTy);
4017 if (MTy.getVectorNumElements() <= ValVTy->getNumElements() &&
4018 isPowerOf2_32(ValVTy->getNumElements())) {
4019 InstructionCost ExtraCost = 0;
4020 if (LT.first != 1) {
4021 // Type needs to be split, so there is an extra cost of LT.first - 1
4022 // arithmetic ops.
4023 auto *Ty = FixedVectorType::get(ValTy->getElementType(),
4024 MTy.getVectorNumElements());
4025 ExtraCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
4026 ExtraCost *= LT.first - 1;
4027 }
4028 // All and/or/xor of i1 will be lowered with maxv/minv/addv + fmov
4029 auto Cost = ValVTy->getElementType()->isIntegerTy(1) ? 2 : Entry->Cost;
4030 return Cost + ExtraCost;
4031 }
4032 break;
4033 }
4034 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
4035}
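// Worked example (illustrative, not part of the original source): an
// @llvm.vector.reduce.add on <16 x i8> maps to the {ISD::ADD, MVT::v16i8, 2}
// entry above, i.e. one addv modelled at twice a vector add. A <32 x i8>
// input is first split (LT.first = 2), adding LT.first - 1 = 1 for a total
// cost of 3.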
4036
4037 InstructionCost AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index) {
4038 static const CostTblEntry ShuffleTbl[] = {
4039 { TTI::SK_Splice, MVT::nxv16i8, 1 },
4040 { TTI::SK_Splice, MVT::nxv8i16, 1 },
4041 { TTI::SK_Splice, MVT::nxv4i32, 1 },
4042 { TTI::SK_Splice, MVT::nxv2i64, 1 },
4043 { TTI::SK_Splice, MVT::nxv2f16, 1 },
4044 { TTI::SK_Splice, MVT::nxv4f16, 1 },
4045 { TTI::SK_Splice, MVT::nxv8f16, 1 },
4046 { TTI::SK_Splice, MVT::nxv2bf16, 1 },
4047 { TTI::SK_Splice, MVT::nxv4bf16, 1 },
4048 { TTI::SK_Splice, MVT::nxv8bf16, 1 },
4049 { TTI::SK_Splice, MVT::nxv2f32, 1 },
4050 { TTI::SK_Splice, MVT::nxv4f32, 1 },
4051 { TTI::SK_Splice, MVT::nxv2f64, 1 },
4052 };
4053
4054 // The code-generator is currently not able to handle scalable vectors
4055 // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
4056 // it. This change will be removed when code-generation for these types is
4057 // sufficiently reliable.
4058 if (Tp->getElementCount() == ElementCount::getScalable(1))
4059 return InstructionCost::getInvalid();
4060
4061 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
4062 Type *LegalVTy = EVT(LT.second).getTypeForEVT(Tp->getContext());
4063 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4064 EVT PromotedVT = LT.second.getScalarType() == MVT::i1
4065 ? TLI->getPromotedVTForPredicate(EVT(LT.second))
4066 : LT.second;
4067 Type *PromotedVTy = EVT(PromotedVT).getTypeForEVT(Tp->getContext());
4068 InstructionCost LegalizationCost = 0;
4069 if (Index < 0) {
4070 LegalizationCost =
4071 getCmpSelInstrCost(Instruction::ICmp, PromotedVTy, PromotedVTy,
4072 CmpInst::BAD_ICMP_PREDICATE, CostKind) +
4073 getCmpSelInstrCost(Instruction::Select, PromotedVTy, LegalVTy,
4074 CmpInst::BAD_ICMP_PREDICATE, CostKind);
4075 }
4076
4077 // Predicated splice are promoted when lowering. See AArch64ISelLowering.cpp
4078 // Cost performed on a promoted type.
4079 if (LT.second.getScalarType() == MVT::i1) {
4080 LegalizationCost +=
4081 getCastInstrCost(Instruction::ZExt, PromotedVTy, LegalVTy,
4082 TTI::CastContextHint::None, CostKind) +
4083 getCastInstrCost(Instruction::Trunc, LegalVTy, PromotedVTy,
4084 TTI::CastContextHint::None, CostKind);
4085 }
4086 const auto *Entry =
4087 CostTableLookup(ShuffleTbl, TTI::SK_Splice, PromotedVT.getSimpleVT());
4088 assert(Entry && "Illegal Type for Splice");
4089 LegalizationCost += Entry->Cost;
4090 return LegalizationCost * LT.first;
4091}
4092
4093 InstructionCost AArch64TTIImpl::getShuffleCost(
4094 TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef<int> Mask,
4095 TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
4096 ArrayRef<const Value *> Args, const Instruction *CxtI) {
4097 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
4098
4099 // If we have a Mask, and the LT is being legalized somehow, split the Mask
4100 // into smaller vectors and sum the cost of each shuffle.
4101 if (!Mask.empty() && isa<FixedVectorType>(Tp) && LT.second.isVector() &&
4102 Tp->getScalarSizeInBits() == LT.second.getScalarSizeInBits() &&
4103 Mask.size() > LT.second.getVectorNumElements() && !Index && !SubTp) {
4104
4105 // Check for LD3/LD4 instructions, which are represented in llvm IR as
4106 // deinterleaving-shuffle(load). The shuffle cost could potentially be free,
4107 // but we model it with a cost of LT.first so that LD3/LD4 have a higher
4108 // cost than just the load.
4109 if (Args.size() >= 1 && isa<LoadInst>(Args[0]) &&
4110 (ShuffleVectorInst::isDeInterleaveMaskOfFactor(Mask, 4) ||
4111 ShuffleVectorInst::isDeInterleaveMaskOfFactor(Mask, 3)))
4112 return std::max<InstructionCost>(1, LT.first / 4);
4113
4114 // Check for ST3/ST4 instructions, which are represented in llvm IR as
4115 // store(interleaving-shuffle). The shuffle cost could potentially be free,
4116 // but we model it with a cost of LT.first so that ST3/ST4 have a higher
4117 // cost than just the store.
4118 if (CxtI && CxtI->hasOneUse() && isa<StoreInst>(*CxtI->user_begin()) &&
4119 (ShuffleVectorInst::isInterleaveMask(
4120 Mask, 4, Tp->getElementCount().getKnownMinValue() * 2) ||
4121 ShuffleVectorInst::isInterleaveMask(
4122 Mask, 3, Tp->getElementCount().getKnownMinValue() * 2)))
4123 return LT.first;
4124
4125 unsigned TpNumElts = Mask.size();
4126 unsigned LTNumElts = LT.second.getVectorNumElements();
4127 unsigned NumVecs = (TpNumElts + LTNumElts - 1) / LTNumElts;
4128 VectorType *NTp =
4129 VectorType::get(Tp->getScalarType(), LT.second.getVectorElementCount());
4130 InstructionCost Cost;
4131 for (unsigned N = 0; N < NumVecs; N++) {
4132 SmallVector<int> NMask;
4133 // Split the existing mask into chunks of size LTNumElts. Track the source
4134 // sub-vectors to ensure the result has at most 2 inputs.
4135 unsigned Source1, Source2;
4136 unsigned NumSources = 0;
4137 for (unsigned E = 0; E < LTNumElts; E++) {
4138 int MaskElt = (N * LTNumElts + E < TpNumElts) ? Mask[N * LTNumElts + E]
4139 : PoisonMaskElem;
4140 if (MaskElt < 0) {
4141 NMask.push_back(PoisonMaskElem);
4142 continue;
4143 }
4144
4145 // Calculate which source from the input this comes from and whether it
4146 // is new to us.
4147 unsigned Source = MaskElt / LTNumElts;
4148 if (NumSources == 0) {
4149 Source1 = Source;
4150 NumSources = 1;
4151 } else if (NumSources == 1 && Source != Source1) {
4152 Source2 = Source;
4153 NumSources = 2;
4154 } else if (NumSources >= 2 && Source != Source1 && Source != Source2) {
4155 NumSources++;
4156 }
4157
4158 // Add to the new mask. For the NumSources>2 case these are not correct,
4159 // but are only used for the modular lane number.
4160 if (Source == Source1)
4161 NMask.push_back(MaskElt % LTNumElts);
4162 else if (Source == Source2)
4163 NMask.push_back(MaskElt % LTNumElts + LTNumElts);
4164 else
4165 NMask.push_back(MaskElt % LTNumElts);
4166 }
4167 // If the sub-mask has at most 2 input sub-vectors then re-cost it using
4168 // getShuffleCost. If not then cost it using the worst case as the number
4169 // of element moves into a new vector.
4170 if (NumSources <= 2)
4171 Cost += getShuffleCost(NumSources <= 1 ? TTI::SK_PermuteSingleSrc
4172 : TTI::SK_PermuteTwoSrc,
4173 NTp, NMask, CostKind, 0, nullptr, Args, CxtI);
4174 else
4175 Cost += LTNumElts;
4176 }
4177 return Cost;
4178 }
4179
4180 Kind = improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp);
4181 // Treat extractsubvector as single op permutation.
4182 bool IsExtractSubvector = Kind == TTI::SK_ExtractSubvector;
4183 if (IsExtractSubvector && LT.second.isFixedLengthVector())
4184 Kind = TTI::SK_PermuteSingleSrc;
4185
4186 // Check for broadcast loads, which are supported by the LD1R instruction.
4187 // In terms of code-size, the shuffle vector is free when a load + dup get
4188 // folded into a LD1R. That's what we check and return here. For performance
4189 // and reciprocal throughput, a LD1R is not completely free. In this case, we
4190 // return the cost for the broadcast below (i.e. 1 for most/all types), so
4191 // that we model the load + dup sequence slightly higher because LD1R is a
4192 // high latency instruction.
4193 if (CostKind == TTI::TCK_CodeSize && Kind == TTI::SK_Broadcast) {
4194 bool IsLoad = !Args.empty() && isa<LoadInst>(Args[0]);
4195 if (IsLoad && LT.second.isVector() &&
4196 isLegalBroadcastLoad(Tp->getElementType(),
4197 LT.second.getVectorElementCount()))
4198 return 0;
4199 }
4200
4201 // If we have 4 elements for the shuffle and a Mask, get the cost straight
4202 // from the perfect shuffle tables.
4203 if (Mask.size() == 4 && Tp->getElementCount() == ElementCount::getFixed(4) &&
4204 (Tp->getScalarSizeInBits() == 16 || Tp->getScalarSizeInBits() == 32) &&
4205 all_of(Mask, [](int E) { return E < 8; }))
4206 return getPerfectShuffleCost(Mask);
4207
4208 // Check for identity masks, which we can treat as free.
4209 if (!Mask.empty() && LT.second.isFixedLengthVector() &&
4210 (Kind == TTI::SK_PermuteTwoSrc || Kind == TTI::SK_PermuteSingleSrc) &&
4211 all_of(enumerate(Mask), [](const auto &M) {
4212 return M.value() < 0 || M.value() == (int)M.index();
4213 }))
4214 return 0;
4215
4216 // Check for other shuffles that are not SK_ kinds but we have native
4217 // instructions for, for example ZIP and UZP.
4218 unsigned Unused;
4219 if (LT.second.isFixedLengthVector() &&
4220 LT.second.getVectorNumElements() == Mask.size() &&
4221 (Kind == TTI::SK_PermuteTwoSrc || Kind == TTI::SK_PermuteSingleSrc) &&
4222 (isZIPMask(Mask, LT.second.getVectorNumElements(), Unused) ||
4223 isUZPMask(Mask, LT.second.getVectorNumElements(), Unused) ||
4224 // Check for non-zero lane splats
4225 all_of(drop_begin(Mask),
4226 [&Mask](int M) { return M < 0 || M == Mask[0]; })))
4227 return 1;
4228
4229 if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose ||
4230 Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc ||
4231 Kind == TTI::SK_Reverse || Kind == TTI::SK_Splice) {
4232 static const CostTblEntry ShuffleTbl[] = {
4233 // Broadcast shuffle kinds can be performed with 'dup'.
4234 {TTI::SK_Broadcast, MVT::v8i8, 1},
4235 {TTI::SK_Broadcast, MVT::v16i8, 1},
4236 {TTI::SK_Broadcast, MVT::v4i16, 1},
4237 {TTI::SK_Broadcast, MVT::v8i16, 1},
4238 {TTI::SK_Broadcast, MVT::v2i32, 1},
4239 {TTI::SK_Broadcast, MVT::v4i32, 1},
4240 {TTI::SK_Broadcast, MVT::v2i64, 1},
4241 {TTI::SK_Broadcast, MVT::v4f16, 1},
4242 {TTI::SK_Broadcast, MVT::v8f16, 1},
4243 {TTI::SK_Broadcast, MVT::v2f32, 1},
4244 {TTI::SK_Broadcast, MVT::v4f32, 1},
4245 {TTI::SK_Broadcast, MVT::v2f64, 1},
4246 // Transpose shuffle kinds can be performed with 'trn1/trn2' and
4247 // 'zip1/zip2' instructions.
4248 {TTI::SK_Transpose, MVT::v8i8, 1},
4249 {TTI::SK_Transpose, MVT::v16i8, 1},
4250 {TTI::SK_Transpose, MVT::v4i16, 1},
4251 {TTI::SK_Transpose, MVT::v8i16, 1},
4252 {TTI::SK_Transpose, MVT::v2i32, 1},
4253 {TTI::SK_Transpose, MVT::v4i32, 1},
4254 {TTI::SK_Transpose, MVT::v2i64, 1},
4255 {TTI::SK_Transpose, MVT::v4f16, 1},
4256 {TTI::SK_Transpose, MVT::v8f16, 1},
4257 {TTI::SK_Transpose, MVT::v2f32, 1},
4258 {TTI::SK_Transpose, MVT::v4f32, 1},
4259 {TTI::SK_Transpose, MVT::v2f64, 1},
4260 // Select shuffle kinds.
4261 // TODO: handle vXi8/vXi16.
4262 {TTI::SK_Select, MVT::v2i32, 1}, // mov.
4263 {TTI::SK_Select, MVT::v4i32, 2}, // rev+trn (or similar).
4264 {TTI::SK_Select, MVT::v2i64, 1}, // mov.
4265 {TTI::SK_Select, MVT::v2f32, 1}, // mov.
4266 {TTI::SK_Select, MVT::v4f32, 2}, // rev+trn (or similar).
4267 {TTI::SK_Select, MVT::v2f64, 1}, // mov.
4268 // PermuteSingleSrc shuffle kinds.
4269 {TTI::SK_PermuteSingleSrc, MVT::v2i32, 1}, // mov.
4270 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 3}, // perfectshuffle worst case.
4271 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // mov.
4272 {TTI::SK_PermuteSingleSrc, MVT::v2f32, 1}, // mov.
4273 {TTI::SK_PermuteSingleSrc, MVT::v4f32, 3}, // perfectshuffle worst case.
4274 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // mov.
4275 {TTI::SK_PermuteSingleSrc, MVT::v4i16, 3}, // perfectshuffle worst case.
4276 {TTI::SK_PermuteSingleSrc, MVT::v4f16, 3}, // perfectshuffle worst case.
4277 {TTI::SK_PermuteSingleSrc, MVT::v4bf16, 3}, // same
4278 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 8}, // constpool + load + tbl
4279 {TTI::SK_PermuteSingleSrc, MVT::v8f16, 8}, // constpool + load + tbl
4280 {TTI::SK_PermuteSingleSrc, MVT::v8bf16, 8}, // constpool + load + tbl
4281 {TTI::SK_PermuteSingleSrc, MVT::v8i8, 8}, // constpool + load + tbl
4282 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 8}, // constpool + load + tbl
4283 // Reverse can be lowered with `rev`.
4284 {TTI::SK_Reverse, MVT::v2i32, 1}, // REV64
4285 {TTI::SK_Reverse, MVT::v4i32, 2}, // REV64; EXT
4286 {TTI::SK_Reverse, MVT::v2i64, 1}, // EXT
4287 {TTI::SK_Reverse, MVT::v2f32, 1}, // REV64
4288 {TTI::SK_Reverse, MVT::v4f32, 2}, // REV64; EXT
4289 {TTI::SK_Reverse, MVT::v2f64, 1}, // EXT
4290 {TTI::SK_Reverse, MVT::v8f16, 2}, // REV64; EXT
4291 {TTI::SK_Reverse, MVT::v8i16, 2}, // REV64; EXT
4292 {TTI::SK_Reverse, MVT::v16i8, 2}, // REV64; EXT
4293 {TTI::SK_Reverse, MVT::v4f16, 1}, // REV64
4294 {TTI::SK_Reverse, MVT::v4i16, 1}, // REV64
4295 {TTI::SK_Reverse, MVT::v8i8, 1}, // REV64
4296 // Splice can all be lowered as `ext`.
4297 {TTI::SK_Splice, MVT::v2i32, 1},
4298 {TTI::SK_Splice, MVT::v4i32, 1},
4299 {TTI::SK_Splice, MVT::v2i64, 1},
4300 {TTI::SK_Splice, MVT::v2f32, 1},
4301 {TTI::SK_Splice, MVT::v4f32, 1},
4302 {TTI::SK_Splice, MVT::v2f64, 1},
4303 {TTI::SK_Splice, MVT::v8f16, 1},
4304 {TTI::SK_Splice, MVT::v8bf16, 1},
4305 {TTI::SK_Splice, MVT::v8i16, 1},
4306 {TTI::SK_Splice, MVT::v16i8, 1},
4307 {TTI::SK_Splice, MVT::v4bf16, 1},
4308 {TTI::SK_Splice, MVT::v4f16, 1},
4309 {TTI::SK_Splice, MVT::v4i16, 1},
4310 {TTI::SK_Splice, MVT::v8i8, 1},
4311 // Broadcast shuffle kinds for scalable vectors
4312 {TTI::SK_Broadcast, MVT::nxv16i8, 1},
4313 {TTI::SK_Broadcast, MVT::nxv8i16, 1},
4314 {TTI::SK_Broadcast, MVT::nxv4i32, 1},
4315 {TTI::SK_Broadcast, MVT::nxv2i64, 1},
4316 {TTI::SK_Broadcast, MVT::nxv2f16, 1},
4317 {TTI::SK_Broadcast, MVT::nxv4f16, 1},
4318 {TTI::SK_Broadcast, MVT::nxv8f16, 1},
4319 {TTI::SK_Broadcast, MVT::nxv2bf16, 1},
4320 {TTI::SK_Broadcast, MVT::nxv4bf16, 1},
4321 {TTI::SK_Broadcast, MVT::nxv8bf16, 1},
4322 {TTI::SK_Broadcast, MVT::nxv2f32, 1},
4323 {TTI::SK_Broadcast, MVT::nxv4f32, 1},
4324 {TTI::SK_Broadcast, MVT::nxv2f64, 1},
4325 {TTI::SK_Broadcast, MVT::nxv16i1, 1},
4326 {TTI::SK_Broadcast, MVT::nxv8i1, 1},
4327 {TTI::SK_Broadcast, MVT::nxv4i1, 1},
4328 {TTI::SK_Broadcast, MVT::nxv2i1, 1},
4329 // Handle the cases for vector.reverse with scalable vectors
4330 {TTI::SK_Reverse, MVT::nxv16i8, 1},
4331 {TTI::SK_Reverse, MVT::nxv8i16, 1},
4332 {TTI::SK_Reverse, MVT::nxv4i32, 1},
4333 {TTI::SK_Reverse, MVT::nxv2i64, 1},
4334 {TTI::SK_Reverse, MVT::nxv2f16, 1},
4335 {TTI::SK_Reverse, MVT::nxv4f16, 1},
4336 {TTI::SK_Reverse, MVT::nxv8f16, 1},
4337 {TTI::SK_Reverse, MVT::nxv2bf16, 1},
4338 {TTI::SK_Reverse, MVT::nxv4bf16, 1},
4339 {TTI::SK_Reverse, MVT::nxv8bf16, 1},
4340 {TTI::SK_Reverse, MVT::nxv2f32, 1},
4341 {TTI::SK_Reverse, MVT::nxv4f32, 1},
4342 {TTI::SK_Reverse, MVT::nxv2f64, 1},
4343 {TTI::SK_Reverse, MVT::nxv16i1, 1},
4344 {TTI::SK_Reverse, MVT::nxv8i1, 1},
4345 {TTI::SK_Reverse, MVT::nxv4i1, 1},
4346 {TTI::SK_Reverse, MVT::nxv2i1, 1},
4347 };
4348 if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second))
4349 return LT.first * Entry->Cost;
4350 }
4351
4352 if (Kind == TTI::SK_Splice && isa<ScalableVectorType>(Tp))
4353 return getSpliceCost(Tp, Index);
4354
4355 // Inserting a subvector can often be done with either a D, S or H register
4356 // move, so long as the inserted vector is "aligned".
4357 if (Kind == TTI::SK_InsertSubvector && LT.second.isFixedLengthVector() &&
4358 LT.second.getSizeInBits() <= 128 && SubTp) {
4359 std::pair<InstructionCost, MVT> SubLT = getTypeLegalizationCost(SubTp);
4360 if (SubLT.second.isVector()) {
4361 int NumElts = LT.second.getVectorNumElements();
4362 int NumSubElts = SubLT.second.getVectorNumElements();
4363 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
4364 return SubLT.first;
4365 }
4366 }
4367
4368 // Restore optimal kind.
4369 if (IsExtractSubvector)
4370 Kind = TTI::SK_ExtractSubvector;
4371 return BaseT::getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp, Args,
4372 CxtI);
4373}
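// Worked example (illustrative, not part of the original source): a
// two-source shuffle of <16 x i32> legalizes to four v4i32 registers
// (LT.first = 4, LTNumElts = 4), so the mask-splitting loop above recosts
// each 4-lane chunk of the mask separately. A chunk that draws its lanes from
// at most two of the source sub-vectors is recosted as a v4i32 single- or
// two-source shuffle; a chunk touching three or more sub-vectors falls back
// to the worst case of LTNumElts element moves.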
4374
4375 static bool containsDecreasingPointers(Loop *TheLoop,
4376 PredicatedScalarEvolution *PSE) {
4377 const auto &Strides = DenseMap<Value *, const SCEV *>();
4378 for (BasicBlock *BB : TheLoop->blocks()) {
4379 // Scan the instructions in the block and look for addresses that are
4380 // consecutive and decreasing.
4381 for (Instruction &I : *BB) {
4382 if (isa<LoadInst>(&I) || isa<StoreInst>(&I)) {
4383 Value *Ptr = getLoadStorePointerOperand(&I);
4384 Type *AccessTy = getLoadStoreType(&I);
4385 if (getPtrStride(*PSE, AccessTy, Ptr, TheLoop, Strides, /*Assume=*/true,
4386 /*ShouldCheckWrap=*/false)
4387 .value_or(0) < 0)
4388 return true;
4389 }
4390 }
4391 }
4392 return false;
4393}
4394
4395 bool AArch64TTIImpl::preferPredicateOverEpilogue(TailFoldingInfo *TFI) {
4396 if (!ST->hasSVE())
4397 return false;
4398
4399 // We don't currently support vectorisation with interleaving for SVE - with
4400 // such loops we're better off not using tail-folding. This gives us a chance
4401 // to fall back on fixed-width vectorisation using NEON's ld2/st2/etc.
4402 if (TFI->IAI->hasGroups())
4403 return false;
4404
4406 if (TFI->LVL->getReductionVars().size())
4407 Required |= TailFoldingOpts::Reductions;
4408 if (TFI->LVL->getFixedOrderRecurrences().size())
4409 Required |= TailFoldingOpts::Recurrences;
4410
4411 // We call this to discover whether any load/store pointers in the loop have
4412 // negative strides. This will require extra work to reverse the loop
4413 // predicate, which may be expensive.
4414 if (containsDecreasingPointers(TFI->LVL->getLoop(),
4415 TFI->LVL->getPredicatedScalarEvolution()))
4416 Required |= TailFoldingOpts::Reverse;
4417 if (Required == TailFoldingOpts::Disabled)
4418 Required |= TailFoldingOpts::Simple;
4419
4420 if (!TailFoldingOptionLoc.satisfies(ST->getSVETailFoldingDefaultOpts(),
4421 Required))
4422 return false;
4423
4424 // Don't tail-fold for tight loops where we would be better off interleaving
4425 // with an unpredicated loop.
4426 unsigned NumInsns = 0;
4427 for (BasicBlock *BB : TFI->LVL->getLoop()->blocks()) {
4428 NumInsns += BB->sizeWithoutDebug();
4429 }
4430
4431 // We expect 4 of these to be an IV PHI, IV add, IV compare and branch.
4432 return NumInsns >= SVETailFoldInsnThreshold;
4433}
4434
4435 InstructionCost
4436 AArch64TTIImpl::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
4437 StackOffset BaseOffset, bool HasBaseReg,
4438 int64_t Scale, unsigned AddrSpace) const {
4439 // Scaling factors are not free at all.
4440 // Operands | Rt Latency
4441 // -------------------------------------------
4442 // Rt, [Xn, Xm] | 4
4443 // -------------------------------------------
4444 // Rt, [Xn, Xm, lsl #imm] | Rn: 4 Rm: 5
4445 // Rt, [Xn, Wm, <extend> #imm] |
4446 TargetLoweringBase::AddrMode AM;
4447 AM.BaseGV = BaseGV;
4448 AM.BaseOffs = BaseOffset.getFixed();
4449 AM.HasBaseReg = HasBaseReg;
4450 AM.Scale = Scale;
4451 AM.ScalableOffset = BaseOffset.getScalable();
4452 if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
4453 // Scale represents reg2 * scale, thus account for 1 if
4454 // it is not equal to 0 or 1.
4455 return AM.Scale != 0 && AM.Scale != 1;
4456 return -1;
4457}
4458
4459 bool AArch64TTIImpl::shouldTreatInstructionLikeSelect(const Instruction *I) {
4460 // For the binary operators (e.g. or) we need to be more careful than with
4461 // selects; here we only transform them if they are already at a natural
4462 // break point in the code, i.e. the end of a block with an unconditional
4463 // terminator.
4464 if (EnableOrLikeSelectOpt && I->getOpcode() == Instruction::Or &&
4465 isa<BranchInst>(I->getNextNode()) &&
4466 cast<BranchInst>(I->getNextNode())->isUnconditional())
4467 return true;
4468 return BaseT::shouldTreatInstructionLikeSelect(I);
4469 }
4470
4471 bool AArch64TTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
4472 const TargetTransformInfo::LSRCost &C2) {
4473 // What is AArch64-specific here is adding the number of instructions to
4474 // the comparison (though not as the first consideration, as some targets
4475 // do) along with changing the priority of the base additions.
4476 // TODO: Maybe a more nuanced tradeoff between instruction count
4477 // and number of registers? To be investigated at a later date.
4478 if (EnableLSRCostOpt)
4479 return std::tie(C1.NumRegs, C1.Insns, C1.NumBaseAdds, C1.AddRecCost,
4480 C1.NumIVMuls, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
4481 std::tie(C2.NumRegs, C2.Insns, C2.NumBaseAdds, C2.AddRecCost,
4482 C2.NumIVMuls, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
4483
4484 return BaseT::isLSRCostLess(C1, C2);
4485 }
static cl::opt< bool > EnableScalableAutovecInStreamingMode("enable-scalable-autovec-in-streaming-mode", cl::init(false), cl::Hidden)
static std::optional< Instruction * > instCombineSVETBL(InstCombiner &IC, IntrinsicInst &II)
static Instruction::BinaryOps intrinsicIDToBinOpCode(unsigned Intrinsic)
static cl::opt< unsigned > InlineCallPenaltyChangeSM("inline-call-penalty-sm-change", cl::init(10), cl::Hidden, cl::desc("Penalty of inlining a call that requires a change to PSTATE.SM"))
static std::optional< Instruction * > instCombineSVELD1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL)
static std::optional< Instruction * > instCombineSVESrshl(InstCombiner &am