//===- InstCombineCalls.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall, visitInvoke, and visitCallBr functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace PatternMatch;

STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window",
    cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));

/// Return the specified type promoted as it would be to pass through a va_arg
/// area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
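// For example (illustrative): i8 and i16 promote to i32, while i32, i64, and
// non-integer types are returned unchanged.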

/// Recognize a memcpy/memmove from a trivially otherwise unused alloca.
/// TODO: This should probably be integrated with visitAllocSites, but that
/// requires a deeper change to allow either unread or unwritten objects.
static bool hasUndefSource(AnyMemTransferInst *MI) {
  auto *Src = MI->getRawSource();
  while (isa<GetElementPtrInst>(Src)) {
    if (!Src->hasOneUse())
      return false;
    Src = cast<Instruction>(Src)->getOperand(0);
  }
  return isa<AllocaInst>(Src) && Src->hasOneUse();
}

Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
  Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
  MaybeAlign CopyDstAlign = MI->getDestAlign();
  if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);
    return MI;
  }

  Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
  MaybeAlign CopySrcAlign = MI->getSourceAlign();
  if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (!isModSet(AA->getModRefInfoMask(MI->getDest()))) {
    // Set the size of the copy to 0; it will be deleted on the next iteration.
    MI->setLength((uint64_t)0);
    return MI;
  }

  // If the source is provably undef, the memcpy/memmove doesn't do anything
  // (unless the transfer is volatile).
  if (hasUndefSource(MI) && !MI->isVolatile()) {
    // Set the size of the copy to 0; it will be deleted on the next iteration.
    MI->setLength((uint64_t)0);
    return MI;
  }

  // If the length is 1/2/4/8 bytes, replace the memcpy/memmove with a single
  // load/store pair.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size&(Size-1)))
    return nullptr; // If not 1/2/4/8 bytes, exit.

  // If the transfer is atomic and the alignment is less than the size, we
  // would introduce an unaligned memory access, which CodeGen would later
  // turn into a libcall. That is not an evident performance gain, so bail
  // out for now.
  if (MI->isAtomic())
    if (*CopyDstAlign < Size || *CopySrcAlign < Size)
      return nullptr;

  // Use an integer load+store unless we can find something better.
  IntegerType *IntType = IntegerType::get(MI->getContext(), Size<<3);

  // If the memcpy has metadata describing the members, see if we can get the
  // TBAA, scope and noalias tags describing our copy.
  AAMDNodes AACopyMD = MI->getAAMetadata().adjustForAccess(Size);

  Value *Src = MI->getArgOperand(1);
  Value *Dest = MI->getArgOperand(0);
  LoadInst *L = Builder.CreateLoad(IntType, Src);
  // Alignment from the mem intrinsic will be better, so use it.
  L->setAlignment(*CopySrcAlign);
  L->setAAMetadata(AACopyMD);
  MDNode *LoopMemParallelMD =
    MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
  if (AccessGroupMD)
    L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  StoreInst *S = Builder.CreateStore(L, Dest);
  // Alignment from the mem intrinsic will be better, so use it.
  S->setAlignment(*CopyDstAlign);
  S->setAAMetadata(AACopyMD);
  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  if (AccessGroupMD)
    S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
  S->copyMetadata(*MI, LLVMContext::MD_DIAssignID);

  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    // non-atomics can be volatile
    L->setVolatile(MT->isVolatile());
    S->setVolatile(MT->isVolatile());
  }
  if (MI->isAtomic()) {
    // atomics have to be unordered
    L->setOrdering(AtomicOrdering::Unordered);
    S->setOrdering(AtomicOrdering::Unordered);
  }

  // Set the size of the copy to 0; it will be deleted on the next iteration.
  MI->setLength((uint64_t)0);
  return MI;
}
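
// Illustrative IR for the small-constant-length case (a sketch, assuming an
// 8-byte non-atomic copy):
//   call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %src,
//                                    i64 8, i1 false)
// becomes:
//   %v = load i64, ptr %src, align 4
//   store i64 %v, ptr %dst, align 4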

Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
  const Align KnownAlignment =
      getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  MaybeAlign MemSetAlign = MI->getDestAlign();
  if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
    MI->setDestAlignment(KnownAlignment);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (!isModSet(AA->getModRefInfoMask(MI->getDest()))) {
    // Set the size of the copy to 0; it will be deleted on the next iteration.
    MI->setLength((uint64_t)0);
    return MI;
  }

  // Remove memset with an undef value.
  // FIXME: This is technically incorrect because it might overwrite a poison
  // value. Change to PoisonValue once #52930 is resolved.
  if (isa<UndefValue>(MI->getValue())) {
    // Set the size of the copy to 0; it will be deleted on the next iteration.
    MI->setLength((uint64_t)0);
    return MI;
  }

  // Extract the length and fill value if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  const uint64_t Len = LenC->getLimitedValue();
  assert(Len && "0-sized memory setting should be removed already.");
  const Align Alignment = MI->getDestAlign().valueOrOne();

  // If the memset is atomic and the alignment is less than the size, we would
  // introduce an unaligned memory access, which CodeGen would later turn into
  // a libcall. That is not an evident performance gain, so bail out for now.
  if (MI->isAtomic() && Alignment < Len)
    return nullptr;

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Value *Dest = MI->getDest();

    // Extract the fill value and store.
    Constant *FillVal = ConstantInt::get(
        MI->getContext(), APInt::getSplat(Len * 8, FillC->getValue()));
    StoreInst *S = Builder.CreateStore(FillVal, Dest, MI->isVolatile());
    S->copyMetadata(*MI, LLVMContext::MD_DIAssignID);
    for (DbgVariableRecord *DbgAssign : at::getDVRAssignmentMarkers(S)) {
      if (llvm::is_contained(DbgAssign->location_ops(), FillC))
        DbgAssign->replaceVariableLocationOp(FillC, FillVal);
    }

    S->setAlignment(Alignment);
    if (MI->isAtomic())
      S->setOrdering(AtomicOrdering::Unordered);

    // Set the size of the copy to 0; it will be deleted on the next iteration.
    MI->setLength((uint64_t)0);
    return MI;
  }

  return nullptr;
}
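
// Illustrative IR (a sketch, assuming a 4-byte non-volatile memset):
//   call void @llvm.memset.p0.i64(ptr align 4 %p, i8 -86, i64 4, i1 false)
// becomes a single store of the splatted fill byte (0xAA -> 0xAAAAAAAA):
//   store i32 -1431655766, ptr %p, align 4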

// TODO, Obvious Missing Transforms:
// * Narrow width by halves excluding zero/undef lanes
Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) {
  Value *LoadPtr = II.getArgOperand(0);
  const Align Alignment = II.getParamAlign(0).valueOrOne();

  // If the mask is all ones or undefs, this is a plain vector load of the 1st
  // argument.
  if (maskIsAllOneOrUndef(II.getArgOperand(1))) {
    LoadInst *L = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                            "unmaskedload");
    L->copyMetadata(II);
    return L;
  }

  // If we can unconditionally load from this address, replace with a
  // load/select idiom. TODO: use DT for context sensitive query
  if (isDereferenceablePointer(LoadPtr, II.getType(),
                               II.getDataLayout(), &II, &AC)) {
    LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                             "unmaskedload");
    LI->copyMetadata(II);
    return Builder.CreateSelect(II.getArgOperand(1), LI, II.getArgOperand(2));
  }

  return nullptr;
}
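
// Illustrative IR for the dereferenceable case (a sketch; alignment is taken
// from the pointer's param attribute in this form of the intrinsic):
//   %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 %p,
//                                                  <4 x i1> %m, <4 x i32> %pt)
// becomes:
//   %l = load <4 x i32>, ptr %p, align 4
//   %v = select <4 x i1> %m, <4 x i32> %l, <4 x i32> %pt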

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Narrow width by halves excluding zero/undef lanes
Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
  Value *StorePtr = II.getArgOperand(1);
  Align Alignment = II.getParamAlign(1).valueOrOne();
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, this instruction does nothing.
  if (maskIsAllZeroOrUndef(ConstMask))
    return eraseInstFromFunction(II);

  // If the mask is all ones, this is a plain vector store of the 1st argument.
  if (maskIsAllOneOrUndef(ConstMask)) {
    StoreInst *S =
        new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
    S->copyMetadata(II);
    return S;
  }

  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt PoisonElts(DemandedElts.getBitWidth(), 0);
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts,
                                            PoisonElts))
    return replaceOperand(II, 0, V);

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane load -> load
// * Dereferenceable address & few lanes -> scalarize speculative load/selects
// * Adjacent vector addresses -> masked.load
// * Narrow width by halves excluding zero/undef lanes
// * Vector incrementing address -> vector masked load
Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(1));
  if (!ConstMask)
    return nullptr;

  // Vector splat address w/known mask -> scalar load
  // Fold the gather to load the source vector first lane
  // because it is reloading the same value each time
  if (ConstMask->isAllOnesValue())
    if (auto *SplatPtr = getSplatValue(II.getArgOperand(0))) {
      auto *VecTy = cast<VectorType>(II.getType());
      const Align Alignment = II.getParamAlign(0).valueOrOne();
      LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr,
                                              Alignment, "load.scalar");
      Value *Shuf =
          Builder.CreateVectorSplat(VecTy->getElementCount(), L, "broadcast");
      return replaceInstUsesWith(II, cast<Instruction>(Shuf));
    }

  return nullptr;
}
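
// Illustrative IR (a sketch): a gather whose address vector is a splat reads
// the same location in every lane, so with an all-ones mask
//   %g = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %splat,
//                             <4 x i1> splat (i1 true), <4 x float> poison)
// becomes a scalar load of the common pointer plus a vector splat of the
// loaded value.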

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Adjacent vector addresses -> masked.store
// * Narrow store width by halves excluding zero/undef lanes
// * Vector incrementing address -> vector masked store
Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, a scatter does nothing.
  if (maskIsAllZeroOrUndef(ConstMask))
    return eraseInstFromFunction(II);

  // Vector splat address -> scalar store
  if (auto *SplatPtr = getSplatValue(II.getArgOperand(1))) {
    // scatter(splat(value), splat(ptr), non-zero-mask) -> store value, ptr
    if (auto *SplatValue = getSplatValue(II.getArgOperand(0))) {
      if (maskContainsAllOneOrUndef(ConstMask)) {
        Align Alignment = II.getParamAlign(1).valueOrOne();
        StoreInst *S = new StoreInst(SplatValue, SplatPtr, /*IsVolatile=*/false,
                                     Alignment);
        S->copyMetadata(II);
        return S;
      }
    }
    // scatter(vector, splat(ptr), splat(true)) -> store extract(vector,
    // lastlane), ptr
    if (ConstMask->isAllOnesValue()) {
      Align Alignment = II.getParamAlign(1).valueOrOne();
      VectorType *WideLoadTy = cast<VectorType>(II.getArgOperand(1)->getType());
      ElementCount VF = WideLoadTy->getElementCount();
      Value *RunTimeVF = Builder.CreateElementCount(Builder.getInt32Ty(), VF);
      Value *LastLane = Builder.CreateSub(RunTimeVF, Builder.getInt32(1));
      Value *Extract =
          Builder.CreateExtractElement(II.getArgOperand(0), LastLane);
      StoreInst *S =
          new StoreInst(Extract, SplatPtr, /*IsVolatile=*/false, Alignment);
      S->copyMetadata(II);
      return S;
    }
  }
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt PoisonElts(DemandedElts.getBitWidth(), 0);
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts,
                                            PoisonElts))
    return replaceOperand(II, 0, V);
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(1), DemandedElts,
                                            PoisonElts))
    return replaceOperand(II, 1, V);

  return nullptr;
}
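
// Illustrative IR for the splat-value/splat-address case (a sketch): when all
// lanes write the same value to the same location and at least one lane is
// known active, the scatter collapses to a single scalar store:
//   store float %v, ptr %p, align 4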

/// This function transforms launder.invariant.group and strip.invariant.group
/// like:
/// launder(launder(%x)) -> launder(%x) (the result is not the argument)
/// launder(strip(%x)) -> launder(%x)
/// strip(strip(%x)) -> strip(%x) (the result is not the argument)
/// strip(launder(%x)) -> strip(%x)
/// This is legal because it preserves the most recent information about
/// the presence or absence of invariant.group.
static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
                                                    InstCombinerImpl &IC) {
  auto *Arg = II.getArgOperand(0);
  auto *StrippedArg = Arg->stripPointerCasts();
  auto *StrippedInvariantGroupsArg = StrippedArg;
  while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
    if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
        Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
      break;
    StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
  }
  if (StrippedArg == StrippedInvariantGroupsArg)
    return nullptr; // No launders/strips to remove.

  Value *Result = nullptr;

  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
  else
    llvm_unreachable(
        "simplifyInvariantGroupIntrinsic only handles launder and strip");
  if (Result->getType()->getPointerAddressSpace() !=
      II.getType()->getPointerAddressSpace())
    Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());

  return cast<Instruction>(Result);
}

static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert((II.getIntrinsicID() == Intrinsic::cttz ||
          II.getIntrinsicID() == Intrinsic::ctlz) &&
         "Expected cttz or ctlz intrinsic");
  bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
  Value *Op0 = II.getArgOperand(0);
  Value *Op1 = II.getArgOperand(1);
  Value *X;
  // ctlz(bitreverse(x)) -> cttz(x)
  // cttz(bitreverse(x)) -> ctlz(x)
  if (match(Op0, m_BitReverse(m_Value(X)))) {
    Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
    Function *F =
        Intrinsic::getOrInsertDeclaration(II.getModule(), ID, II.getType());
    return CallInst::Create(F, {X, II.getArgOperand(1)});
  }

  if (II.getType()->isIntOrIntVectorTy(1)) {
    // ctlz/cttz i1 Op0 --> not Op0
    if (match(Op1, m_Zero()))
      return BinaryOperator::CreateNot(Op0);
    // If zero is poison, then the input can be assumed to be "true", so the
    // instruction simplifies to "false".
    assert(match(Op1, m_One()) && "Expected ctlz/cttz operand to be 0 or 1");
    return IC.replaceInstUsesWith(II, ConstantInt::getNullValue(II.getType()));
  }

  // If ctlz/cttz is only used as a shift amount, set is_zero_poison to true.
  if (II.hasOneUse() && match(Op1, m_Zero()) &&
      match(II.user_back(), m_Shift(m_Value(), m_Specific(&II)))) {
    II.dropUBImplyingAttrsAndMetadata();
    return IC.replaceOperand(II, 1, IC.Builder.getTrue());
  }

  Constant *C;

  if (IsTZ) {
    // cttz(-x) -> cttz(x)
    if (match(Op0, m_Neg(m_Value(X))))
      return IC.replaceOperand(II, 0, X);

    // cttz(-x & x) -> cttz(x)
    if (match(Op0, m_c_And(m_Neg(m_Value(X)), m_Deferred(X))))
      return IC.replaceOperand(II, 0, X);

    // cttz(sext(x)) -> cttz(zext(x))
    if (match(Op0, m_OneUse(m_SExt(m_Value(X))))) {
      auto *Zext = IC.Builder.CreateZExt(X, II.getType());
      auto *CttzZext =
          IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, Zext, Op1);
      return IC.replaceInstUsesWith(II, CttzZext);
    }

    // Zext doesn't change the number of trailing zeros, so narrow:
    // cttz(zext(x)) -> zext(cttz(x)) if the 'ZeroIsPoison' parameter is 'true'.
    if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) && match(Op1, m_One())) {
      auto *Cttz = IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, X,
                                                    IC.Builder.getTrue());
      auto *ZextCttz = IC.Builder.CreateZExt(Cttz, II.getType());
      return IC.replaceInstUsesWith(II, ZextCttz);
    }

    // cttz(abs(x)) -> cttz(x)
    // cttz(nabs(x)) -> cttz(x)
    Value *Y;
    SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
    if (SPF == SPF_ABS || SPF == SPF_NABS)
      return IC.replaceOperand(II, 0, X);

    if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
      return IC.replaceOperand(II, 0, X);

    // cttz(shl(%const, %val), 1) --> add(cttz(%const, 1), %val)
    if (match(Op0, m_Shl(m_ImmConstant(C), m_Value(X))) &&
        match(Op1, m_One())) {
      Value *ConstCttz =
          IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, C, Op1);
      return BinaryOperator::CreateAdd(ConstCttz, X);
    }

    // cttz(lshr exact (%const, %val), 1) --> sub(cttz(%const, 1), %val)
    if (match(Op0, m_Exact(m_LShr(m_ImmConstant(C), m_Value(X)))) &&
        match(Op1, m_One())) {
      Value *ConstCttz =
          IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, C, Op1);
      return BinaryOperator::CreateSub(ConstCttz, X);
    }

    // cttz(add(lshr(UINT_MAX, %val), 1)) --> sub(width, %val)
    if (match(Op0, m_Add(m_LShr(m_AllOnes(), m_Value(X)), m_One()))) {
      Value *Width =
          ConstantInt::get(II.getType(), II.getType()->getScalarSizeInBits());
      return BinaryOperator::CreateSub(Width, X);
    }
  } else {
    // ctlz(lshr(%const, %val), 1) --> add(ctlz(%const, 1), %val)
    if (match(Op0, m_LShr(m_ImmConstant(C), m_Value(X))) &&
        match(Op1, m_One())) {
      Value *ConstCtlz =
          IC.Builder.CreateBinaryIntrinsic(Intrinsic::ctlz, C, Op1);
      return BinaryOperator::CreateAdd(ConstCtlz, X);
    }

    // ctlz(shl nuw (%const, %val), 1) --> sub(ctlz(%const, 1), %val)
    if (match(Op0, m_NUWShl(m_ImmConstant(C), m_Value(X))) &&
        match(Op1, m_One())) {
      Value *ConstCtlz =
          IC.Builder.CreateBinaryIntrinsic(Intrinsic::ctlz, C, Op1);
      return BinaryOperator::CreateSub(ConstCtlz, X);
    }

    // ctlz(~x & (x - 1)) -> bitwidth - cttz(x, false)
    if (Op0->hasOneUse() &&
        match(Op0,
              m_c_And(m_Not(m_Value(X)), m_Add(m_Deferred(X), m_AllOnes())))) {
      Type *Ty = II.getType();
      unsigned BitWidth = Ty->getScalarSizeInBits();
      auto *Cttz = IC.Builder.CreateIntrinsic(Intrinsic::cttz, Ty,
                                              {X, IC.Builder.getFalse()});
      auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth));
      return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz));
    }
  }

  // cttz(Pow2) -> Log2(Pow2)
  // ctlz(Pow2) -> BitWidth - 1 - Log2(Pow2)
  if (auto *R = IC.tryGetLog2(Op0, match(Op1, m_One()))) {
    if (IsTZ)
      return IC.replaceInstUsesWith(II, R);
    BinaryOperator *BO = BinaryOperator::CreateSub(
        ConstantInt::get(R->getType(), R->getType()->getScalarSizeInBits() - 1),
        R);
    BO->setHasNoSignedWrap();
    BO->setHasNoUnsignedWrap();
    return BO;
  }

  KnownBits Known = IC.computeKnownBits(Op0, &II);

  // Create a mask for bits above (ctlz) or below (cttz) the first known one.
  unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
                                : Known.countMaxLeadingZeros();
  unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
                                : Known.countMinLeadingZeros();

  // If all bits above (ctlz) or below (cttz) the first known one are known
  // zero, this value is constant.
  // FIXME: This should be in InstSimplify because we're replacing an
  // instruction with a constant.
  if (PossibleZeros == DefiniteZeros) {
    auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
    return IC.replaceInstUsesWith(II, C);
  }

  // If the input to cttz/ctlz is known to be non-zero,
  // then change the 'ZeroIsPoison' parameter to 'true'
  // because we know the zero behavior can't affect the result.
  if (!Known.One.isZero() ||
      isKnownNonZero(Op0, IC.getSimplifyQuery().getWithInstruction(&II))) {
    if (!match(II.getArgOperand(1), m_One()))
      return IC.replaceOperand(II, 1, IC.Builder.getTrue());
  }

  // Add range attribute since known bits can't completely reflect what we know.
  unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
  if (BitWidth != 1 && !II.hasRetAttr(Attribute::Range) &&
      !II.getMetadata(LLVMContext::MD_range)) {
    ConstantRange Range(APInt(BitWidth, DefiniteZeros),
                        APInt(BitWidth, PossibleZeros + 1));
    II.addRangeRetAttr(Range);
    return &II;
  }

  return nullptr;
}
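
// Illustrative IR for the shl-of-constant case (a sketch):
//   %s = shl i32 4, %v
//   %r = call i32 @llvm.cttz.i32(i32 %s, i1 true)
// cttz of the constant folds (cttz(4) == 2), so this becomes:
//   %r = add i32 %v, 2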

static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert(II.getIntrinsicID() == Intrinsic::ctpop &&
         "Expected ctpop intrinsic");
  Type *Ty = II.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  Value *Op0 = II.getArgOperand(0);
  Value *X, *Y;

  // ctpop(bitreverse(x)) -> ctpop(x)
  // ctpop(bswap(x)) -> ctpop(x)
  if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X))))
    return IC.replaceOperand(II, 0, X);

  // ctpop(rot(x)) -> ctpop(x)
  if ((match(Op0, m_FShl(m_Value(X), m_Value(Y), m_Value())) ||
       match(Op0, m_FShr(m_Value(X), m_Value(Y), m_Value()))) &&
      X == Y)
    return IC.replaceOperand(II, 0, X);

  // ctpop(x | -x) -> bitwidth - cttz(x, false)
  if (Op0->hasOneUse() &&
      match(Op0, m_c_Or(m_Value(X), m_Neg(m_Deferred(X))))) {
    auto *Cttz = IC.Builder.CreateIntrinsic(Intrinsic::cttz, Ty,
                                            {X, IC.Builder.getFalse()});
    auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth));
    return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz));
  }

  // ctpop(~x & (x - 1)) -> cttz(x, false)
  if (match(Op0,
            m_c_And(m_Not(m_Value(X)), m_Add(m_Deferred(X), m_AllOnes())))) {
    Function *F =
        Intrinsic::getOrInsertDeclaration(II.getModule(), Intrinsic::cttz, Ty);
    return CallInst::Create(F, {X, IC.Builder.getFalse()});
  }

  // Zext doesn't change the number of set bits, so narrow:
  // ctpop (zext X) --> zext (ctpop X)
  if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
    Value *NarrowPop = IC.Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, X);
    return CastInst::Create(Instruction::ZExt, NarrowPop, Ty);
  }

  KnownBits Known(BitWidth);
  IC.computeKnownBits(Op0, Known, &II);

  // If all bits are zero except for exactly one fixed bit, then the result
  // must be 0 or 1, and we can get that answer by shifting to LSB:
  // ctpop (X & 32) --> (X & 32) >> 5
  // TODO: Investigate removing this as it's likely unnecessary given the below
  // `isKnownToBeAPowerOfTwo` check.
  if ((~Known.Zero).isPowerOf2())
    return BinaryOperator::CreateLShr(
        Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));

  // More generally we can also handle non-constant power of 2 patterns such as
  // shl/shr(Pow2, X), (X & -X), etc... by transforming:
  // ctpop(Pow2OrZero) --> icmp ne X, 0
  if (IC.isKnownToBeAPowerOfTwo(Op0, /* OrZero */ true))
    return CastInst::Create(Instruction::ZExt,
                            IC.Builder.CreateICmpNE(Op0,
                                                    Constant::getNullValue(Ty)),
                            Ty);

  // Add range attribute since known bits can't completely reflect what we know.
  if (BitWidth != 1) {
    ConstantRange OldRange =
        II.getRange().value_or(ConstantRange::getFull(BitWidth));

    unsigned Lower = Known.countMinPopulation();
    unsigned Upper = Known.countMaxPopulation() + 1;

    if (Lower == 0 && OldRange.contains(APInt::getZero(BitWidth)) &&
        isKnownNonZero(Op0, IC.getSimplifyQuery().getWithInstruction(&II)))
      Lower = 1;

    ConstantRange Range(APInt(BitWidth, Lower), APInt(BitWidth, Upper));
    Range = Range.intersectWith(OldRange, ConstantRange::Unsigned);

    if (Range != OldRange) {
      II.addRangeRetAttr(Range);
      return &II;
    }
  }

  return nullptr;
}
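
// Illustrative IR for the single-fixed-bit case (a sketch):
//   %a = and i32 %x, 32
//   %r = call i32 @llvm.ctpop.i32(i32 %a)
// only bit 5 can be set, so the population count is that bit shifted to LSB:
//   %r = lshr i32 %a, 5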

/// Convert `tbl`/`tbx` intrinsics to shufflevector if the mask is constant, and
/// at most two source operands are actually referenced.
static Instruction *simplifyNeonTbl(IntrinsicInst &II, InstCombinerImpl &IC,
                                    bool IsExtension) {
  // Bail out if the mask is not a constant.
  auto *C = dyn_cast<Constant>(II.getArgOperand(II.arg_size() - 1));
  if (!C)
    return nullptr;

  auto *RetTy = cast<FixedVectorType>(II.getType());
  unsigned NumIndexes = RetTy->getNumElements();

  // Only perform this transformation for <8 x i8> and <16 x i8> vector types.
  if (!RetTy->getElementType()->isIntegerTy(8) ||
      (NumIndexes != 8 && NumIndexes != 16))
    return nullptr;

  // For tbx instructions, the first argument is the "fallback" vector, which
  // has the same length as the mask and return type.
  unsigned int StartIndex = (unsigned)IsExtension;
  auto *SourceTy =
      cast<FixedVectorType>(II.getArgOperand(StartIndex)->getType());
  // Note that the element count of each source vector does *not* need to be the
  // same as the element count of the return type and mask! All source vectors
  // must have the same element count as each other, though.
  unsigned NumElementsPerSource = SourceTy->getNumElements();

  // There are no tbl/tbx intrinsics for which the destination size exceeds the
  // source size. However, our definitions of the intrinsics, at least in
  // IntrinsicsAArch64.td, allow for arbitrary destination vector sizes, so it
  // *could* technically happen.
  if (NumIndexes > NumElementsPerSource)
    return nullptr;

  // The tbl/tbx intrinsics take several source operands followed by a mask
  // operand.
  unsigned int NumSourceOperands = II.arg_size() - 1 - (unsigned)IsExtension;

  // Map input operands to shuffle indices. This also helpfully deduplicates the
  // input arguments, in case the same value is passed as an argument multiple
  // times.
  SmallDenseMap<Value *, unsigned, 2> ValueToShuffleSlot;
  Value *ShuffleOperands[2] = {PoisonValue::get(SourceTy),
                               PoisonValue::get(SourceTy)};

  int Indexes[16];
  for (unsigned I = 0; I < NumIndexes; ++I) {
    Constant *COp = C->getAggregateElement(I);

    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = -1;
      continue;
    }

    uint64_t Index = cast<ConstantInt>(COp)->getZExtValue();
    // The index of the input argument that this index references (0 = first
    // source argument, etc).
    unsigned SourceOperandIndex = Index / NumElementsPerSource;
    // The index of the element at that source operand.
    unsigned SourceOperandElementIndex = Index % NumElementsPerSource;

    Value *SourceOperand;
    if (SourceOperandIndex >= NumSourceOperands) {
      // This index is out of bounds. Map it to index into either the fallback
      // vector (tbx) or vector of zeroes (tbl).
      SourceOperandIndex = NumSourceOperands;
      if (IsExtension) {
        // For out-of-bounds indices in tbx, choose the `I`th element of the
        // fallback.
        SourceOperand = II.getArgOperand(0);
        SourceOperandElementIndex = I;
      } else {
        // Otherwise, choose some element from the dummy vector of zeroes (we'll
        // always choose the first).
        SourceOperand = Constant::getNullValue(SourceTy);
        SourceOperandElementIndex = 0;
      }
    } else {
      SourceOperand = II.getArgOperand(SourceOperandIndex + StartIndex);
    }

    // The source operand may be the fallback vector, which may not have the
    // same number of elements as the source vector. In that case, we *could*
    // choose to extend its length with another shufflevector, but it's simpler
    // to just bail instead.
    if (cast<FixedVectorType>(SourceOperand->getType())->getNumElements() !=
        NumElementsPerSource)
      return nullptr;

    // We now know the source operand referenced by this index. Make it a
    // shufflevector operand, if it isn't already.
    unsigned NumSlots = ValueToShuffleSlot.size();
    // This shuffle references more than two sources, and hence cannot be
    // represented as a shufflevector.
    if (NumSlots == 2 && !ValueToShuffleSlot.contains(SourceOperand))
      return nullptr;

    auto [It, Inserted] =
        ValueToShuffleSlot.try_emplace(SourceOperand, NumSlots);
    if (Inserted)
      ShuffleOperands[It->getSecond()] = SourceOperand;

    unsigned RemappedIndex =
        (It->getSecond() * NumElementsPerSource) + SourceOperandElementIndex;
    Indexes[I] = RemappedIndex;
  }

  Value *Shuf = IC.Builder.CreateShuffleVector(
      ShuffleOperands[0], ShuffleOperands[1], ArrayRef(Indexes, NumIndexes));
  return IC.replaceInstUsesWith(II, Shuf);
}
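
// Illustrative IR (a sketch): a tbl1 call whose constant mask selects the
// first eight lanes in order,
//   %r = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %t,
//            <8 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
// becomes a plain shufflevector of %t with mask <0, 1, 2, 3, 4, 5, 6, 7>.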

// Returns true iff the 2 intrinsics have the same operands, limiting the
// comparison to the first NumOperands.
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.arg_size() >= NumOperands && "Not enough operands");
  assert(E.arg_size() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
      return false;
  return true;
}

// Remove trivially empty start/end intrinsic ranges, i.e. a start
// immediately followed by an end (ignoring debuginfo or other
// start/end intrinsics in between). As this handles only the most trivial
// cases, tracking the nesting level is not needed:
//
// call @llvm.foo.start(i1 0)
// call @llvm.foo.start(i1 0) ; This one won't be skipped: it will be removed
// call @llvm.foo.end(i1 0)
// call @llvm.foo.end(i1 0) ; &I
static bool
removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC,
                          std::function<bool(const IntrinsicInst &)> IsStart) {
  // We start from the end intrinsic and scan backwards, so that InstCombine
  // has already processed (and potentially removed) all the instructions
  // before the end intrinsic.
  BasicBlock::reverse_iterator BI(EndI), BE(EndI.getParent()->rend());
  for (; BI != BE; ++BI) {
    if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
      if (I->isDebugOrPseudoInst() ||
          I->getIntrinsicID() == EndI.getIntrinsicID())
        continue;
      if (IsStart(*I)) {
        if (haveSameOperands(EndI, *I, EndI.arg_size())) {
          IC.eraseInstFromFunction(*I);
          IC.eraseInstFromFunction(EndI);
          return true;
        }
        // Skip start intrinsics that don't pair with this end intrinsic.
        continue;
      }
    }
    break;
  }

  return false;
}

Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) {
  removeTriviallyEmptyRange(I, *this, [&I](const IntrinsicInst &II) {
    // Bail out on the case where the source va_list of a va_copy is destroyed
    // immediately by a follow-up va_end.
    return II.getIntrinsicID() == Intrinsic::vastart ||
           (II.getIntrinsicID() == Intrinsic::vacopy &&
            I.getArgOperand(0) != II.getArgOperand(1));
  });
  return nullptr;
}

static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
  assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
  Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
  if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
    Call.setArgOperand(0, Arg1);
    Call.setArgOperand(1, Arg0);
    return &Call;
  }
  return nullptr;
}

/// Creates a result tuple for an overflow intrinsic \p II with a given
/// \p Result and a constant \p Overflow value.
static Instruction *createOverflowTuple(IntrinsicInst *II, Value *Result,
                                        Constant *Overflow) {
  Constant *V[] = {PoisonValue::get(Result->getType()), Overflow};
  StructType *ST = cast<StructType>(II->getType());
  Constant *Struct = ConstantStruct::get(ST, V);
  return InsertValueInst::Create(Struct, Result, 0);
}

Instruction *
InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
  WithOverflowInst *WO = cast<WithOverflowInst>(II);
  Value *OperationResult = nullptr;
  Constant *OverflowResult = nullptr;
  if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
                            WO->getRHS(), *WO, OperationResult, OverflowResult))
    return createOverflowTuple(WO, OperationResult, OverflowResult);

  // See whether we can optimize the overflow check with assumption information.
  for (User *U : WO->users()) {
    if (!match(U, m_ExtractValue<1>(m_Value())))
      continue;

    for (auto &AssumeVH : AC.assumptionsFor(U)) {
      if (!AssumeVH)
        continue;
      CallInst *I = cast<CallInst>(AssumeVH);
      if (!match(I->getArgOperand(0), m_Not(m_Specific(U))))
        continue;
      if (!isValidAssumeForContext(I, II, /*DT=*/nullptr,
                                   /*AllowEphemerals=*/true))
        continue;
      Value *Result =
          Builder.CreateBinOp(WO->getBinaryOp(), WO->getLHS(), WO->getRHS());
      Result->takeName(WO);
      if (auto *Inst = dyn_cast<Instruction>(Result)) {
        if (WO->isSigned())
          Inst->setHasNoSignedWrap();
        else
          Inst->setHasNoUnsignedWrap();
      }
      return createOverflowTuple(WO, Result,
                                 ConstantInt::getFalse(U->getType()));
    }
  }

  return nullptr;
}
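
// Illustrative IR for the assumption case (a sketch):
//   %wo = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
//   %ov = extractvalue { i32, i1 } %wo, 1
//   %no = xor i1 %ov, true
//   call void @llvm.assume(i1 %no)
// The overflow bit is assumed false, so the arithmetic half becomes a plain
//   %sum = add nuw i32 %a, %b
// and the result tuple is rebuilt as { %sum, false }.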

static bool inputDenormalIsIEEE(const Function &F, const Type *Ty) {
  Ty = Ty->getScalarType();
  return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE;
}

static bool inputDenormalIsDAZ(const Function &F, const Type *Ty) {
  Ty = Ty->getScalarType();
  return F.getDenormalMode(Ty->getFltSemantics()).inputsAreZero();
}

/// \returns the compare predicate type if the test performed by
/// llvm.is.fpclass(x, \p Mask) is equivalent to fcmp o__ x, 0.0 with the
/// floating-point environment assumed for \p F for type \p Ty
static FCmpInst::Predicate fpclassTestIsFCmp0(FPClassTest Mask,
                                              const Function &F, Type *Ty) {
  switch (static_cast<unsigned>(Mask)) {
  case fcZero:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_OEQ;
    break;
  case fcZero | fcSubnormal:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_OEQ;
    break;
  case fcPositive | fcNegZero:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_OGE;
    break;
  case fcPositive | fcNegZero | fcNegSubnormal:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_OGE;
    break;
  case fcPosSubnormal | fcPosNormal | fcPosInf:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_OGT;
    break;
  case fcNegative | fcPosZero:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_OLE;
    break;
  case fcNegative | fcPosZero | fcPosSubnormal:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_OLE;
    break;
  case fcNegSubnormal | fcNegNormal | fcNegInf:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_OLT;
    break;
  case fcPosNormal | fcPosInf:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_OGT;
    break;
  case fcNegNormal | fcNegInf:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_OLT;
    break;
  case ~fcZero & ~fcNan:
    if (inputDenormalIsIEEE(F, Ty))
      return FCmpInst::FCMP_ONE;
    break;
  case ~(fcZero | fcSubnormal) & ~fcNan:
    if (inputDenormalIsDAZ(F, Ty))
      return FCmpInst::FCMP_ONE;
    break;
  default:
    break;
  }

  return FCmpInst::BAD_FCMP_PREDICATE;
}

Instruction *InstCombinerImpl::foldIntrinsicIsFPClass(IntrinsicInst &II) {
  Value *Src0 = II.getArgOperand(0);
  Value *Src1 = II.getArgOperand(1);
  const ConstantInt *CMask = cast<ConstantInt>(Src1);
  FPClassTest Mask = static_cast<FPClassTest>(CMask->getZExtValue());
  const bool IsUnordered = (Mask & fcNan) == fcNan;
  const bool IsOrdered = (Mask & fcNan) == fcNone;
  const FPClassTest OrderedMask = Mask & ~fcNan;
  const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;

  const bool IsStrict =
      II.getFunction()->getAttributes().hasFnAttr(Attribute::StrictFP);

  Value *FNegSrc;
  if (match(Src0, m_FNeg(m_Value(FNegSrc)))) {
    // is.fpclass (fneg x), mask -> is.fpclass x, (fneg mask)

    II.setArgOperand(1, ConstantInt::get(Src1->getType(), fneg(Mask)));
    return replaceOperand(II, 0, FNegSrc);
  }

  Value *FAbsSrc;
  if (match(Src0, m_FAbs(m_Value(FAbsSrc)))) {
    II.setArgOperand(1, ConstantInt::get(Src1->getType(), inverse_fabs(Mask)));
    return replaceOperand(II, 0, FAbsSrc);
  }

  if ((OrderedMask == fcInf || OrderedInvertedMask == fcInf) &&
      (IsOrdered || IsUnordered) && !IsStrict) {
    // is.fpclass(x, fcInf) -> fcmp oeq fabs(x), +inf
    // is.fpclass(x, ~fcInf) -> fcmp one fabs(x), +inf
    // is.fpclass(x, fcInf|fcNan) -> fcmp ueq fabs(x), +inf
    // is.fpclass(x, ~(fcInf|fcNan)) -> fcmp une fabs(x), +inf
    Constant *Inf = ConstantFP::getInfinity(Src0->getType());
    FCmpInst::Predicate Pred =
        IsUnordered ? FCmpInst::FCMP_UEQ : FCmpInst::FCMP_OEQ;
    if (OrderedInvertedMask == fcInf)
      Pred = IsUnordered ? FCmpInst::FCMP_UNE : FCmpInst::FCMP_ONE;

    Value *Fabs = Builder.CreateFAbs(Src0);
    Value *CmpInf = Builder.CreateFCmp(Pred, Fabs, Inf);
    CmpInf->takeName(&II);
    return replaceInstUsesWith(II, CmpInf);
  }

  if ((OrderedMask == fcPosInf || OrderedMask == fcNegInf) &&
      (IsOrdered || IsUnordered) && !IsStrict) {
    // is.fpclass(x, fcPosInf) -> fcmp oeq x, +inf
    // is.fpclass(x, fcNegInf) -> fcmp oeq x, -inf
    // is.fpclass(x, fcPosInf|fcNan) -> fcmp ueq x, +inf
    // is.fpclass(x, fcNegInf|fcNan) -> fcmp ueq x, -inf
    Constant *Inf =
        ConstantFP::getInfinity(Src0->getType(), OrderedMask == fcNegInf);
    Value *EqInf = IsUnordered ? Builder.CreateFCmpUEQ(Src0, Inf)
                               : Builder.CreateFCmpOEQ(Src0, Inf);

    EqInf->takeName(&II);
    return replaceInstUsesWith(II, EqInf);
  }

  if ((OrderedInvertedMask == fcPosInf || OrderedInvertedMask == fcNegInf) &&
      (IsOrdered || IsUnordered) && !IsStrict) {
    // is.fpclass(x, ~fcPosInf) -> fcmp one x, +inf
    // is.fpclass(x, ~fcNegInf) -> fcmp one x, -inf
    // is.fpclass(x, ~fcPosInf|fcNan) -> fcmp une x, +inf
    // is.fpclass(x, ~fcNegInf|fcNan) -> fcmp une x, -inf
    Constant *Inf = ConstantFP::getInfinity(Src0->getType(),
                                            OrderedInvertedMask == fcNegInf);
    Value *NeInf = IsUnordered ? Builder.CreateFCmpUNE(Src0, Inf)
                               : Builder.CreateFCmpONE(Src0, Inf);
    NeInf->takeName(&II);
    return replaceInstUsesWith(II, NeInf);
  }

  if (Mask == fcNan && !IsStrict) {
    // Equivalent of isnan. Replace with standard fcmp if we don't care about FP
    // exceptions.
    Value *IsNan =
        Builder.CreateFCmpUNO(Src0, ConstantFP::getZero(Src0->getType()));
    IsNan->takeName(&II);
    return replaceInstUsesWith(II, IsNan);
  }

  if (Mask == (~fcNan & fcAllFlags) && !IsStrict) {
    // Equivalent of !isnan. Replace with standard fcmp.
    Value *FCmp =
        Builder.CreateFCmpORD(Src0, ConstantFP::getZero(Src0->getType()));
    FCmp->takeName(&II);
    return replaceInstUsesWith(II, FCmp);
  }

  FCmpInst::Predicate PredType = FCmpInst::BAD_FCMP_PREDICATE;

  // Try to replace with an fcmp with 0
  //
  // is.fpclass(x, fcZero) -> fcmp oeq x, 0.0
  // is.fpclass(x, fcZero | fcNan) -> fcmp ueq x, 0.0
  // is.fpclass(x, ~fcZero & ~fcNan) -> fcmp one x, 0.0
  // is.fpclass(x, ~fcZero) -> fcmp une x, 0.0
  //
  // is.fpclass(x, fcPosSubnormal | fcPosNormal | fcPosInf) -> fcmp ogt x, 0.0
  // is.fpclass(x, fcPositive | fcNegZero) -> fcmp oge x, 0.0
  //
  // is.fpclass(x, fcNegSubnormal | fcNegNormal | fcNegInf) -> fcmp olt x, 0.0
  // is.fpclass(x, fcNegative | fcPosZero) -> fcmp ole x, 0.0
  //
  if (!IsStrict && (IsOrdered || IsUnordered) &&
      (PredType = fpclassTestIsFCmp0(OrderedMask, *II.getFunction(),
                                     Src0->getType())) !=
          FCmpInst::BAD_FCMP_PREDICATE) {
    Constant *Zero = ConstantFP::getZero(Src0->getType());
    // Equivalent of == 0.
    Value *FCmp = Builder.CreateFCmp(
        IsUnordered ? FCmpInst::getUnorderedPredicate(PredType) : PredType,
        Src0, Zero);

    FCmp->takeName(&II);
    return replaceInstUsesWith(II, FCmp);
  }

  KnownFPClass Known =
      computeKnownFPClass(Src0, Mask, SQ.getWithInstruction(&II));

  // Clear test bits we know must be false from the source value.
  // fp_class (nnan x), qnan|snan|other -> fp_class (nnan x), other
  // fp_class (ninf x), ninf|pinf|other -> fp_class (ninf x), other
  if ((Mask & Known.KnownFPClasses) != Mask) {
    II.setArgOperand(
        1, ConstantInt::get(Src1->getType(), Mask & Known.KnownFPClasses));
    return &II;
  }

  // If none of the tests which can return false are possible, fold to true.
  // fp_class (nnan x), ~(qnan|snan) -> true
  // fp_class (ninf x), ~(ninf|pinf) -> true
  if (Mask == Known.KnownFPClasses)
    return replaceInstUsesWith(II, ConstantInt::get(II.getType(), true));

  return nullptr;
}
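
// Illustrative IR for the NaN test (a sketch): with fcNan == 3,
//   %t = call i1 @llvm.is.fpclass.f32(float %x, i32 3)
// becomes the usual unordered comparison against zero:
//   %t = fcmp uno float %x, 0.000000e+00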

static std::optional<bool> getKnownSign(Value *Op, const SimplifyQuery &SQ) {
  KnownBits Known = computeKnownBits(Op, SQ);
  if (Known.isNonNegative())
    return false;
  if (Known.isNegative())
    return true;

  Value *X, *Y;
  if (match(Op, m_NSWSub(m_Value(X), m_Value(Y))))
    return isImpliedByDomCondition(ICmpInst::ICMP_SLT, X, Y, SQ.CxtI, SQ.DL);

  return std::nullopt;
}

static std::optional<bool> getKnownSignOrZero(Value *Op,
                                              const SimplifyQuery &SQ) {
  if (std::optional<bool> Sign = getKnownSign(Op, SQ))
    return Sign;

  Value *X, *Y;
  if (match(Op, m_NSWSub(m_Value(X), m_Value(Y))))
    return isImpliedByDomCondition(ICmpInst::ICMP_SLE, X, Y, SQ.CxtI, SQ.DL);

  return std::nullopt;
}

/// Return true if two values \p Op0 and \p Op1 are known to have the same sign.
static bool signBitMustBeTheSame(Value *Op0, Value *Op1,
                                 const SimplifyQuery &SQ) {
  std::optional<bool> Known1 = getKnownSign(Op1, SQ);
  if (!Known1)
    return false;
  std::optional<bool> Known0 = getKnownSign(Op0, SQ);
  if (!Known0)
    return false;
  return *Known0 == *Known1;
}

/// Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0. This
/// can trigger other combines.
static Instruction *moveAddAfterMinMax(IntrinsicInst *II,
                                       InstCombiner::BuilderTy &Builder) {
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
          MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
         "Expected a min or max intrinsic");

  // TODO: Match vectors with undef elements, but undef may not propagate.
  Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
  Value *X;
  const APInt *C0, *C1;
  if (!match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C0)))) ||
      !match(Op1, m_APInt(C1)))
    return nullptr;

  // Check for necessary no-wrap and overflow constraints.
  bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
  auto *Add = cast<BinaryOperator>(Op0);
  if ((IsSigned && !Add->hasNoSignedWrap()) ||
      (!IsSigned && !Add->hasNoUnsignedWrap()))
    return nullptr;

  // If the constant difference overflows, then instsimplify should reduce the
  // min/max to the add or C1.
  bool Overflow;
  APInt CDiff =
      IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
  assert(!Overflow && "Expected simplify of min/max");

  // min/max (add X, C0), C1 --> add (min/max X, C1 - C0), C0
  // Note: the "mismatched" no-overflow setting does not propagate.
  Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff);
  Value *NewMinMax = Builder.CreateBinaryIntrinsic(MinMaxID, X, NewMinMaxC);
  return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
                  : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));
}
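
// Illustrative IR (a sketch):
//   %a = add nsw i32 %x, 3
//   %m = call i32 @llvm.smax.i32(i32 %a, i32 7)
// becomes:
//   %s = call i32 @llvm.smax.i32(i32 %x, i32 4)
//   %m = add nsw i32 %s, 3
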
/// Match a sadd_sat or ssub_sat which is using min/max to clamp the value.
Instruction *InstCombinerImpl::matchSAddSubSat(IntrinsicInst &MinMax1) {
  Type *Ty = MinMax1.getType();

  // We are looking for a tree of:
  // max(INT_MIN, min(INT_MAX, add(sext(A), sext(B))))
  // Where the min and max could be reversed
  Instruction *MinMax2;
  BinaryOperator *AddSub;
  const APInt *MinValue, *MaxValue;
  if (match(&MinMax1, m_SMin(m_Instruction(MinMax2), m_APInt(MaxValue)))) {
    if (!match(MinMax2, m_SMax(m_BinOp(AddSub), m_APInt(MinValue))))
      return nullptr;
  } else if (match(&MinMax1,
                   m_SMax(m_Instruction(MinMax2), m_APInt(MinValue)))) {
    if (!match(MinMax2, m_SMin(m_BinOp(AddSub), m_APInt(MaxValue))))
      return nullptr;
  } else
    return nullptr;

  // Check that the constants clamp a saturate, and that the new type would be
  // sensible to convert to.
  if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
    return nullptr;
  // In what bitwidth can this be treated as saturating arithmetic?
  unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;
  // FIXME: This isn't quite right for vectors, but using the scalar type is a
  // good first approximation for what should be done there.
  if (!shouldChangeType(Ty->getScalarType()->getIntegerBitWidth(), NewBitWidth))
    return nullptr;

  // Also make sure that the inner min/max and the add/sub have one use.
  if (!MinMax2->hasOneUse() || !AddSub->hasOneUse())
    return nullptr;

  // Create the new type (which can be a vector type)
  Type *NewTy = Ty->getWithNewBitWidth(NewBitWidth);

  Intrinsic::ID IntrinsicID;
  if (AddSub->getOpcode() == Instruction::Add)
    IntrinsicID = Intrinsic::sadd_sat;
  else if (AddSub->getOpcode() == Instruction::Sub)
    IntrinsicID = Intrinsic::ssub_sat;
  else
    return nullptr;

  // The two operands of the add/sub must be nsw-truncatable to the NewTy. This
  // is usually achieved via a sext from a smaller type.
  if (ComputeMaxSignificantBits(AddSub->getOperand(0), AddSub) > NewBitWidth ||
      ComputeMaxSignificantBits(AddSub->getOperand(1), AddSub) > NewBitWidth)
    return nullptr;

  // Finally create and return the sat intrinsic, truncated to the new type
  Value *AT = Builder.CreateTrunc(AddSub->getOperand(0), NewTy);
  Value *BT = Builder.CreateTrunc(AddSub->getOperand(1), NewTy);
  Value *Sat = Builder.CreateIntrinsic(IntrinsicID, NewTy, {AT, BT});
  return CastInst::Create(Instruction::SExt, Sat, Ty);
}
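
// Illustrative IR (a sketch):
//   %ax = sext i8 %a to i16
//   %bx = sext i8 %b to i16
//   %s  = add i16 %ax, %bx
//   %lo = call i16 @llvm.smax.i16(i16 %s, i16 -128)
//   %c  = call i16 @llvm.smin.i16(i16 %lo, i16 127)
// clamps to the full i8 range, so it becomes:
//   %sat = call i8 @llvm.sadd.sat.i8(i8 %a, i8 %b)
//   %c   = sext i8 %sat to i16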

/// If we have a clamp pattern like max (min X, 42), 41 -- where the output
/// can only be one of two possible constant values -- turn that into a select
/// of constants.
static Instruction *foldClampRangeOfTwo(IntrinsicInst *II,
                                        InstCombiner::BuilderTy &Builder) {
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
  Value *X;
  const APInt *C0, *C1;
  if (!match(I1, m_APInt(C1)) || !I0->hasOneUse())
    return nullptr;

  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:
    if (match(I0, m_SMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
      Pred = ICmpInst::ICMP_SGT;
    break;
  case Intrinsic::smin:
    if (match(I0, m_SMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
      Pred = ICmpInst::ICMP_SLT;
    break;
  case Intrinsic::umax:
    if (match(I0, m_UMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
      Pred = ICmpInst::ICMP_UGT;
    break;
  case Intrinsic::umin:
    if (match(I0, m_UMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
      Pred = ICmpInst::ICMP_ULT;
    break;
  default:
    llvm_unreachable("Expected min/max intrinsic");
  }
  if (Pred == CmpInst::BAD_ICMP_PREDICATE)
    return nullptr;

  // max (min X, 42), 41 --> X > 41 ? 42 : 41
  // min (max X, 42), 43 --> X < 43 ? 42 : 43
  Value *Cmp = Builder.CreateICmp(Pred, X, I1);
  return SelectInst::Create(Cmp, ConstantInt::get(II->getType(), *C0), I1);
}

/// If this min/max has a constant operand and an operand that is a matching
/// min/max with a constant operand, constant-fold the 2 constant operands.
static Value *reassociateMinMaxWithConstants(IntrinsicInst *II,
                                             IRBuilderBase &Builder,
                                             const SimplifyQuery &SQ) {
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  auto *LHS = dyn_cast<MinMaxIntrinsic>(II->getArgOperand(0));
  if (!LHS)
    return nullptr;

  Constant *C0, *C1;
  if (!match(LHS->getArgOperand(1), m_ImmConstant(C0)) ||
      !match(II->getArgOperand(1), m_ImmConstant(C1)))
    return nullptr;

  // max (max X, C0), C1 --> max X, (max C0, C1)
  // min (min X, C0), C1 --> min X, (min C0, C1)
  // umax (smax X, nneg C0), nneg C1 --> smax X, (umax C0, C1)
  // smin (umin X, nneg C0), nneg C1 --> umin X, (smin C0, C1)
  Intrinsic::ID InnerMinMaxID = LHS->getIntrinsicID();
  if (InnerMinMaxID != MinMaxID &&
      !(((MinMaxID == Intrinsic::umax && InnerMinMaxID == Intrinsic::smax) ||
         (MinMaxID == Intrinsic::smin && InnerMinMaxID == Intrinsic::umin)) &&
        isKnownNonNegative(C0, SQ) && isKnownNonNegative(C1, SQ)))
    return nullptr;

  ICmpInst::Predicate Pred = MinMaxIntrinsic::getPredicate(MinMaxID);
  Value *CondC = Builder.CreateICmp(Pred, C0, C1);
  Value *NewC = Builder.CreateSelect(CondC, C0, C1);
  return Builder.CreateIntrinsic(InnerMinMaxID, II->getType(),
                                 {LHS->getArgOperand(0), NewC});
}

/// If this min/max has a matching min/max operand with a constant, try to push
/// the constant operand into this instruction. This can enable more folds.
static Instruction *
reassociateMinMaxWithConstantInOperand(IntrinsicInst *II,
                                       InstCombiner::BuilderTy &Builder) {
  // Match and capture a min/max operand candidate.
  Value *X, *Y;
  Constant *C;
  Instruction *Inner;
  if (!match(II, m_c_MaxOrMin(m_OneUse(m_CombineAnd(
                                  m_Instruction(Inner),
                                  m_MaxOrMin(m_Value(X), m_ImmConstant(C)))),
                              m_Value(Y))))
    return nullptr;

  // The inner op must match. Check for constants to avoid infinite loops.
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  auto *InnerMM = dyn_cast<IntrinsicInst>(Inner);
  if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||
      match(InnerMM->getArgOperand(1), m_ImmConstant()))
    return nullptr;

  // max (max X, C), Y --> max (max X, Y), C
  Function *MinMax = Intrinsic::getOrInsertDeclaration(II->getModule(),
                                                       MinMaxID, II->getType());
  Value *NewInner = Builder.CreateBinaryIntrinsic(MinMaxID, X, Y);
  NewInner->takeName(Inner);
  return CallInst::Create(MinMax, {NewInner, C});
}

/// Reduce a sequence of min/max intrinsics with a common operand.
static Instruction *factorizeMinMaxTree(IntrinsicInst *II) {
  // Match 3 of the same min/max ops. Example: umin(umin(), umin()).
  auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
  auto *RHS = dyn_cast<IntrinsicInst>(II->getArgOperand(1));
  Intrinsic::ID MinMaxID = II->getIntrinsicID();
  if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
      RHS->getIntrinsicID() != MinMaxID ||
      (!LHS->hasOneUse() && !RHS->hasOneUse()))
    return nullptr;

  Value *A = LHS->getArgOperand(0);
  Value *B = LHS->getArgOperand(1);
  Value *C = RHS->getArgOperand(0);
  Value *D = RHS->getArgOperand(1);

  // Look for a common operand.
  Value *MinMaxOp = nullptr;
  Value *ThirdOp = nullptr;
  if (LHS->hasOneUse()) {
    // If the LHS is only used in this chain and the RHS is used outside of it,
    // reuse the RHS min/max because that will eliminate the LHS.
    if (D == A || C == A) {
      // min(min(a, b), min(c, a)) --> min(min(c, a), b)
      // min(min(a, b), min(a, d)) --> min(min(a, d), b)
      MinMaxOp = RHS;
      ThirdOp = B;
    } else if (D == B || C == B) {
      // min(min(a, b), min(c, b)) --> min(min(c, b), a)
      // min(min(a, b), min(b, d)) --> min(min(b, d), a)
      MinMaxOp = RHS;
      ThirdOp = A;
    }
  } else {
    assert(RHS->hasOneUse() && "Expected one-use operand");
    // Reuse the LHS. This will eliminate the RHS.
    if (D == A || D == B) {
      // min(min(a, b), min(c, a)) --> min(min(a, b), c)
      // min(min(a, b), min(c, b)) --> min(min(a, b), c)
      MinMaxOp = LHS;
      ThirdOp = C;
    } else if (C == A || C == B) {
      // min(min(a, b), min(a, d)) --> min(min(a, b), d)
      // min(min(a, b), min(b, d)) --> min(min(a, b), d)
      MinMaxOp = LHS;
      ThirdOp = D;
    }
  }

  if (!MinMaxOp || !ThirdOp)
    return nullptr;

  Module *Mod = II->getModule();
  Function *MinMax =
      Intrinsic::getOrInsertDeclaration(Mod, MinMaxID, II->getType());
  return CallInst::Create(MinMax, { MinMaxOp, ThirdOp });
}

/// If all arguments of the intrinsic are unary shuffles with the same mask,
/// try to shuffle after the intrinsic.
Instruction *
InstCombinerImpl::foldShuffledIntrinsicOperands(IntrinsicInst *II) {
  if (!II->getType()->isVectorTy() ||
      !isTriviallyVectorizable(II->getIntrinsicID()) ||
      !II->getCalledFunction()->isSpeculatable())
    return nullptr;

  Value *X;
  Constant *C;
  ArrayRef<int> Mask;
  auto *NonConstArg = find_if_not(II->args(), [&II](Use &Arg) {
    return isa<Constant>(Arg.get()) ||
           isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(),
                                              Arg.getOperandNo(), nullptr);
  });
  if (!NonConstArg ||
      !match(NonConstArg, m_Shuffle(m_Value(X), m_Poison(), m_Mask(Mask))))
    return nullptr;

  // At least 1 operand must be a shuffle with 1 use because we are creating 2
  // instructions.
  if (none_of(II->args(), match_fn(m_OneUse(m_Shuffle(m_Value(), m_Value())))))
    return nullptr;

  // See if all arguments are shuffled with the same mask.
  SmallVector<Value *> NewArgs;
  Type *SrcTy = X->getType();
  for (Use &Arg : II->args()) {
    if (isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(),
                                           Arg.getOperandNo(), nullptr))
      NewArgs.push_back(Arg);
    else if (match(&Arg,
                   m_Shuffle(m_Value(X), m_Poison(), m_SpecificMask(Mask))) &&
             X->getType() == SrcTy)
      NewArgs.push_back(X);
    else if (match(&Arg, m_ImmConstant(C))) {
      // If it's a constant, try to find the constant that would be shuffled to C.
      if (Constant *ShuffledC =
              unshuffleConstant(Mask, C, cast<VectorType>(SrcTy)))
        NewArgs.push_back(ShuffledC);
      else
        return nullptr;
    } else
      return nullptr;
  }

  // intrinsic (shuf X, M), (shuf Y, M), ... --> shuf (intrinsic X, Y, ...), M
  Instruction *FPI = isa<FPMathOperator>(II) ? II : nullptr;
  // Result type might be a different vector width.
  // TODO: Check that the result type isn't widened?
  VectorType *ResTy =
      VectorType::get(II->getType()->getScalarType(), cast<VectorType>(SrcTy));
  Value *NewIntrinsic =
      Builder.CreateIntrinsic(ResTy, II->getIntrinsicID(), NewArgs, FPI);
  return new ShuffleVectorInst(NewIntrinsic, Mask);
}
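
// Illustrative IR (a sketch):
//   %xs = shufflevector <4 x i32> %x, <4 x i32> poison,
//                       <4 x i32> <i32 3, i32 2, i32 1, i32 0>
//   %ys = shufflevector <4 x i32> %y, <4 x i32> poison,
//                       <4 x i32> <i32 3, i32 2, i32 1, i32 0>
//   %r  = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %xs, <4 x i32> %ys)
// becomes one intrinsic call followed by one shuffle:
//   %m = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %x, <4 x i32> %y)
//   %r = shufflevector <4 x i32> %m, <4 x i32> poison,
//                      <4 x i32> <i32 3, i32 2, i32 1, i32 0>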
1545
1546/// If all arguments of the intrinsic are reverses, try to pull the reverse
1547/// after the intrinsic.
1549 if (!II->getType()->isVectorTy() ||
1550 !isTriviallyVectorizable(II->getIntrinsicID()))
1551 return nullptr;
1552
1553 // At least 1 operand must be a reverse with 1 use because we are creating 2
1554 // instructions.
1555 if (none_of(II->args(), [](Value *V) {
1556 return match(V, m_OneUse(m_VecReverse(m_Value())));
1557 }))
1558 return nullptr;
1559
1560 Value *X;
1561 Constant *C;
1562 SmallVector<Value *> NewArgs;
1563 for (Use &Arg : II->args()) {
1564 if (isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(),
1565 Arg.getOperandNo(), nullptr))
1566 NewArgs.push_back(Arg);
1567 else if (match(&Arg, m_VecReverse(m_Value(X))))
1568 NewArgs.push_back(X);
1569 else if (isSplatValue(Arg))
1570 NewArgs.push_back(Arg);
1571 else if (match(&Arg, m_ImmConstant(C)))
1572 NewArgs.push_back(Builder.CreateVectorReverse(C));
1573 else
1574 return nullptr;
1575 }
1576
1577 // intrinsic (reverse X), (reverse Y), ... --> reverse (intrinsic X, Y, ...)
1578 Instruction *FPI = isa<FPMathOperator>(II) ? II : nullptr;
1579 Instruction *NewIntrinsic = Builder.CreateIntrinsic(
1580 II->getType(), II->getIntrinsicID(), NewArgs, FPI);
1581 return Builder.CreateVectorReverse(NewIntrinsic);
1582}
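// Illustrative IR for the fold above:
//   %rx = call <4 x i8> @llvm.vector.reverse.v4i8(<4 x i8> %x)
//   %ry = call <4 x i8> @llvm.vector.reverse.v4i8(<4 x i8> %y)
//   %r  = call <4 x i8> @llvm.umax.v4i8(<4 x i8> %rx, <4 x i8> %ry)
// -->
//   %m  = call <4 x i8> @llvm.umax.v4i8(<4 x i8> %x, <4 x i8> %y)
//   %r  = call <4 x i8> @llvm.vector.reverse.v4i8(<4 x i8> %m)
// A lanewise intrinsic commutes with the lane permutation.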
1583
1584/// Fold the following cases and accepts bswap and bitreverse intrinsics:
1585/// bswap(logic_op(bswap(x), y)) --> logic_op(x, bswap(y))
1586/// bswap(logic_op(bswap(x), bswap(y))) --> logic_op(x, y) (ignores multiuse)
1587template <Intrinsic::ID IntrID>
1588 static Instruction *foldBitOrderCrossLogicOp(Value *V,
1589 InstCombiner::BuilderTy &Builder) {
1590 static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,
1591 "This helper only supports BSWAP and BITREVERSE intrinsics");
1592
1593 Value *X, *Y;
1594 // Find bitwise logic op. Check that it is a BinaryOperator explicitly so we
1595 // don't match ConstantExpr that aren't meaningful for this transform.
1596 if (match(V,
1597 m_OneUse(m_BitwiseLogic(m_Value(X), m_Value(Y))))) {
1598 Value *OldReorderX, *OldReorderY;
1599 BinaryOperator::BinaryOps Op = cast<BinaryOperator>(V)->getOpcode();
1600
1601 // If both X and Y are bswap/bitreverse, the transform reduces the number
1602 // of instructions even if there's multiuse.
1603 // If only one operand is bswap/bitreverse, we need to ensure the operand
1604 // has only one use.
1605 if (match(X, m_Intrinsic<IntrID>(m_Value(OldReorderX))) &&
1606 match(Y, m_Intrinsic<IntrID>(m_Value(OldReorderY)))) {
1607 return BinaryOperator::Create(Op, OldReorderX, OldReorderY);
1608 }
1609
1610 if (match(X, m_OneUse(m_Intrinsic<IntrID>(m_Value(OldReorderX))))) {
1611 Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, Y);
1612 return BinaryOperator::Create(Op, OldReorderX, NewReorder);
1613 }
1614
1615 if (match(Y, m_OneUse(m_Intrinsic<IntrID>(m_Value(OldReorderY))))) {
1616 Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, X);
1617 return BinaryOperator::Create(Op, NewReorder, OldReorderY);
1618 }
1619 }
1620 return nullptr;
1621}
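// Concrete check of the first pattern for bswap on i16: with x = 0x1234,
// bswap(bswap(0x1234) & y) == bswap(0x3412 & y) == 0x1234 & bswap(y), since
// bswap distributes over bitwise logic; the outer bswap disappears and only
// y gets re-swapped.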
1622
1623/// Helper to match idempotent binary intrinsics, namely, intrinsics where
1624/// `f(f(x, y), y) == f(x, y)` holds.
1625 static bool isIdempotentBinaryIntrinsic(Intrinsic::ID IID) {
1626 switch (IID) {
1627 case Intrinsic::smax:
1628 case Intrinsic::smin:
1629 case Intrinsic::umax:
1630 case Intrinsic::umin:
1631 case Intrinsic::maximum:
1632 case Intrinsic::minimum:
1633 case Intrinsic::maximumnum:
1634 case Intrinsic::minimumnum:
1635 case Intrinsic::maxnum:
1636 case Intrinsic::minnum:
1637 return true;
1638 default:
1639 return false;
1640 }
1641}
1642
1643/// Attempt to simplify value-accumulating recurrences of kind:
1644/// %umax.acc = phi i8 [ %umax, %backedge ], [ %a, %entry ]
1645/// %umax = call i8 @llvm.umax.i8(i8 %umax.acc, i8 %b)
1646/// And let the idempotent binary intrinsic be hoisted, when the operands are
1647/// known to be loop-invariant.
1648 static Value *foldIdempotentBinaryIntrinsicRecurrence(InstCombinerImpl &IC,
1649 IntrinsicInst *II) {
1650 PHINode *PN;
1651 Value *Init, *OtherOp;
1652
1653 // A binary intrinsic recurrence with loop-invariant operands is equivalent to
1654 // `call @llvm.binary.intrinsic(Init, OtherOp)`.
1655 auto IID = II->getIntrinsicID();
1656 if (!isIdempotentBinaryIntrinsic(IID) ||
1657 !matchSimpleBinaryIntrinsicRecurrence(II, PN, Init, OtherOp) ||
1658 !IC.getDominatorTree().dominates(OtherOp, PN))
1659 return nullptr;
1660
1661 auto *InvariantBinaryInst =
1662 IC.Builder.CreateBinaryIntrinsic(IID, Init, OtherOp);
1663 if (isa<FPMathOperator>(InvariantBinaryInst))
1664 cast<Instruction>(InvariantBinaryInst)->copyFastMathFlags(II);
1665 return InvariantBinaryInst;
1666}
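// For the recurrence in the comment above, each iteration computes
// umax(umax(...(umax(%a, %b))...), %b), which by idempotence is just
// umax(%a, %b), so the loop-carried value collapses to one invariant call:
//   %umax = call i8 @llvm.umax.i8(i8 %a, i8 %b)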
1667
1668static Value *simplifyReductionOperand(Value *Arg, bool CanReorderLanes) {
1669 if (!CanReorderLanes)
1670 return nullptr;
1671
1672 Value *V;
1673 if (match(Arg, m_VecReverse(m_Value(V))))
1674 return V;
1675
1676 ArrayRef<int> Mask;
1677 if (!isa<FixedVectorType>(Arg->getType()) ||
1678 !match(Arg, m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))) ||
1679 !cast<ShuffleVectorInst>(Arg)->isSingleSource())
1680 return nullptr;
1681
1682 int Sz = Mask.size();
1683 SmallBitVector UsedIndices(Sz);
1684 for (int Idx : Mask) {
1685 if (Idx == PoisonMaskElem || UsedIndices.test(Idx))
1686 return nullptr;
1687 UsedIndices.set(Idx);
1688 }
1689
1690 // Can remove shuffle iff just shuffled elements, no repeats, undefs, or
1691 // other changes.
1692 return UsedIndices.all() ? V : nullptr;
1693}
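// For example, a reduction over a full lane permutation is a reduction over
// the source vector (illustrative IR):
//   %p = shufflevector <4 x i32> %v, <4 x i32> poison,
//                      <4 x i32> <i32 3, i32 2, i32 1, i32 0>
//   %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %p)
// is equivalent to a reduce.add over %v, so the shuffle can be dropped.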
1694
1695/// Fold an unsigned minimum of trailing or leading zero bits counts:
1696/// umin(cttz(CtOp1, ZeroUndef), ConstOp) --> cttz(CtOp1 | (1 << ConstOp))
1697/// umin(ctlz(CtOp1, ZeroUndef), ConstOp) --> ctlz(CtOp1 | (SignedMin
1698/// >> ConstOp))
1699/// umin(cttz(CtOp1), cttz(CtOp2)) --> cttz(CtOp1 | CtOp2)
1700/// umin(ctlz(CtOp1), ctlz(CtOp2)) --> ctlz(CtOp1 | CtOp2)
1701template <Intrinsic::ID IntrID>
1702static Value *
1703 foldMinimumOverTrailingOrLeadingZeroCount(Value *I0, Value *I1,
1704 const DataLayout &DL,
1705 InstCombiner::BuilderTy &Builder) {
1706 static_assert(IntrID == Intrinsic::cttz || IntrID == Intrinsic::ctlz,
1707 "This helper only supports cttz and ctlz intrinsics");
1708
1709 Value *CtOp1, *CtOp2;
1710 Value *ZeroUndef1, *ZeroUndef2;
1711 if (!match(I0, m_OneUse(
1712 m_Intrinsic<IntrID>(m_Value(CtOp1), m_Value(ZeroUndef1)))))
1713 return nullptr;
1714
1715 if (match(I1,
1716 m_OneUse(m_Intrinsic<IntrID>(m_Value(CtOp2), m_Value(ZeroUndef2)))))
1717 return Builder.CreateBinaryIntrinsic(
1718 IntrID, Builder.CreateOr(CtOp1, CtOp2),
1719 Builder.CreateOr(ZeroUndef1, ZeroUndef2));
1720
1721 unsigned BitWidth = I1->getType()->getScalarSizeInBits();
1722 auto LessBitWidth = [BitWidth](auto &C) { return C.ult(BitWidth); };
1723 if (!match(I1, m_CheckedInt(LessBitWidth)))
1724 // We have a constant >= BitWidth (which can be handled by CVP)
1725 // or a non-splat vector with elements < and >= BitWidth
1726 return nullptr;
1727
1728 Type *Ty = I1->getType();
1729 Constant *NewConst = ConstantFoldBinaryOpOperands(
1730 IntrID == Intrinsic::cttz ? Instruction::Shl : Instruction::LShr,
1731 IntrID == Intrinsic::cttz
1732 ? ConstantInt::get(Ty, 1)
1733 : ConstantInt::get(Ty, APInt::getSignedMinValue(BitWidth)),
1734 cast<Constant>(I1), DL);
1735 return Builder.CreateBinaryIntrinsic(
1736 IntrID, Builder.CreateOr(CtOp1, NewConst),
1737 ConstantInt::getTrue(ZeroUndef1->getType()));
1738}
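// Worked i32 example of the constant case for cttz:
// umin(cttz(%x, false), 3) --> cttz(%x | 8, true). OR-ing in 1 << 3 caps the
// trailing-zero count at 3 without touching lower bits, and it also makes
// the input provably non-zero, so the is-zero-poison flag can be set.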
1739
1740/// Return whether "X LOp (Y ROp Z)" is always equal to
1741/// "(X LOp Y) ROp (X LOp Z)".
1742 static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW,
1743 bool HasNSW, Intrinsic::ID ROp) {
1744 switch (ROp) {
1745 case Intrinsic::umax:
1746 case Intrinsic::umin:
1747 if (HasNUW && LOp == Instruction::Add)
1748 return true;
1749 if (HasNUW && LOp == Instruction::Shl)
1750 return true;
1751 return false;
1752 case Intrinsic::smax:
1753 case Intrinsic::smin:
1754 return HasNSW && LOp == Instruction::Add;
1755 default:
1756 return false;
1757 }
1758}
1759
1760/// Return whether "(X ROp Y) LOp Z" is always equal to
1761/// "(X LOp Z) ROp (Y LOp Z)".
1762 static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, bool HasNUW,
1763 bool HasNSW, Intrinsic::ID ROp) {
1764 if (Instruction::isCommutative(LOp) || LOp == Instruction::Shl)
1765 return leftDistributesOverRight(LOp, HasNUW, HasNSW, ROp);
1766 switch (ROp) {
1767 case Intrinsic::umax:
1768 case Intrinsic::umin:
1769 return HasNUW && LOp == Instruction::Sub;
1770 case Intrinsic::smax:
1771 case Intrinsic::smin:
1772 return HasNSW && LOp == Instruction::Sub;
1773 default:
1774 return false;
1775 }
1776}
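// For instance, with a nuw add: umax(x, y) + z == umax(x + z, y + z), since
// adding a common no-wrap term preserves unsigned order; similarly a nuw sub
// of a common term distributes over umax/umin from the right.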
1777
1778 // Attempts to factorise a common term
1779 // in an instruction that has the form "(A op' B) op (C op' D)",
1780 // where op is an intrinsic and op' is a binop.
1781static Value *
1782 foldIntrinsicUsingDistributiveLaws(IntrinsicInst *II,
1783 InstCombiner::BuilderTy &Builder) {
1784 Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);
1785 Intrinsic::ID TopLevelOpcode = II->getIntrinsicID();
1786
1787 auto *Op0 = dyn_cast<BinaryOperator>(LHS);
1788 auto *Op1 = dyn_cast<BinaryOperator>(RHS);
1789
1790 if (!Op0 || !Op1)
1791 return nullptr;
1792
1793 if (Op0->getOpcode() != Op1->getOpcode())
1794 return nullptr;
1795
1796 if (!Op0->hasOneUse() || !Op1->hasOneUse())
1797 return nullptr;
1798
1799 Instruction::BinaryOps InnerOpcode =
1800 static_cast<Instruction::BinaryOps>(Op0->getOpcode());
1801 bool HasNUW = Op0->hasNoUnsignedWrap() && Op1->hasNoUnsignedWrap();
1802 bool HasNSW = Op0->hasNoSignedWrap() && Op1->hasNoSignedWrap();
1803
1804 Value *A = Op0->getOperand(0);
1805 Value *B = Op0->getOperand(1);
1806 Value *C = Op1->getOperand(0);
1807 Value *D = Op1->getOperand(1);
1808
1809 // Attempts to swap variables such that A equals C or B equals D,
1810 // if the inner operation is commutative.
1811 if (Op0->isCommutative() && A != C && B != D) {
1812 if (A == D || B == C)
1813 std::swap(C, D);
1814 else
1815 return nullptr;
1816 }
1817
1818 BinaryOperator *NewBinop;
1819 if (A == C &&
1820 leftDistributesOverRight(InnerOpcode, HasNUW, HasNSW, TopLevelOpcode)) {
1821 Value *NewIntrinsic = Builder.CreateBinaryIntrinsic(TopLevelOpcode, B, D);
1822 NewBinop =
1823 cast<BinaryOperator>(Builder.CreateBinOp(InnerOpcode, A, NewIntrinsic));
1824 } else if (B == D && rightDistributesOverLeft(InnerOpcode, HasNUW, HasNSW,
1825 TopLevelOpcode)) {
1826 Value *NewIntrinsic = Builder.CreateBinaryIntrinsic(TopLevelOpcode, A, C);
1827 NewBinop =
1828 cast<BinaryOperator>(Builder.CreateBinOp(InnerOpcode, NewIntrinsic, B));
1829 } else {
1830 return nullptr;
1831 }
1832
1833 NewBinop->setHasNoUnsignedWrap(HasNUW);
1834 NewBinop->setHasNoSignedWrap(HasNSW);
1835
1836 return NewBinop;
1837}
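// Illustrative IR for the factorization (common term %a, inner op add nuw):
//   %l = add nuw i8 %a, %b
//   %r = add nuw i8 %a, %d
//   %m = call i8 @llvm.umax.i8(i8 %l, i8 %r)
// -->
//   %t = call i8 @llvm.umax.i8(i8 %b, i8 %d)
//   %m = add nuw i8 %a, %t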
1838
1840 Value *Arg0 = II->getArgOperand(0);
1841 auto *ShiftConst = dyn_cast<Constant>(II->getArgOperand(1));
1842 if (!ShiftConst)
1843 return nullptr;
1844
1845 int ElemBits = Arg0->getType()->getScalarSizeInBits();
1846 bool AllPositive = true;
1847 bool AllNegative = true;
1848
1849 auto Check = [&](Constant *C) -> bool {
1850 if (auto *CI = dyn_cast_or_null<ConstantInt>(C)) {
1851 const APInt &V = CI->getValue();
1852 if (V.isNonNegative()) {
1853 AllNegative = false;
1854 return AllPositive && V.ult(ElemBits);
1855 }
1856 AllPositive = false;
1857 return AllNegative && V.sgt(-ElemBits);
1858 }
1859 return false;
1860 };
1861
1862 if (auto *VTy = dyn_cast<FixedVectorType>(Arg0->getType())) {
1863 for (unsigned I = 0, E = VTy->getNumElements(); I < E; ++I) {
1864 if (!Check(ShiftConst->getAggregateElement(I)))
1865 return nullptr;
1866 }
1867
1868 } else if (!Check(ShiftConst))
1869 return nullptr;
1870
1871 IRBuilderBase &B = IC.Builder;
1872 if (AllPositive)
1873 return IC.replaceInstUsesWith(*II, B.CreateShl(Arg0, ShiftConst));
1874
1875 Value *NegAmt = B.CreateNeg(ShiftConst);
1876 Intrinsic::ID IID = II->getIntrinsicID();
1877 const bool IsSigned =
1878 IID == Intrinsic::arm_neon_vshifts || IID == Intrinsic::aarch64_neon_sshl;
1879 Value *Result =
1880 IsSigned ? B.CreateAShr(Arg0, NegAmt) : B.CreateLShr(Arg0, NegAmt);
1881 return IC.replaceInstUsesWith(*II, Result);
1882}
1883
1884/// CallInst simplification. This mostly only handles folding of intrinsic
1885/// instructions. For normal calls, it allows visitCallBase to do the heavy
1886/// lifting.
1887 Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
1888 // Don't try to simplify calls without uses. It will not do anything useful,
1889 // but will result in the following folds being skipped.
1890 if (!CI.use_empty()) {
1891 SmallVector<Value *, 8> Args(CI.args());
1892 if (Value *V = simplifyCall(&CI, CI.getCalledOperand(), Args,
1893 SQ.getWithInstruction(&CI)))
1894 return replaceInstUsesWith(CI, V);
1895 }
1896
1897 if (Value *FreedOp = getFreedOperand(&CI, &TLI))
1898 return visitFree(CI, FreedOp);
1899
1900 // If the caller function (i.e. us, the function that contains this CallInst)
1901 // is nounwind, mark the call as nounwind, even if the callee isn't.
1902 if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
1903 CI.setDoesNotThrow();
1904 return &CI;
1905 }
1906
1907 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
1908 if (!II)
1909 return visitCallBase(CI);
1910
1911 // Intrinsics cannot occur in an invoke or a callbr, so handle them here
1912 // instead of in visitCallBase.
1913 if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
1914 if (auto NumBytes = MI->getLengthInBytes()) {
1915 // memmove/cpy/set of zero bytes is a noop.
1916 if (NumBytes->isZero())
1917 return eraseInstFromFunction(CI);
1918
1919 // For atomic unordered mem intrinsics, if the length is not positive or
1920 // not a multiple of the element size, then the behavior is undefined.
1921 if (MI->isAtomic() &&
1922 (NumBytes->isNegative() ||
1923 (NumBytes->getZExtValue() % MI->getElementSizeInBytes() != 0))) {
1924 CreateNonTerminatorUnreachable(MI);
1925 assert(MI->getType()->isVoidTy() &&
1926 "non void atomic unordered mem intrinsic");
1927 return eraseInstFromFunction(*MI);
1928 }
1929 }
1930
1931 // No other transformations apply to volatile transfers.
1932 if (MI->isVolatile())
1933 return nullptr;
1934
1935 if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1936 // memmove(x,x,size) -> noop.
1937 if (MTI->getSource() == MTI->getDest())
1938 return eraseInstFromFunction(CI);
1939 }
1940
1941 auto IsPointerUndefined = [MI](Value *Ptr) {
1942 return isa<ConstantPointerNull>(Ptr) &&
1944 !NullPointerIsDefined(MI->getFunction(),
1945 cast<PointerType>(Ptr->getType())->getAddressSpace());
1946 };
1947 bool SrcIsUndefined = false;
1948 // If we can determine a pointer alignment that is bigger than currently
1949 // set, update the alignment.
1950 if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1951 if (Instruction *I = SimplifyAnyMemTransfer(MTI))
1952 return I;
1953 SrcIsUndefined = IsPointerUndefined(MTI->getRawSource());
1954 } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
1955 if (Instruction *I = SimplifyAnyMemSet(MSI))
1956 return I;
1957 }
1958
1959 // If src/dest is null, this memory intrinsic must be a noop.
1960 if (SrcIsUndefined || IsPointerUndefined(MI->getRawDest())) {
1961 Builder.CreateAssumption(Builder.CreateIsNull(MI->getLength()));
1962 return eraseInstFromFunction(CI);
1963 }
1964
1965 // If we have a memmove and the source operand is a constant global,
1966 // then the source and dest pointers can't alias, so we can change this
1967 // into a call to memcpy.
1968 if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
1969 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
1970 if (GVSrc->isConstant()) {
1971 Module *M = CI.getModule();
1972 Intrinsic::ID MemCpyID =
1973 MMI->isAtomic()
1974 ? Intrinsic::memcpy_element_unordered_atomic
1975 : Intrinsic::memcpy;
1976 Type *Tys[3] = { CI.getArgOperand(0)->getType(),
1977 CI.getArgOperand(1)->getType(),
1978 CI.getArgOperand(2)->getType() };
1979 CI.setCalledFunction(
1980 Intrinsic::getOrInsertDeclaration(M, MemCpyID, Tys));
1981 return II;
1982 }
1983 }
1984 }
1985
1986 // For fixed width vector result intrinsics, use the generic demanded vector
1987 // support.
1988 if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
1989 auto VWidth = IIFVTy->getNumElements();
1990 APInt PoisonElts(VWidth, 0);
1991 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
1992 if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, PoisonElts)) {
1993 if (V != II)
1994 return replaceInstUsesWith(*II, V);
1995 return II;
1996 }
1997 }
1998
1999 if (II->isCommutative()) {
2000 if (auto Pair = matchSymmetricPair(II->getOperand(0), II->getOperand(1))) {
2001 replaceOperand(*II, 0, Pair->first);
2002 replaceOperand(*II, 1, Pair->second);
2003 return II;
2004 }
2005
2006 if (CallInst *NewCall = canonicalizeConstantArg0ToArg1(CI))
2007 return NewCall;
2008 }
2009
2010 // Unused constrained FP intrinsic calls may have a declared side effect,
2011 // which prevents them from being removed. In some cases, however, the side
2012 // effect is actually absent. To detect this case, call
2013 // SimplifyConstrainedFPCall; if it returns a replacement, the call may be removed.
2014 if (CI.use_empty() && isa<ConstrainedFPIntrinsic>(CI)) {
2015 if (simplifyConstrainedFPCall(&CI, SQ.getWithInstruction(&CI)))
2016 return eraseInstFromFunction(CI);
2017 }
2018
2019 Intrinsic::ID IID = II->getIntrinsicID();
2020 switch (IID) {
2021 case Intrinsic::objectsize: {
2022 SmallVector<Instruction *> InsertedInstructions;
2023 if (Value *V = lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/false,
2024 &InsertedInstructions)) {
2025 for (Instruction *Inserted : InsertedInstructions)
2026 Worklist.add(Inserted);
2027 return replaceInstUsesWith(CI, V);
2028 }
2029 return nullptr;
2030 }
2031 case Intrinsic::abs: {
2032 Value *IIOperand = II->getArgOperand(0);
2033 bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();
2034
2035 // abs(-x) -> abs(x)
2036 Value *X;
2037 if (match(IIOperand, m_Neg(m_Value(X)))) {
2038 if (cast<Instruction>(IIOperand)->hasNoSignedWrap() || IntMinIsPoison)
2039 replaceOperand(*II, 1, Builder.getTrue());
2040 return replaceOperand(*II, 0, X);
2041 }
2042 if (match(IIOperand, m_c_Select(m_Neg(m_Value(X)), m_Deferred(X))))
2043 return replaceOperand(*II, 0, X);
2044
2045 Value *Y;
2046 // abs(a * abs(b)) -> abs(a * b)
2047 if (match(IIOperand,
2048 m_OneUse(m_c_Mul(m_Value(X),
2049 m_Intrinsic<Intrinsic::abs>(m_Value(Y)))))) {
2050 bool NSW =
2051 cast<Instruction>(IIOperand)->hasNoSignedWrap() && IntMinIsPoison;
2052 auto *XY = NSW ? Builder.CreateNSWMul(X, Y) : Builder.CreateMul(X, Y);
2053 return replaceOperand(*II, 0, XY);
2054 }
2055
2056 if (std::optional<bool> Known =
2057 getKnownSignOrZero(IIOperand, SQ.getWithInstruction(II))) {
2058 // abs(x) -> x if x >= 0 (includes abs(x-y) --> x - y where x >= y)
2059 // abs(x) -> x if x > 0 (includes abs(x-y) --> x - y where x > y)
2060 if (!*Known)
2061 return replaceInstUsesWith(*II, IIOperand);
2062
2063 // abs(x) -> -x if x < 0
2064 // abs(x) -> -x if x <= 0 (includes abs(x-y) --> y - x where x <= y)
2065 if (IntMinIsPoison)
2066 return BinaryOperator::CreateNSWNeg(IIOperand);
2067 return BinaryOperator::CreateNeg(IIOperand);
2068 }
2069
2070 // abs (sext X) --> zext (abs X*)
2071 // Clear the IsIntMin (nsw) bit on the abs to allow narrowing.
2072 if (match(IIOperand, m_OneUse(m_SExt(m_Value(X))))) {
2073 Value *NarrowAbs =
2074 Builder.CreateBinaryIntrinsic(Intrinsic::abs, X, Builder.getFalse());
2075 return CastInst::Create(Instruction::ZExt, NarrowAbs, II->getType());
2076 }
2077
2078 // Match a complicated way to check if a number is odd/even:
2079 // abs (srem X, 2) --> and X, 1
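// Worked check: for X = -3, srem(-3, 2) = -1 and abs(-1) = 1, while
// -3 & 1 = 1; for X = 6, srem(6, 2) = 0 and 6 & 1 = 0.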
2080 const APInt *C;
2081 if (match(IIOperand, m_SRem(m_Value(X), m_APInt(C))) && *C == 2)
2082 return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1));
2083
2084 break;
2085 }
2086 case Intrinsic::umin: {
2087 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
2088 // umin(x, 1) == zext(x != 0)
2089 if (match(I1, m_One())) {
2090 assert(II->getType()->getScalarSizeInBits() != 1 &&
2091 "Expected simplify of umin with max constant");
2092 Value *Zero = Constant::getNullValue(I0->getType());
2093 Value *Cmp = Builder.CreateICmpNE(I0, Zero);
2094 return CastInst::Create(Instruction::ZExt, Cmp, II->getType());
2095 }
2096 // umin(cttz(x), const) --> cttz(x | (1 << const))
2097 if (Value *FoldedCttz =
2098 foldMinimumOverTrailingOrLeadingZeroCount<Intrinsic::cttz>(
2099 I0, I1, DL, Builder))
2100 return replaceInstUsesWith(*II, FoldedCttz);
2101 // umin(ctlz(x), const) --> ctlz(x | (SignedMin >> const))
2102 if (Value *FoldedCtlz =
2103 foldMinimumOverTrailingOrLeadingZeroCount<Intrinsic::ctlz>(
2104 I0, I1, DL, Builder))
2105 return replaceInstUsesWith(*II, FoldedCtlz);
2106 [[fallthrough]];
2107 }
2108 case Intrinsic::umax: {
2109 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
2110 Value *X, *Y;
2111 if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_ZExt(m_Value(Y))) &&
2112 (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
2113 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
2114 return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
2115 }
2116 Constant *C;
2117 if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_Constant(C)) &&
2118 I0->hasOneUse()) {
2119 if (Constant *NarrowC = getLosslessUnsignedTrunc(C, X->getType(), DL)) {
2120 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
2121 return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
2122 }
2123 }
2124 // If C is not 0:
2125 // umax(nuw_shl(x, C), x + 1) -> x == 0 ? 1 : nuw_shl(x, C)
2126 // If C is not 0 or 1:
2127 // umax(nuw_mul(x, C), x + 1) -> x == 0 ? 1 : nuw_mul(x, C)
2128 auto foldMaxMulShift = [&](Value *A, Value *B) -> Instruction * {
2129 const APInt *C;
2130 Value *X;
2131 if (!match(A, m_NUWShl(m_Value(X), m_APInt(C))) &&
2132 !(match(A, m_NUWMul(m_Value(X), m_APInt(C))) && !C->isOne()))
2133 return nullptr;
2134 if (C->isZero())
2135 return nullptr;
2136 if (!match(B, m_OneUse(m_Add(m_Specific(X), m_One()))))
2137 return nullptr;
2138
2139 Value *Cmp = Builder.CreateICmpEQ(X, ConstantInt::get(X->getType(), 0));
2140 Value *NewSelect = nullptr;
2141 NewSelect = Builder.CreateSelectWithUnknownProfile(
2142 Cmp, ConstantInt::get(X->getType(), 1), A, DEBUG_TYPE);
2143 return replaceInstUsesWith(*II, NewSelect);
2144 };
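// For example, with C = 3: when %x == 0, (shl nuw %x, 3) is 0 while %x + 1
// is 1; for any %x != 0, (shl nuw %x, 3) == 8 * %x > %x + 1 because nuw
// guarantees no unsigned wrap. Hence the umax is exactly the select above.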
2145
2146 if (IID == Intrinsic::umax) {
2147 if (Instruction *I = foldMaxMulShift(I0, I1))
2148 return I;
2149 if (Instruction *I = foldMaxMulShift(I1, I0))
2150 return I;
2151 }
2152
2153 // If both operands of unsigned min/max are sign-extended, it is still ok
2154 // to narrow the operation.
2155 [[fallthrough]];
2156 }
2157 case Intrinsic::smax:
2158 case Intrinsic::smin: {
2159 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
2160 Value *X, *Y;
2161 if (match(I0, m_SExt(m_Value(X))) && match(I1, m_SExt(m_Value(Y))) &&
2162 (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
2163 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
2164 return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
2165 }
2166
2167 Constant *C;
2168 if (match(I0, m_SExt(m_Value(X))) && match(I1, m_Constant(C)) &&
2169 I0->hasOneUse()) {
2170 if (Constant *NarrowC = getLosslessSignedTrunc(C, X->getType(), DL)) {
2171 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
2172 return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
2173 }
2174 }
2175
2176 // smax(smin(X, MinC), MaxC) -> smin(smax(X, MaxC), MinC) if MinC s>= MaxC
2177 // umax(umin(X, MinC), MaxC) -> umin(umax(X, MaxC), MinC) if MinC u>= MaxC
2178 const APInt *MinC, *MaxC;
2179 auto CreateCanonicalClampForm = [&](bool IsSigned) {
2180 auto MaxIID = IsSigned ? Intrinsic::smax : Intrinsic::umax;
2181 auto MinIID = IsSigned ? Intrinsic::smin : Intrinsic::umin;
2182 Value *NewMax = Builder.CreateBinaryIntrinsic(
2183 MaxIID, X, ConstantInt::get(X->getType(), *MaxC));
2184 return replaceInstUsesWith(
2185 *II, Builder.CreateBinaryIntrinsic(
2186 MinIID, NewMax, ConstantInt::get(X->getType(), *MinC)));
2187 };
2188 if (IID == Intrinsic::smax &&
2189 match(I0, m_OneUse(m_Intrinsic<Intrinsic::smin>(m_Value(X),
2190 m_APInt(MinC)))) &&
2191 match(I1, m_APInt(MaxC)) && MinC->sgt(*MaxC))
2192 return CreateCanonicalClampForm(true);
2193 if (IID == Intrinsic::umax &&
2194 match(I0, m_OneUse(m_Intrinsic<Intrinsic::umin>(m_Value(X),
2195 m_APInt(MinC)))) &&
2196 match(I1, m_APInt(MaxC)) && MinC->ugt(*MaxC))
2197 return CreateCanonicalClampForm(false);
2198
2199 // umin(i1 X, i1 Y) -> and i1 X, Y
2200 // smax(i1 X, i1 Y) -> and i1 X, Y
2201 if ((IID == Intrinsic::umin || IID == Intrinsic::smax) &&
2202 II->getType()->isIntOrIntVectorTy(1)) {
2203 return BinaryOperator::CreateAnd(I0, I1);
2204 }
2205
2206 // umax(i1 X, i1 Y) -> or i1 X, Y
2207 // smin(i1 X, i1 Y) -> or i1 X, Y
2208 if ((IID == Intrinsic::umax || IID == Intrinsic::smin) &&
2209 II->getType()->isIntOrIntVectorTy(1)) {
2210 return BinaryOperator::CreateOr(I0, I1);
2211 }
2212
2213 // smin(smax(X, -1), 1) -> scmp(X, 0)
2214 // smax(smin(X, 1), -1) -> scmp(X, 0)
2215 // At this point, smax(smin(X, 1), -1) has been changed to smin(smax(X, -1), 1),
2216 // and the i1 variants have been changed to and/or,
2217 // so we only need to check for smin.
2218 if (IID == Intrinsic::smin) {
2219 if (match(I0, m_OneUse(m_SMax(m_Value(X), m_AllOnes()))) &&
2220 match(I1, m_One())) {
2221 Value *Zero = ConstantInt::get(X->getType(), 0);
2222 return replaceInstUsesWith(
2223 CI,
2224 Builder.CreateIntrinsic(II->getType(), Intrinsic::scmp, {X, Zero}));
2225 }
2226 }
2227
2228 if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
2229 // smax (neg nsw X), (neg nsw Y) --> neg nsw (smin X, Y)
2230 // smin (neg nsw X), (neg nsw Y) --> neg nsw (smax X, Y)
2231 // TODO: Canonicalize neg after min/max if I1 is constant.
2232 if (match(I0, m_NSWNeg(m_Value(X))) && match(I1, m_NSWNeg(m_Value(Y))) &&
2233 (I0->hasOneUse() || I1->hasOneUse())) {
2234 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
2235 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, Y);
2236 return BinaryOperator::CreateNSWNeg(InvMaxMin);
2237 }
2238 }
2239
2240 // (umax X, (xor X, Pow2))
2241 // -> (or X, Pow2)
2242 // (umin X, (xor X, Pow2))
2243 // -> (and X, ~Pow2)
2244 // (smax X, (xor X, Pos_Pow2))
2245 // -> (or X, Pos_Pow2)
2246 // (smin X, (xor X, Pos_Pow2))
2247 // -> (and X, ~Pos_Pow2)
2248 // (smax X, (xor X, Neg_Pow2))
2249 // -> (and X, ~Neg_Pow2)
2250 // (smin X, (xor X, Neg_Pow2))
2251 // -> (or X, Neg_Pow2)
2252 if ((match(I0, m_c_Xor(m_Specific(I1), m_Value(X))) ||
2253 match(I1, m_c_Xor(m_Specific(I0), m_Value(X)))) &&
2254 isKnownToBeAPowerOfTwo(X, /* OrZero */ true)) {
2255 bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax;
2256 bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin;
2257
2258 if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
2259 auto KnownSign = getKnownSign(X, SQ.getWithInstruction(II));
2260 if (KnownSign == std::nullopt) {
2261 UseOr = false;
2262 UseAndN = false;
2263 } else if (*KnownSign /* true is Signed. */) {
2264 UseOr ^= true;
2265 UseAndN ^= true;
2266 Type *Ty = I0->getType();
2267 // Negative power of 2 must be IntMin. It's possible to prove
2268 // negative / power of 2 without actually having known bits, so
2269 // just get the value by hand.
2270 X = Constant::getIntegerValue(
2271 Ty, APInt::getSignedMinValue(Ty->getScalarSizeInBits()));
2272 }
2273 }
2274 if (UseOr)
2275 return BinaryOperator::CreateOr(I0, X);
2276 else if (UseAndN)
2277 return BinaryOperator::CreateAnd(I0, Builder.CreateNot(X));
2278 }
2279
2280 // If we can eliminate ~A and Y is free to invert:
2281 // max ~A, Y --> ~(min A, ~Y)
2282 //
2283 // Examples:
2284 // max ~A, ~Y --> ~(min A, Y)
2285 // max ~A, C --> ~(min A, ~C)
2286 // max ~A, (max ~Y, ~Z) --> ~min( A, (min Y, Z))
2287 auto moveNotAfterMinMax = [&](Value *X, Value *Y) -> Instruction * {
2288 Value *A;
2289 if (match(X, m_OneUse(m_Not(m_Value(A)))) &&
2290 !isFreeToInvert(A, A->hasOneUse())) {
2291 if (Value *NotY = getFreelyInverted(Y, Y->hasOneUse(), &Builder)) {
2292 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
2293 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY);
2294 return BinaryOperator::CreateNot(InvMaxMin);
2295 }
2296 }
2297 return nullptr;
2298 };
2299
2300 if (Instruction *I = moveNotAfterMinMax(I0, I1))
2301 return I;
2302 if (Instruction *I = moveNotAfterMinMax(I1, I0))
2303 return I;
2304
2305 if (Instruction *I = moveAddAfterMinMax(II, Builder))
2306 return I;
2307
2308 // minmax (X & NegPow2C, Y & NegPow2C) --> minmax(X, Y) & NegPow2C
2309 const APInt *RHSC;
2310 if (match(I0, m_OneUse(m_And(m_Value(X), m_NegatedPower2(RHSC)))) &&
2311 match(I1, m_OneUse(m_And(m_Value(Y), m_SpecificInt(*RHSC)))))
2312 return BinaryOperator::CreateAnd(Builder.CreateBinaryIntrinsic(IID, X, Y),
2313 ConstantInt::get(II->getType(), *RHSC));
2314
2315 // smax(X, -X) --> abs(X)
2316 // smin(X, -X) --> -abs(X)
2317 // umax(X, -X) --> -abs(X)
2318 // umin(X, -X) --> abs(X)
2319 if (isKnownNegation(I0, I1)) {
2320 // We can choose either operand as the input to abs(), but if we can
2321 // eliminate the only use of a value, that's better for subsequent
2322 // transforms/analysis.
2323 if (I0->hasOneUse() && !I1->hasOneUse())
2324 std::swap(I0, I1);
2325
2326 // This is some variant of abs(). See if we can propagate 'nsw' to the abs
2327 // operation and potentially its negation.
2328 bool IntMinIsPoison = isKnownNegation(I0, I1, /* NeedNSW */ true);
2329 Value *Abs = Builder.CreateBinaryIntrinsic(
2330 Intrinsic::abs, I0,
2331 ConstantInt::getBool(II->getContext(), IntMinIsPoison));
2332
2333 // We don't have a "nabs" intrinsic, so negate if needed based on the
2334 // max/min operation.
2335 if (IID == Intrinsic::smin || IID == Intrinsic::umax)
2336 Abs = Builder.CreateNeg(Abs, "nabs", IntMinIsPoison);
2337 return replaceInstUsesWith(CI, Abs);
2338 }
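// Sanity check with i8 X = -3 (0xFD), -X = 3: smax(-3, 3) = 3 = abs,
// smin(-3, 3) = -3 = -abs, umax(0xFD, 0x03) = 0xFD = -abs, and
// umin(0xFD, 0x03) = 3 = abs, matching the table above.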
2339
2340 if (Instruction *Sel = foldClampRangeOfTwo(II, Builder))
2341 return Sel;
2342
2343 if (Instruction *SAdd = matchSAddSubSat(*II))
2344 return SAdd;
2345
2346 if (Value *NewMinMax = reassociateMinMaxWithConstants(II, Builder, SQ))
2347 return replaceInstUsesWith(*II, NewMinMax);
2348
2349 if (Instruction *R = reassociateMinMaxWithConstantInOperand(II, Builder))
2350 return R;
2351
2352 if (Instruction *NewMinMax = factorizeMinMaxTree(II))
2353 return NewMinMax;
2354
2355 // Try to fold minmax with constant RHS based on range information
2356 if (match(I1, m_APIntAllowPoison(RHSC))) {
2357 ICmpInst::Predicate Pred =
2358 ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
2359 bool IsSigned = MinMaxIntrinsic::isSigned(IID);
2360 ConstantRange LHS_CR = computeConstantRangeIncludingKnownBits(
2361 I0, IsSigned, SQ.getWithInstruction(II));
2362 if (!LHS_CR.isFullSet()) {
2363 if (LHS_CR.icmp(Pred, *RHSC))
2364 return replaceInstUsesWith(*II, I0);
2365 if (LHS_CR.icmp(ICmpInst::getSwappedPredicate(Pred), *RHSC))
2366 return replaceInstUsesWith(*II,
2367 ConstantInt::get(II->getType(), *RHSC));
2368 }
2369 }
2370
2371 if (Value *V = foldIdempotentBinaryIntrinsicRecurrence(*this, II))
2372 return replaceInstUsesWith(*II, V);
2373
2374 break;
2375 }
2376 case Intrinsic::scmp: {
2377 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
2378 Value *LHS, *RHS;
2379 if (match(I0, m_NSWSub(m_Value(LHS), m_Value(RHS))) && match(I1, m_Zero()))
2380 return replaceInstUsesWith(
2381 CI,
2382 Builder.CreateIntrinsic(II->getType(), Intrinsic::scmp, {LHS, RHS}));
2383 break;
2384 }
2385 case Intrinsic::bitreverse: {
2386 Value *IIOperand = II->getArgOperand(0);
2387 // bitrev (zext i1 X to ?) --> X ? SignBitC : 0
2388 Value *X;
2389 if (match(IIOperand, m_ZExt(m_Value(X))) &&
2390 X->getType()->isIntOrIntVectorTy(1)) {
2391 Type *Ty = II->getType();
2392 APInt SignBit = APInt::getSignMask(Ty->getScalarSizeInBits());
2393 return SelectInst::Create(X, ConstantInt::get(Ty, SignBit),
2394 ConstantInt::getNullValue(Ty));
2395 }
2396
2397 if (Instruction *crossLogicOpFold =
2398 foldBitOrderCrossLogicOp<Intrinsic::bitreverse>(IIOperand, Builder))
2399 return crossLogicOpFold;
2400
2401 break;
2402 }
2403 case Intrinsic::bswap: {
2404 Value *IIOperand = II->getArgOperand(0);
2405
2406 // Try to canonicalize bswap-of-logical-shift-by-8-bit-multiple as
2407 // inverse-shift-of-bswap:
2408 // bswap (shl X, Y) --> lshr (bswap X), Y
2409 // bswap (lshr X, Y) --> shl (bswap X), Y
2410 Value *X, *Y;
2411 if (match(IIOperand, m_OneUse(m_LogicalShift(m_Value(X), m_Value(Y))))) {
2412 unsigned BitWidth = IIOperand->getType()->getScalarSizeInBits();
2413 if (MaskedValueIsZero(Y, APInt::getLowBitsSet(BitWidth, 3))) {
2414 Value *NewSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);
2415 BinaryOperator::BinaryOps InverseShift =
2416 cast<BinaryOperator>(IIOperand)->getOpcode() == Instruction::Shl
2417 ? Instruction::LShr
2418 : Instruction::Shl;
2419 return BinaryOperator::Create(InverseShift, NewSwap, Y);
2420 }
2421 }
2422
2423 KnownBits Known = computeKnownBits(IIOperand, II);
2424 uint64_t LZ = alignDown(Known.countMinLeadingZeros(), 8);
2425 uint64_t TZ = alignDown(Known.countMinTrailingZeros(), 8);
2426 unsigned BW = Known.getBitWidth();
2427
2428 // bswap(x) -> shift(x) if x has exactly one "active byte"
2429 if (BW - LZ - TZ == 8) {
2430 assert(LZ != TZ && "active byte cannot be in the middle");
2431 if (LZ > TZ) // -> shl(x) if the "active byte" is in the low part of x
2432 return BinaryOperator::CreateNUWShl(
2433 IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ));
2434 // -> lshr(x) if the "active byte" is in the high part of x
2435 return BinaryOperator::CreateExactLShr(
2436 IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ));
2437 }
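// Worked example: for i32 %x known to have the single active byte
// 0x00AB0000 (LZ = 8, TZ = 16), bswap(%x) == 0x0000AB00 == lshr %x, 8,
// i.e. the LZ < TZ branch with shift amount TZ - LZ = 8.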
2438
2439 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
2440 if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
2441 unsigned C = X->getType()->getScalarSizeInBits() - BW;
2442 Value *CV = ConstantInt::get(X->getType(), C);
2443 Value *V = Builder.CreateLShr(X, CV);
2444 return new TruncInst(V, IIOperand->getType());
2445 }
2446
2447 if (Instruction *crossLogicOpFold =
2448 foldBitOrderCrossLogicOp<Intrinsic::bswap>(IIOperand, Builder)) {
2449 return crossLogicOpFold;
2450 }
2451
2452 // Try to fold into bitreverse if bswap is the root of the expression tree.
2453 if (Instruction *BitOp = matchBSwapOrBitReverse(*II, /*MatchBSwaps*/ false,
2454 /*MatchBitReversals*/ true))
2455 return BitOp;
2456 break;
2457 }
2458 case Intrinsic::masked_load:
2459 if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
2460 return replaceInstUsesWith(CI, SimplifiedMaskedOp);
2461 break;
2462 case Intrinsic::masked_store:
2463 return simplifyMaskedStore(*II);
2464 case Intrinsic::masked_gather:
2465 return simplifyMaskedGather(*II);
2466 case Intrinsic::masked_scatter:
2467 return simplifyMaskedScatter(*II);
2468 case Intrinsic::launder_invariant_group:
2469 case Intrinsic::strip_invariant_group:
2470 if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
2471 return replaceInstUsesWith(*II, SkippedBarrier);
2472 break;
2473 case Intrinsic::powi:
2474 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2475 // 0 and 1 are handled in instsimplify
2476 // powi(x, -1) -> 1/x
2477 if (Power->isMinusOne())
2478 return BinaryOperator::CreateFDivFMF(ConstantFP::get(CI.getType(), 1.0),
2479 II->getArgOperand(0), II);
2480 // powi(x, 2) -> x*x
2481 if (Power->equalsInt(2))
2482 return BinaryOperator::CreateFMulFMF(II->getArgOperand(0),
2483 II->getArgOperand(0), II);
2484
2485 if (!Power->getValue()[0]) {
2486 Value *X;
2487 // If power is even:
2488 // powi(-x, p) -> powi(x, p)
2489 // powi(fabs(x), p) -> powi(x, p)
2490 // powi(copysign(x, y), p) -> powi(x, p)
2491 if (match(II->getArgOperand(0), m_FNeg(m_Value(X))) ||
2492 match(II->getArgOperand(0), m_FAbs(m_Value(X))) ||
2493 match(II->getArgOperand(0),
2495 return replaceOperand(*II, 0, X);
2496 }
2497 }
2498 break;
2499
2500 case Intrinsic::cttz:
2501 case Intrinsic::ctlz:
2502 if (auto *I = foldCttzCtlz(*II, *this))
2503 return I;
2504 break;
2505
2506 case Intrinsic::ctpop:
2507 if (auto *I = foldCtpop(*II, *this))
2508 return I;
2509 break;
2510
2511 case Intrinsic::fshl:
2512 case Intrinsic::fshr: {
2513 Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
2514 Type *Ty = II->getType();
2515 unsigned BitWidth = Ty->getScalarSizeInBits();
2516 Constant *ShAmtC;
2517 if (match(II->getArgOperand(2), m_ImmConstant(ShAmtC))) {
2518 // Canonicalize a shift amount constant operand to modulo the bit-width.
2519 Constant *WidthC = ConstantInt::get(Ty, BitWidth);
2520 Constant *ModuloC =
2521 ConstantFoldBinaryOpOperands(Instruction::URem, ShAmtC, WidthC, DL);
2522 if (!ModuloC)
2523 return nullptr;
2524 if (ModuloC != ShAmtC)
2525 return replaceOperand(*II, 2, ModuloC);
2526
2527 assert(match(ConstantFoldCompareInstOperands(ICmpInst::ICMP_UGT, WidthC,
2528 ShAmtC, DL),
2529 m_One()) &&
2530 "Shift amount expected to be modulo bitwidth");
2531
2532 // Canonicalize funnel shift right by constant to funnel shift left. This
2533 // is not entirely arbitrary. For historical reasons, the backend may
2534 // recognize rotate left patterns but miss rotate right patterns.
2535 if (IID == Intrinsic::fshr) {
2536 // fshr X, Y, C --> fshl X, Y, (BitWidth - C) if C is not zero.
2537 if (!isKnownNonZero(ShAmtC, SQ.getWithInstruction(II)))
2538 return nullptr;
2539
2540 Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
2541 Module *Mod = II->getModule();
2542 Function *Fshl =
2543 Intrinsic::getOrInsertDeclaration(Mod, Intrinsic::fshl, Ty);
2544 return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
2545 }
2546 assert(IID == Intrinsic::fshl &&
2547 "All funnel shifts by simple constants should go left");
2548
2549 // fshl(X, 0, C) --> shl X, C
2550 // fshl(X, undef, C) --> shl X, C
2551 if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef()))
2552 return BinaryOperator::CreateShl(Op0, ShAmtC);
2553
2554 // fshl(0, X, C) --> lshr X, (BW-C)
2555 // fshl(undef, X, C) --> lshr X, (BW-C)
2556 if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef()))
2557 return BinaryOperator::CreateLShr(Op1,
2558 ConstantExpr::getSub(WidthC, ShAmtC));
2559
2560 // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form)
2561 if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) {
2562 Module *Mod = II->getModule();
2563 Function *Bswap =
2564 Intrinsic::getOrInsertDeclaration(Mod, Intrinsic::bswap, Ty);
2565 return CallInst::Create(Bswap, { Op0 });
2566 }
2567 if (Instruction *BitOp =
2568 matchBSwapOrBitReverse(*II, /*MatchBSwaps*/ true,
2569 /*MatchBitReversals*/ true))
2570 return BitOp;
2571
2572 // R = fshl(X, X, C2)
2573 // fshl(R, R, C1) --> fshl(X, X, (C1 + C2) % bitsize)
2574 Value *InnerOp;
2575 const APInt *ShAmtInnerC, *ShAmtOuterC;
2576 if (match(Op0, m_FShl(m_Value(InnerOp), m_Deferred(InnerOp),
2577 m_APInt(ShAmtInnerC))) &&
2578 match(ShAmtC, m_APInt(ShAmtOuterC)) && Op0 == Op1) {
2579 APInt Sum = *ShAmtOuterC + *ShAmtInnerC;
2580 APInt Modulo = Sum.urem(APInt(Sum.getBitWidth(), BitWidth));
2581 if (Modulo.isZero())
2582 return replaceInstUsesWith(*II, InnerOp);
2583 Constant *ModuloC = ConstantInt::get(Ty, Modulo);
2584 return CallInst::Create(II->getCalledFunction(),
2585 {InnerOp, InnerOp, ModuloC});
2586 }
2587 }
2588
2589 // fshl(X, X, Neg(Y)) --> fshr(X, X, Y)
2590 // fshr(X, X, Neg(Y)) --> fshl(X, X, Y)
2591 // if BitWidth is a power-of-2
2592 Value *Y;
2593 if (Op0 == Op1 && isPowerOf2_32(BitWidth) &&
2594 match(II->getArgOperand(2), m_Neg(m_Value(Y)))) {
2595 Module *Mod = II->getModule();
2596 Function *OppositeShift = Intrinsic::getOrInsertDeclaration(
2597 Mod, IID == Intrinsic::fshl ? Intrinsic::fshr : Intrinsic::fshl, Ty);
2598 return CallInst::Create(OppositeShift, {Op0, Op1, Y});
2599 }
2600
2601 // fshl(X, 0, Y) --> shl(X, and(Y, BitWidth - 1)) if bitwidth is a
2602 // power-of-2
2603 if (IID == Intrinsic::fshl && isPowerOf2_32(BitWidth) &&
2604 match(Op1, m_ZeroInt())) {
2605 Value *Op2 = II->getArgOperand(2);
2606 Value *And = Builder.CreateAnd(Op2, ConstantInt::get(Ty, BitWidth - 1));
2607 return BinaryOperator::CreateShl(Op0, And);
2608 }
2609
2610 // Left or right might be masked.
2611 if (SimplifyDemandedInstructionBits(*II))
2612 return &CI;
2613
2614 // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
2615 // so only the low bits of the shift amount are demanded if the bitwidth is
2616 // a power-of-2.
2617 if (!isPowerOf2_32(BitWidth))
2618 break;
2619 APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
2620 KnownBits Op2Known(BitWidth);
2621 if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
2622 return &CI;
2623 break;
2624 }
2625 case Intrinsic::ptrmask: {
2626 unsigned BitWidth = DL.getPointerTypeSizeInBits(II->getType());
2627 KnownBits Known(BitWidth);
2628 if (SimplifyDemandedInstructionBits(*II, Known))
2629 return II;
2630
2631 Value *InnerPtr, *InnerMask;
2632 bool Changed = false;
2633 // Combine:
2634 // (ptrmask (ptrmask p, A), B)
2635 // -> (ptrmask p, (and A, B))
2636 if (match(II->getArgOperand(0),
2637 m_OneUse(m_Intrinsic<Intrinsic::ptrmask>(m_Value(InnerPtr),
2638 m_Value(InnerMask))))) {
2639 assert(II->getArgOperand(1)->getType() == InnerMask->getType() &&
2640 "Mask types must match");
2641 // TODO: If InnerMask == Op1, we could copy attributes from inner
2642 // callsite -> outer callsite.
2643 Value *NewMask = Builder.CreateAnd(II->getArgOperand(1), InnerMask);
2644 replaceOperand(CI, 0, InnerPtr);
2645 replaceOperand(CI, 1, NewMask);
2646 Changed = true;
2647 }
2648
2649 // See if we can deduce non-null.
2650 if (!CI.hasRetAttr(Attribute::NonNull) &&
2651 (Known.isNonZero() ||
2652 isKnownNonZero(II, getSimplifyQuery().getWithInstruction(II)))) {
2653 CI.addRetAttr(Attribute::NonNull);
2654 Changed = true;
2655 }
2656
2657 unsigned NewAlignmentLog =
2658 std::min(Value::MaxAlignmentExponent,
2659 std::min(BitWidth - 1, Known.countMinTrailingZeros()));
2660 // Known bits will capture if we had alignment information associated with
2661 // the pointer argument.
2662 if (NewAlignmentLog > Log2(CI.getRetAlign().valueOrOne())) {
2663 CI.addRetAttr(Attribute::getWithAlignment(
2664 CI.getContext(), Align(uint64_t(1) << NewAlignmentLog)));
2665 Changed = true;
2666 }
2667 if (Changed)
2668 return &CI;
2669 break;
2670 }
2671 case Intrinsic::uadd_with_overflow:
2672 case Intrinsic::sadd_with_overflow: {
2673 if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2674 return I;
2675
2676 // Given 2 constant operands whose sum does not overflow:
2677 // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
2678 // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
2679 Value *X;
2680 const APInt *C0, *C1;
2681 Value *Arg0 = II->getArgOperand(0);
2682 Value *Arg1 = II->getArgOperand(1);
2683 bool IsSigned = IID == Intrinsic::sadd_with_overflow;
2684 bool HasNWAdd = IsSigned
2685 ? match(Arg0, m_NSWAddLike(m_Value(X), m_APInt(C0)))
2686 : match(Arg0, m_NUWAddLike(m_Value(X), m_APInt(C0)));
2687 if (HasNWAdd && match(Arg1, m_APInt(C1))) {
2688 bool Overflow;
2689 APInt NewC =
2690 IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
2691 if (!Overflow)
2692 return replaceInstUsesWith(
2693 *II, Builder.CreateBinaryIntrinsic(
2694 IID, X, ConstantInt::get(Arg1->getType(), NewC)));
2695 }
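// For example (i8): uaddo((add nuw %x, 20), 30) --> uaddo(%x, 50), since
// 20 + 30 = 50 does not overflow i8 and the inner add cannot wrap.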
2696 break;
2697 }
2698
2699 case Intrinsic::umul_with_overflow:
2700 case Intrinsic::smul_with_overflow:
2701 case Intrinsic::usub_with_overflow:
2702 if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2703 return I;
2704 break;
2705
2706 case Intrinsic::ssub_with_overflow: {
2707 if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2708 return I;
2709
2710 Constant *C;
2711 Value *Arg0 = II->getArgOperand(0);
2712 Value *Arg1 = II->getArgOperand(1);
2713 // Given a constant C that is not the minimum signed value
2714 // for an integer of a given bit width:
2715 //
2716 // ssubo X, C -> saddo X, -C
2717 if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) {
2718 Value *NegVal = ConstantExpr::getNeg(C);
2719 // Build a saddo call that is equivalent to the discovered
2720 // ssubo call.
2721 return replaceInstUsesWith(
2722 *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
2723 Arg0, NegVal));
2724 }
2725
2726 break;
2727 }
2728
2729 case Intrinsic::uadd_sat:
2730 case Intrinsic::sadd_sat:
2731 case Intrinsic::usub_sat:
2732 case Intrinsic::ssub_sat: {
2733 SaturatingInst *SI = cast<SaturatingInst>(II);
2734 Type *Ty = SI->getType();
2735 Value *Arg0 = SI->getLHS();
2736 Value *Arg1 = SI->getRHS();
2737
2738 // Make use of known overflow information.
2739 OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(),
2740 Arg0, Arg1, SI);
2741 switch (OR) {
2742 case OverflowResult::MayOverflow:
2743 break;
2744 case OverflowResult::NeverOverflows:
2745 if (SI->isSigned())
2746 return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1);
2747 else
2748 return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1);
2749 case OverflowResult::AlwaysOverflowsLow: {
2750 unsigned BitWidth = Ty->getScalarSizeInBits();
2751 APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned());
2752 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min));
2753 }
2754 case OverflowResult::AlwaysOverflowsHigh: {
2755 unsigned BitWidth = Ty->getScalarSizeInBits();
2756 APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned());
2757 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max));
2758 }
2759 }
2760
2761 // usub_sat((sub nuw C, A), C1) -> usub_sat(usub_sat(C, C1), A)
2762 // which then constant-folds to:
2763 // usub_sat((sub nuw C, A), C1) -> usub_sat(C - C1, A) if C1 u< C
2764 // usub_sat((sub nuw C, A), C1) -> 0 otherwise
2765 Constant *C, *C1;
2766 Value *A;
2767 if (IID == Intrinsic::usub_sat &&
2768 match(Arg0, m_NUWSub(m_ImmConstant(C), m_Value(A))) &&
2769 match(Arg1, m_ImmConstant(C1))) {
2770 auto *NewC = Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, C, C1);
2771 auto *NewSub =
2772 Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, NewC, A);
2773 return replaceInstUsesWith(*SI, NewSub);
2774 }
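// For example (i8): usub_sat((sub nuw 10, %a), 3) --> usub_sat(7, %a).
// When C1 u>= C, e.g. usub_sat((sub nuw 10, %a), 12), the inner
// usub_sat(10, 12) saturates to 0 and the whole expression folds to 0.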
2775
2776 // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
2777 if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
2778 C->isNotMinSignedValue()) {
2779 Value *NegVal = ConstantExpr::getNeg(C);
2780 return replaceInstUsesWith(
2781 *II, Builder.CreateBinaryIntrinsic(
2782 Intrinsic::sadd_sat, Arg0, NegVal));
2783 }
2784
2785 // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
2786 // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
2787 // if Val and Val2 have the same sign
2788 if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
2789 Value *X;
2790 const APInt *Val, *Val2;
2791 APInt NewVal;
2792 bool IsUnsigned =
2793 IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
2794 if (Other->getIntrinsicID() == IID &&
2795 match(Arg1, m_APInt(Val)) &&
2796 match(Other->getArgOperand(0), m_Value(X)) &&
2797 match(Other->getArgOperand(1), m_APInt(Val2))) {
2798 if (IsUnsigned)
2799 NewVal = Val->uadd_sat(*Val2);
2800 else if (Val->isNonNegative() == Val2->isNonNegative()) {
2801 bool Overflow;
2802 NewVal = Val->sadd_ov(*Val2, Overflow);
2803 if (Overflow) {
2804 // Both adds together may add more than SignedMaxValue
2805 // without saturating the final result.
2806 break;
2807 }
2808 } else {
2809 // Cannot fold saturated addition with different signs.
2810 break;
2811 }
2812
2813 return replaceInstUsesWith(
2814 *II, Builder.CreateBinaryIntrinsic(
2815 IID, X, ConstantInt::get(II->getType(), NewVal)));
2816 }
2817 }
2818 break;
2819 }
2820
2821 case Intrinsic::minnum:
2822 case Intrinsic::maxnum:
2823 case Intrinsic::minimumnum:
2824 case Intrinsic::maximumnum:
2825 case Intrinsic::minimum:
2826 case Intrinsic::maximum: {
2827 Value *Arg0 = II->getArgOperand(0);
2828 Value *Arg1 = II->getArgOperand(1);
2829 Value *X, *Y;
2830 if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
2831 (Arg0->hasOneUse() || Arg1->hasOneUse())) {
2832 // If both operands are negated, invert the call and negate the result:
2833 // min(-X, -Y) --> -(max(X, Y))
2834 // max(-X, -Y) --> -(min(X, Y))
2835 Intrinsic::ID NewIID;
2836 switch (IID) {
2837 case Intrinsic::maxnum:
2838 NewIID = Intrinsic::minnum;
2839 break;
2840 case Intrinsic::minnum:
2841 NewIID = Intrinsic::maxnum;
2842 break;
2843 case Intrinsic::maximumnum:
2844 NewIID = Intrinsic::minimumnum;
2845 break;
2846 case Intrinsic::minimumnum:
2847 NewIID = Intrinsic::maximumnum;
2848 break;
2849 case Intrinsic::maximum:
2850 NewIID = Intrinsic::minimum;
2851 break;
2852 case Intrinsic::minimum:
2853 NewIID = Intrinsic::maximum;
2854 break;
2855 default:
2856 llvm_unreachable("unexpected intrinsic ID");
2857 }
2858 Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
2859 Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);
2860 FNeg->copyIRFlags(II);
2861 return FNeg;
2862 }
2863
2864 // m(m(X, C2), C1) -> m(X, C)
2865 const APFloat *C1, *C2;
2866 if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
2867 if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
2868 ((match(M->getArgOperand(0), m_Value(X)) &&
2869 match(M->getArgOperand(1), m_APFloat(C2))) ||
2870 (match(M->getArgOperand(1), m_Value(X)) &&
2871 match(M->getArgOperand(0), m_APFloat(C2))))) {
2872 APFloat Res(0.0);
2873 switch (IID) {
2874 case Intrinsic::maxnum:
2875 Res = maxnum(*C1, *C2);
2876 break;
2877 case Intrinsic::minnum:
2878 Res = minnum(*C1, *C2);
2879 break;
2880 case Intrinsic::maximumnum:
2881 Res = maximumnum(*C1, *C2);
2882 break;
2883 case Intrinsic::minimumnum:
2884 Res = minimumnum(*C1, *C2);
2885 break;
2886 case Intrinsic::maximum:
2887 Res = maximum(*C1, *C2);
2888 break;
2889 case Intrinsic::minimum:
2890 Res = minimum(*C1, *C2);
2891 break;
2892 default:
2893 llvm_unreachable("unexpected intrinsic ID");
2894 }
2895 // TODO: Conservatively intersecting FMF. If Res == C2, the transform
2896 // was a simplification (so Arg0 and its original flags could
2897 // propagate?)
2898 Value *V = Builder.CreateBinaryIntrinsic(
2899 IID, X, ConstantFP::get(Arg0->getType(), Res),
2900 FMFSource::intersect(II, M));
2901 return replaceInstUsesWith(*II, V);
2902 }
2903 }
2904
2905 // m((fpext X), (fpext Y)) -> fpext (m(X, Y))
2906 if (match(Arg0, m_FPExt(m_Value(X))) && match(Arg1, m_FPExt(m_Value(Y))) &&
2907 (Arg0->hasOneUse() || Arg1->hasOneUse()) &&
2908 X->getType() == Y->getType()) {
2909 Value *NewCall =
2910 Builder.CreateBinaryIntrinsic(IID, X, Y, II, II->getName());
2911 return new FPExtInst(NewCall, II->getType());
2912 }
2913
2914 // m(fpext X, C) -> fpext m(X, TruncC) if C can be losslessly truncated.
2915 Constant *C;
2916 if (match(Arg0, m_OneUse(m_FPExt(m_Value(X)))) &&
2917 match(Arg1, m_ImmConstant(C))) {
2918 if (Constant *TruncC =
2919 getLosslessInvCast(C, X->getType(), Instruction::FPExt, DL)) {
2920 Value *NewCall =
2921 Builder.CreateBinaryIntrinsic(IID, X, TruncC, II, II->getName());
2922 return new FPExtInst(NewCall, II->getType());
2923 }
2924 }
2925
2926 // max X, -X --> fabs X
2927 // min X, -X --> -(fabs X)
2928 // TODO: Remove the one-use limitation? That is obviously better for max,
2929 // which is why we don't check one-use for that case. For min it would
2930 // add an extra instruction (fnabs), but that is still likely better
2931 // for analysis and codegen.
2932 auto IsMinMaxOrXNegX = [IID, &X](Value *Op0, Value *Op1) {
2933 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Specific(X)))
2934 return Op0->hasOneUse() ||
2935 (IID != Intrinsic::minimum && IID != Intrinsic::minnum &&
2936 IID != Intrinsic::minimumnum);
2937 return false;
2938 };
2939
2940 if (IsMinMaxOrXNegX(Arg0, Arg1) || IsMinMaxOrXNegX(Arg1, Arg0)) {
2941 Value *R = Builder.CreateFAbs(X, II);
2942 if (IID == Intrinsic::minimum || IID == Intrinsic::minnum ||
2943 IID == Intrinsic::minimumnum)
2944 R = Builder.CreateFNegFMF(R, II);
2945 return replaceInstUsesWith(*II, R);
2946 }
2947
2948 break;
2949 }
2950 case Intrinsic::matrix_multiply: {
2951 // Optimize negation in matrix multiplication.
2952
2953 // -A * -B -> A * B
2954 Value *A, *B;
2955 if (match(II->getArgOperand(0), m_FNeg(m_Value(A))) &&
2956 match(II->getArgOperand(1), m_FNeg(m_Value(B)))) {
2957 replaceOperand(*II, 0, A);
2958 replaceOperand(*II, 1, B);
2959 return II;
2960 }
2961
2962 Value *Op0 = II->getOperand(0);
2963 Value *Op1 = II->getOperand(1);
2964 Value *OpNotNeg, *NegatedOp;
2965 unsigned NegatedOpArg, OtherOpArg;
2966 if (match(Op0, m_FNeg(m_Value(OpNotNeg)))) {
2967 NegatedOp = Op0;
2968 NegatedOpArg = 0;
2969 OtherOpArg = 1;
2970 } else if (match(Op1, m_FNeg(m_Value(OpNotNeg)))) {
2971 NegatedOp = Op1;
2972 NegatedOpArg = 1;
2973 OtherOpArg = 0;
2974 } else
2975 // Multiplication doesn't have a negated operand.
2976 break;
2977
2978 // Only optimize if the negated operand has only one use.
2979 if (!NegatedOp->hasOneUse())
2980 break;
2981
2982 Value *OtherOp = II->getOperand(OtherOpArg);
2983 VectorType *RetTy = cast<VectorType>(II->getType());
2984 VectorType *NegatedOpTy = cast<VectorType>(NegatedOp->getType());
2985 VectorType *OtherOpTy = cast<VectorType>(OtherOp->getType());
2986 ElementCount NegatedCount = NegatedOpTy->getElementCount();
2987 ElementCount OtherCount = OtherOpTy->getElementCount();
2988 ElementCount RetCount = RetTy->getElementCount();
2989 // (-A) * B -> A * (-B), if it is cheaper to negate B and vice versa.
2990 if (ElementCount::isKnownGT(NegatedCount, OtherCount) &&
2991 ElementCount::isKnownLT(OtherCount, RetCount)) {
2992 Value *InverseOtherOp = Builder.CreateFNeg(OtherOp);
2993 replaceOperand(*II, NegatedOpArg, OpNotNeg);
2994 replaceOperand(*II, OtherOpArg, InverseOtherOp);
2995 return II;
2996 }
2997 // (-A) * B -> -(A * B), if it is cheaper to negate the result
2998 if (ElementCount::isKnownGT(NegatedCount, RetCount)) {
2999 SmallVector<Value *, 5> NewArgs(II->args());
3000 NewArgs[NegatedOpArg] = OpNotNeg;
3001 Instruction *NewMul =
3002 Builder.CreateIntrinsic(II->getType(), IID, NewArgs, II);
3003 return replaceInstUsesWith(*II, Builder.CreateFNegFMF(NewMul, II));
3004 }
3005 break;
3006 }
3007 case Intrinsic::fmuladd: {
3008 // Try to simplify the underlying FMul.
3009 if (Value *V =
3010 simplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1),
3011 II->getFastMathFlags(), SQ.getWithInstruction(II)))
3012 return BinaryOperator::CreateFAddFMF(V, II->getArgOperand(2),
3013 II->getFastMathFlags());
3014
3015 [[fallthrough]];
3016 }
3017 case Intrinsic::fma: {
3018 // fma fneg(x), fneg(y), z -> fma x, y, z
3019 Value *Src0 = II->getArgOperand(0);
3020 Value *Src1 = II->getArgOperand(1);
3021 Value *Src2 = II->getArgOperand(2);
3022 Value *X, *Y;
3023 if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
3024 replaceOperand(*II, 0, X);
3025 replaceOperand(*II, 1, Y);
3026 return II;
3027 }
3028
3029 // fma fabs(x), fabs(x), z -> fma x, x, z
3030 if (match(Src0, m_FAbs(m_Value(X))) &&
3031 match(Src1, m_FAbs(m_Specific(X)))) {
3032 replaceOperand(*II, 0, X);
3033 replaceOperand(*II, 1, X);
3034 return II;
3035 }
3036
3037 // Try to simplify the underlying FMul. We can only apply simplifications
3038 // that do not require rounding.
3039 if (Value *V = simplifyFMAFMul(Src0, Src1, II->getFastMathFlags(),
3040 SQ.getWithInstruction(II)))
3041 return BinaryOperator::CreateFAddFMF(V, Src2, II->getFastMathFlags());
3042
3043 // fma x, y, 0 -> fmul x, y
3044 // This is always valid for -0.0, but requires nsz for +0.0 as
3045 // -0.0 + 0.0 = 0.0, which would not be the same as the fmul on its own.
3046 if (match(Src2, m_NegZeroFP()) ||
3047 (match(Src2, m_PosZeroFP()) && II->getFastMathFlags().noSignedZeros()))
3048 return BinaryOperator::CreateFMulFMF(Src0, Src1, II);
3049
3050 // fma x, -1.0, y -> fsub y, x
3051 if (match(Src1, m_SpecificFP(-1.0)))
3052 return BinaryOperator::CreateFSubFMF(Src2, Src0, II);
3053
3054 break;
3055 }
3056 case Intrinsic::copysign: {
3057 Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);
3058 if (std::optional<bool> KnownSignBit = computeKnownFPSignBit(
3059 Sign, getSimplifyQuery().getWithInstruction(II))) {
3060 if (*KnownSignBit) {
3061 // If we know that the sign argument is negative, reduce to FNABS:
3062 // copysign Mag, -Sign --> fneg (fabs Mag)
3063 Value *Fabs = Builder.CreateFAbs(Mag, II);
3064 return replaceInstUsesWith(*II, Builder.CreateFNegFMF(Fabs, II));
3065 }
3066
3067 // If we know that the sign argument is positive, reduce to FABS:
3068 // copysign Mag, +Sign --> fabs Mag
3069 Value *Fabs = Builder.CreateFAbs(Mag, II);
3070 return replaceInstUsesWith(*II, Fabs);
3071 }
3072
3073 // Propagate sign argument through nested calls:
3074 // copysign Mag, (copysign ?, X) --> copysign Mag, X
3075 Value *X;
3076 if (match(Sign, m_Intrinsic<Intrinsic::copysign>(m_Value(), m_Value(X)))) {
3077 Value *CopySign =
3078 Builder.CreateCopySign(Mag, X, FMFSource::intersect(II, Sign));
3079 return replaceInstUsesWith(*II, CopySign);
3080 }
3081
3082 // Clear sign-bit of constant magnitude:
3083 // copysign -MagC, X --> copysign MagC, X
3084 // TODO: Support constant folding for fabs
3085 const APFloat *MagC;
3086 if (match(Mag, m_APFloat(MagC)) && MagC->isNegative()) {
3087 APFloat PosMagC = *MagC;
3088 PosMagC.clearSign();
3089 return replaceOperand(*II, 0, ConstantFP::get(Mag->getType(), PosMagC));
3090 }
3091
3092 // Peek through changes of magnitude's sign-bit. This call rewrites those:
3093 // copysign (fabs X), Sign --> copysign X, Sign
3094 // copysign (fneg X), Sign --> copysign X, Sign
3095 if (match(Mag, m_FAbs(m_Value(X))) || match(Mag, m_FNeg(m_Value(X))))
3096 return replaceOperand(*II, 0, X);
3097
3098 Type *SignEltTy = Sign->getType()->getScalarType();
3099
3100 Value *CastSrc;
3101 if (match(Sign,
3103 CastSrc->getType()->isIntOrIntVectorTy() &&
3105 KnownBits Known(SignEltTy->getPrimitiveSizeInBits());
3107 APInt::getSignMask(Known.getBitWidth()), Known,
3108 SQ))
3109 return II;
3110 }
3111
3112 break;
3113 }
3114 case Intrinsic::fabs: {
3115 Value *Cond, *TVal, *FVal;
3116 Value *Arg = II->getArgOperand(0);
3117 Value *X;
3118 // fabs (-X) --> fabs (X)
3119 if (match(Arg, m_FNeg(m_Value(X)))) {
3120 CallInst *Fabs = Builder.CreateFAbs(X, II);
3121 return replaceInstUsesWith(CI, Fabs);
3122 }
3123
3124 if (match(Arg, m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))) {
3125 // fabs (select Cond, TrueC, FalseC) --> select Cond, AbsT, AbsF
3126 if (Arg->hasOneUse() ? (isa<Constant>(TVal) || isa<Constant>(FVal))
3127 : (isa<Constant>(TVal) && isa<Constant>(FVal))) {
3128 CallInst *AbsT = Builder.CreateCall(II->getCalledFunction(), {TVal});
3129 CallInst *AbsF = Builder.CreateCall(II->getCalledFunction(), {FVal});
3130 SelectInst *SI = SelectInst::Create(Cond, AbsT, AbsF);
3131 SI->setFastMathFlags(II->getFastMathFlags() |
3132 cast<SelectInst>(Arg)->getFastMathFlags());
3133 // Can't copy nsz to select, as even with the nsz flag the fabs result
3134 // always has the sign bit unset.
3135 SI->setHasNoSignedZeros(false);
3136 return SI;
3137 }
3138 // fabs (select Cond, -FVal, FVal) --> fabs FVal
3139 if (match(TVal, m_FNeg(m_Specific(FVal))))
3140 return replaceOperand(*II, 0, FVal);
3141 // fabs (select Cond, TVal, -TVal) --> fabs TVal
3142 if (match(FVal, m_FNeg(m_Specific(TVal))))
3143 return replaceOperand(*II, 0, TVal);
3144 }
3145
3146 Value *Magnitude, *Sign;
3147 if (match(II->getArgOperand(0),
3148 m_CopySign(m_Value(Magnitude), m_Value(Sign)))) {
3149 // fabs (copysign x, y) -> (fabs x)
3150 CallInst *AbsSign = Builder.CreateFAbs(Magnitude, II);
3151 return replaceInstUsesWith(*II, AbsSign);
3152 }
3153
3154 [[fallthrough]];
3155 }
3156 case Intrinsic::ceil:
3157 case Intrinsic::floor:
3158 case Intrinsic::round:
3159 case Intrinsic::roundeven:
3160 case Intrinsic::nearbyint:
3161 case Intrinsic::rint:
3162 case Intrinsic::trunc: {
3163 Value *ExtSrc;
3164 if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
3165 // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
3166 Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);
3167 return new FPExtInst(NarrowII, II->getType());
3168 }
3169 break;
3170 }
3171 case Intrinsic::cos:
3172 case Intrinsic::amdgcn_cos:
3173 case Intrinsic::cosh: {
3174 Value *X, *Sign;
3175 Value *Src = II->getArgOperand(0);
3176 if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X))) ||
3177 match(Src, m_CopySign(m_Value(X), m_Value(Sign)))) {
3178 // f(-x) --> f(x)
3179 // f(fabs(x)) --> f(x)
3180 // f(copysign(x, y)) --> f(x)
3181 // for f in {cos, cosh}
3182 return replaceOperand(*II, 0, X);
3183 }
3184 break;
3185 }
3186 case Intrinsic::sin:
3187 case Intrinsic::amdgcn_sin:
3188 case Intrinsic::sinh:
3189 case Intrinsic::tan:
3190 case Intrinsic::tanh: {
3191 Value *X;
3192 if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
3193 // f(-x) --> -f(x)
3194 // for f in {sin, sinh, tan, tanh}
3195 Value *NewFunc = Builder.CreateUnaryIntrinsic(IID, X, II);
3196 return UnaryOperator::CreateFNegFMF(NewFunc, II);
3197 }
3198 break;
3199 }
3200 case Intrinsic::ldexp: {
3201 // ldexp(ldexp(x, a), b) -> ldexp(x, a + b)
3202 //
3203 // The danger is if the first ldexp would overflow to infinity or underflow
3204 // to zero, but the combined exponent avoids it. We ignore this with
3205 // reassoc.
3206 //
3207 // It's also safe to fold if we know both exponents are >= 0 or <= 0 since
3208 // it would just double down on the overflow/underflow which would occur
3209 // anyway.
3210 //
3211 // TODO: Could do better if we had range tracking for the input value
3212 // exponent. Also could broaden sign check to cover == 0 case.
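// Worked example (illustrative, assumes reassoc on both calls):
//   %t = call float @llvm.ldexp.f32.i32(float %x, i32 5)
//   %r = call float @llvm.ldexp.f32.i32(float %t, i32 3)
// -->
//   %r = call float @llvm.ldexp.f32.i32(float %x, i32 8)
// With both exponents non-negative (or both non-positive), any overflow
// (or underflow) of the inner call would also occur for the combined
// exponent, so the fold cannot change the result.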
3213 Value *Src = II->getArgOperand(0);
3214 Value *Exp = II->getArgOperand(1);
3215
3216 uint64_t ConstExp;
3217 if (match(Exp, m_ConstantInt(ConstExp))) {
3218 // ldexp(x, K) -> fmul x, 2^K
3219 const fltSemantics &FPTy =
3220 Src->getType()->getScalarType()->getFltSemantics();
3221
3222 APFloat Scaled = scalbn(APFloat::getOne(FPTy), static_cast<int>(ConstExp),
3223 APFloat::rmNearestTiesToEven);
3224 if (!Scaled.isZero() && !Scaled.isInfinity()) {
3225 // Skip overflow and underflow cases.
3226 Constant *FPConst = ConstantFP::get(Src->getType(), Scaled);
3227 return BinaryOperator::CreateFMulFMF(Src, FPConst, II);
3228 }
3229 }
3230
3231 Value *InnerSrc;
3232 Value *InnerExp;
3233 if (match(Src, m_OneUse(m_Intrinsic<Intrinsic::ldexp>(
3234 m_Value(InnerSrc), m_Value(InnerExp)))) &&
3235 Exp->getType() == InnerExp->getType()) {
3236 FastMathFlags FMF = II->getFastMathFlags();
3237 FastMathFlags InnerFlags = cast<FPMathOperator>(Src)->getFastMathFlags();
3238
3239 if ((FMF.allowReassoc() && InnerFlags.allowReassoc()) ||
3240 signBitMustBeTheSame(Exp, InnerExp, SQ.getWithInstruction(II))) {
3241 // TODO: Add nsw/nuw probably safe if integer type exceeds exponent
3242 // width.
3243 Value *NewExp = Builder.CreateAdd(InnerExp, Exp);
3244 II->setArgOperand(1, NewExp);
3245 II->setFastMathFlags(InnerFlags); // Or the inner flags.
3246 return replaceOperand(*II, 0, InnerSrc);
3247 }
3248 }
3249
3250 // ldexp(x, zext(i1 y)) -> fmul x, (select y, 2.0, 1.0)
3251 // ldexp(x, sext(i1 y)) -> fmul x, (select y, 0.5, 1.0)
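// An i1 exponent is 0 or 1 after zext (scale 2^0 = 1.0 or 2^1 = 2.0) and
// 0 or -1 after sext (scale 2^0 = 1.0 or 2^-1 = 0.5), so a select of the
// two constants expresses the same scaling without the intrinsic.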
3252 Value *ExtSrc;
3253 if (match(Exp, m_ZExt(m_Value(ExtSrc))) &&
3254 ExtSrc->getType()->getScalarSizeInBits() == 1) {
3255 Value *Select =
3256 Builder.CreateSelect(ExtSrc, ConstantFP::get(II->getType(), 2.0),
3257 ConstantFP::get(II->getType(), 1.0));
3258 return BinaryOperator::CreateFMulFMF(Src, Select, II);
3259 }
3260 if (match(Exp, m_SExt(m_Value(ExtSrc))) &&
3261 ExtSrc->getType()->getScalarSizeInBits() == 1) {
3262 Value *Select =
3263 Builder.CreateSelect(ExtSrc, ConstantFP::get(II->getType(), 0.5),
3264 ConstantFP::get(II->getType(), 1.0));
3265 return BinaryOperator::CreateFMulFMF(Src, Select, II);
3266 }
3267
3268 // ldexp(x, c ? exp : 0) -> c ? ldexp(x, exp) : x
3269 // ldexp(x, c ? 0 : exp) -> c ? x : ldexp(x, exp)
3270 //
3271 // TODO: If we cared, should insert a canonicalize for x
3272 Value *SelectCond, *SelectLHS, *SelectRHS;
3273 if (match(II->getArgOperand(1),
3274 m_OneUse(m_Select(m_Value(SelectCond), m_Value(SelectLHS),
3275 m_Value(SelectRHS))))) {
3276 Value *NewLdexp = nullptr;
3277 Value *Select = nullptr;
3278 if (match(SelectRHS, m_ZeroInt())) {
3279 NewLdexp = Builder.CreateLdexp(Src, SelectLHS, II);
3280 Select = Builder.CreateSelect(SelectCond, NewLdexp, Src);
3281 } else if (match(SelectLHS, m_ZeroInt())) {
3282 NewLdexp = Builder.CreateLdexp(Src, SelectRHS, II);
3283 Select = Builder.CreateSelect(SelectCond, Src, NewLdexp);
3284 }
3285
3286 if (NewLdexp) {
3287 Select->takeName(II);
3288 return replaceInstUsesWith(*II, Select);
3289 }
3290 }
3291
3292 break;
3293 }
3294 case Intrinsic::ptrauth_auth:
3295 case Intrinsic::ptrauth_resign: {
3296 // (sign|resign) + (auth|resign) can be folded by omitting the middle
3297 // sign+auth component if the key and discriminator match.
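// Illustrative examples of the folds below (pseudo operands):
//   auth(sign(p, k, d), k, d) --> p
//   resign(sign(p, k, d), k, d, k2, d2) --> sign(p, k2, d2)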
3298 bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign;
3299 Value *Ptr = II->getArgOperand(0);
3300 Value *Key = II->getArgOperand(1);
3301 Value *Disc = II->getArgOperand(2);
3302 Value *DS = nullptr;
3303 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_deactivation_symbol))
3304 DS = Bundle->Inputs[0];
3305
3306 // AuthKey will be the key we need to end up authenticating against in
3307 // whatever we replace this sequence with.
3308 Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr;
3309 if (const auto *CI = dyn_cast<CallBase>(Ptr)) {
3310 Value *OtherDS = nullptr;
3311 if (auto Bundle =
3312 CI->getOperandBundle(LLVMContext::OB_deactivation_symbol))
3313 OtherDS = Bundle->Inputs[0];
3314 if (DS != OtherDS)
3315 break;
3316
3317 BasePtr = CI->getArgOperand(0);
3318 if (CI->getIntrinsicID() == Intrinsic::ptrauth_sign) {
3319 if (CI->getArgOperand(1) != Key || CI->getArgOperand(2) != Disc)
3320 break;
3321 } else if (CI->getIntrinsicID() == Intrinsic::ptrauth_resign) {
3322 // The resign intrinsic does not support deactivation symbols.
3323 assert(!DS);
3324 if (CI->getArgOperand(3) != Key || CI->getArgOperand(4) != Disc)
3325 break;
3326 AuthKey = CI->getArgOperand(1);
3327 AuthDisc = CI->getArgOperand(2);
3328 } else
3329 break;
3330 } else if (const auto *PtrToInt = dyn_cast<PtrToIntOperator>(Ptr)) {
3331 // ptrauth constants are equivalent to a call to @llvm.ptrauth.sign for
3332 // our purposes, so check for that too.
3333 const auto *CPA = dyn_cast<ConstantPtrAuth>(PtrToInt->getOperand(0));
3334 if (!CPA || DS || !CPA->isKnownCompatibleWith(Key, Disc, DL))
3335 break;
3336
3337 // resign(ptrauth(p,ks,ds),ks,ds,kr,dr) -> ptrauth(p,kr,dr)
3338 if (NeedSign && isa<ConstantInt>(II->getArgOperand(4))) {
3339 auto *SignKey = cast<ConstantInt>(II->getArgOperand(3));
3340 auto *SignDisc = cast<ConstantInt>(II->getArgOperand(4));
3341 auto *Null = ConstantPointerNull::get(Builder.getPtrTy());
3342 auto *NewCPA = ConstantPtrAuth::get(CPA->getPointer(), SignKey,
3343 SignDisc, /*AddrDisc=*/Null,
3344 /*DeactivationSymbol=*/Null);
3345 replaceInstUsesWith(
3346 *II, ConstantExpr::getPointerCast(NewCPA, II->getType()));
3347 return eraseInstFromFunction(*II);
3348 }
3349
3350 // auth(ptrauth(p,k,d),k,d) -> p
3351 BasePtr = Builder.CreatePtrToInt(CPA->getPointer(), II->getType());
3352 } else
3353 break;
3354
3355 unsigned NewIntrin;
3356 if (AuthKey && NeedSign) {
3357 // resign(0,1) + resign(1,2) = resign(0, 2)
3358 NewIntrin = Intrinsic::ptrauth_resign;
3359 } else if (AuthKey) {
3360 // resign(0,1) + auth(1) = auth(0)
3361 NewIntrin = Intrinsic::ptrauth_auth;
3362 } else if (NeedSign) {
3363 // sign(0) + resign(0, 1) = sign(1)
3364 NewIntrin = Intrinsic::ptrauth_sign;
3365 } else {
3366 // sign(0) + auth(0) = nop
3367 replaceInstUsesWith(*II, BasePtr);
3368 return eraseInstFromFunction(*II);
3369 }
3370
3371 SmallVector<Value *, 4> CallArgs;
3372 CallArgs.push_back(BasePtr);
3373 if (AuthKey) {
3374 CallArgs.push_back(AuthKey);
3375 CallArgs.push_back(AuthDisc);
3376 }
3377
3378 if (NeedSign) {
3379 CallArgs.push_back(II->getArgOperand(3));
3380 CallArgs.push_back(II->getArgOperand(4));
3381 }
3382
3383 std::vector<OperandBundleDef> Bundles;
3384 if (DS)
3385 Bundles.push_back(OperandBundleDef("deactivation-symbol", DS));
3386
3387 Function *NewFn =
3388 Intrinsic::getOrInsertDeclaration(II->getModule(), NewIntrin);
3389 return CallInst::Create(NewFn, CallArgs, Bundles);
3390 }
3391 case Intrinsic::arm_neon_vtbl1:
3392 case Intrinsic::arm_neon_vtbl2:
3393 case Intrinsic::arm_neon_vtbl3:
3394 case Intrinsic::arm_neon_vtbl4:
3395 case Intrinsic::aarch64_neon_tbl1:
3396 case Intrinsic::aarch64_neon_tbl2:
3397 case Intrinsic::aarch64_neon_tbl3:
3398 case Intrinsic::aarch64_neon_tbl4:
3399 return simplifyNeonTbl(*II, *this, /*IsExtension=*/false);
3400 case Intrinsic::arm_neon_vtbx1:
3401 case Intrinsic::arm_neon_vtbx2:
3402 case Intrinsic::arm_neon_vtbx3:
3403 case Intrinsic::arm_neon_vtbx4:
3404 case Intrinsic::aarch64_neon_tbx1:
3405 case Intrinsic::aarch64_neon_tbx2:
3406 case Intrinsic::aarch64_neon_tbx3:
3407 case Intrinsic::aarch64_neon_tbx4:
3408 return simplifyNeonTbl(*II, *this, /*IsExtension=*/true);
3409
3410 case Intrinsic::arm_neon_vmulls:
3411 case Intrinsic::arm_neon_vmullu:
3412 case Intrinsic::aarch64_neon_smull:
3413 case Intrinsic::aarch64_neon_umull: {
3414 Value *Arg0 = II->getArgOperand(0);
3415 Value *Arg1 = II->getArgOperand(1);
3416
3417 // Handle mul by zero first:
3418 if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
3419 return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
3420 }
3421
3422 // Check for constant LHS & RHS - in this case we just simplify.
3423 bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
3424 IID == Intrinsic::aarch64_neon_umull);
3425 VectorType *NewVT = cast<VectorType>(II->getType());
3426 if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
3427 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
3428 Value *V0 = Builder.CreateIntCast(CV0, NewVT, /*isSigned=*/!Zext);
3429 Value *V1 = Builder.CreateIntCast(CV1, NewVT, /*isSigned=*/!Zext);
3430 return replaceInstUsesWith(CI, Builder.CreateMul(V0, V1));
3431 }
3432
3433 // Couldn't simplify - canonicalize constant to the RHS.
3434 std::swap(Arg0, Arg1);
3435 }
3436
3437 // Handle mul by one:
3438 if (Constant *CV1 = dyn_cast<Constant>(Arg1))
3439 if (ConstantInt *Splat =
3440 dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
3441 if (Splat->isOne())
3442 return CastInst::CreateIntegerCast(Arg0, II->getType(),
3443 /*isSigned=*/!Zext);
3444
3445 break;
3446 }
3447 case Intrinsic::arm_neon_aesd:
3448 case Intrinsic::arm_neon_aese:
3449 case Intrinsic::aarch64_crypto_aesd:
3450 case Intrinsic::aarch64_crypto_aese:
3451 case Intrinsic::aarch64_sve_aesd:
3452 case Intrinsic::aarch64_sve_aese: {
3453 Value *DataArg = II->getArgOperand(0);
3454 Value *KeyArg = II->getArgOperand(1);
3455
3456 // Accept zero on either operand.
3457 if (!match(KeyArg, m_ZeroInt()))
3458 std::swap(KeyArg, DataArg);
3459
3460 // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR
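// AESE/AESD begin by XOR'ing the data and key operands (AddRoundKey), so a
// zero key lets an earlier XOR fold in. Illustrative example:
//   aese(xor(a, b), 0) --> aese(a, b)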
3461 Value *Data, *Key;
3462 if (match(KeyArg, m_ZeroInt()) &&
3463 match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
3464 replaceOperand(*II, 0, Data);
3465 replaceOperand(*II, 1, Key);
3466 return II;
3467 }
3468 break;
3469 }
3470 case Intrinsic::arm_neon_vshifts:
3471 case Intrinsic::arm_neon_vshiftu:
3472 case Intrinsic::aarch64_neon_sshl:
3473 case Intrinsic::aarch64_neon_ushl:
3474 return foldNeonShift(II, *this);
3475 case Intrinsic::hexagon_V6_vandvrt:
3476 case Intrinsic::hexagon_V6_vandvrt_128B: {
3477 // Simplify Q -> V -> Q conversion.
3478 if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
3479 Intrinsic::ID ID0 = Op0->getIntrinsicID();
3480 if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
3481 ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
3482 break;
3483 Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);
3484 uint64_t Bytes1 = computeKnownBits(Bytes, Op0).One.getZExtValue();
3485 uint64_t Mask1 = computeKnownBits(Mask, II).One.getZExtValue();
3486 // Check if every byte has common bits in Bytes and Mask.
3487 uint64_t C = Bytes1 & Mask1;
3488 if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
3489 return replaceInstUsesWith(*II, Op0->getArgOperand(0));
3490 }
3491 break;
3492 }
3493 case Intrinsic::stackrestore: {
3494 enum class ClassifyResult {
3495 None,
3496 Alloca,
3497 StackRestore,
3498 CallWithSideEffects,
3499 };
3500 auto Classify = [](const Instruction *I) {
3501 if (isa<AllocaInst>(I))
3502 return ClassifyResult::Alloca;
3503
3504 if (auto *CI = dyn_cast<CallInst>(I)) {
3505 if (auto *II = dyn_cast<IntrinsicInst>(CI)) {
3506 if (II->getIntrinsicID() == Intrinsic::stackrestore)
3507 return ClassifyResult::StackRestore;
3508
3509 if (II->mayHaveSideEffects())
3510 return ClassifyResult::CallWithSideEffects;
3511 } else {
3512 // Consider all non-intrinsic calls to be side effects
3513 return ClassifyResult::CallWithSideEffects;
3514 }
3515 }
3516
3517 return ClassifyResult::None;
3518 };
3519
3520 // If the stacksave and the stackrestore are in the same BB, and there is
3521 // no intervening call, alloca, or stackrestore of a different stacksave,
3522 // remove the restore. This can happen when variable allocas are DCE'd.
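// Illustrative removable pair (hand-written example; no intervening
// alloca, call, or stackrestore of a different stacksave):
//   %sp = call ptr @llvm.stacksave()
//   %v = add i32 %a, %b
//   call void @llvm.stackrestore(ptr %sp) ; can be erased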
3523 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
3524 if (SS->getIntrinsicID() == Intrinsic::stacksave &&
3525 SS->getParent() == II->getParent()) {
3526 BasicBlock::iterator BI(SS);
3527 bool CannotRemove = false;
3528 for (++BI; &*BI != II; ++BI) {
3529 switch (Classify(&*BI)) {
3530 case ClassifyResult::None:
3531 // So far so good, look at next instructions.
3532 break;
3533
3534 case ClassifyResult::StackRestore:
3535 // If we found an intervening stackrestore for a different
3536 // stacksave, we can't remove the stackrestore. Otherwise, continue.
3537 if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS)
3538 CannotRemove = true;
3539 break;
3540
3541 case ClassifyResult::Alloca:
3542 case ClassifyResult::CallWithSideEffects:
3543 // If we found an alloca, a non-intrinsic call, or an intrinsic
3544 // call with side effects, we can't remove the stackrestore.
3545 CannotRemove = true;
3546 break;
3547 }
3548 if (CannotRemove)
3549 break;
3550 }
3551
3552 if (!CannotRemove)
3553 return eraseInstFromFunction(CI);
3554 }
3555 }
3556
3557 // Scan down this block to see if there is another stack restore in the
3558 // same block without an intervening call/alloca.
3559 BasicBlock::iterator BI(II);
3560 Instruction *TI = II->getParent()->getTerminator();
3561 bool CannotRemove = false;
3562 for (++BI; &*BI != TI; ++BI) {
3563 switch (Classify(&*BI)) {
3564 case ClassifyResult::None:
3565 // So far so good, look at next instructions.
3566 break;
3567
3568 case ClassifyResult::StackRestore:
3569 // If there is a stackrestore below this one, remove this one.
3570 return eraseInstFromFunction(CI);
3571
3572 case ClassifyResult::Alloca:
3573 case ClassifyResult::CallWithSideEffects:
3574 // If we found an alloca, a non-intrinsic call, or an intrinsic call
3575 // with side effects (such as llvm.stacksave and llvm.read_register),
3576 // we can't remove the stack restore.
3577 CannotRemove = true;
3578 break;
3579 }
3580 if (CannotRemove)
3581 break;
3582 }
3583
3584 // If the stack restore is in a return, resume, or unwind block and if there
3585 // are no allocas or calls between the restore and the return, nuke the
3586 // restore.
3587 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
3588 return eraseInstFromFunction(CI);
3589 break;
3590 }
3591 case Intrinsic::lifetime_end:
3592 // Asan needs to poison memory to detect invalid access which is possible
3593 // even for empty lifetime range.
3594 if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3595 II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
3596 II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress) ||
3597 II->getFunction()->hasFnAttribute(Attribute::SanitizeMemTag))
3598 break;
3599
3600 if (removeTriviallyEmptyRange(*II, *this, [](const IntrinsicInst &I) {
3601 return I.getIntrinsicID() == Intrinsic::lifetime_start;
3602 }))
3603 return nullptr;
3604 break;
3605 case Intrinsic::assume: {
3606 Value *IIOperand = II->getArgOperand(0);
3607 SmallVector<OperandBundleDef, 4> OpBundles;
3608 II->getOperandBundlesAsDefs(OpBundles);
3609
3610 /// This will remove the boolean Condition from the assume given as
3611 /// argument and remove the assume if it becomes useless.
3612 /// It always returns nullptr so it can be used as a return value.
3613 auto RemoveConditionFromAssume = [&](Instruction *Assume) -> Instruction * {
3614 assert(isa<AssumeInst>(Assume));
3615 if (isAssumeWithEmptyBundle(*cast<AssumeInst>(Assume)))
3616 return eraseInstFromFunction(CI);
3617 replaceUse(II->getOperandUse(0), ConstantInt::getTrue(II->getContext()));
3618 return nullptr;
3619 };
3620 // Remove an assume if it is followed by an identical assume.
3621 // TODO: Do we need this? Unless there are conflicting assumptions, the
3622 // computeKnownBits(IIOperand) below here eliminates redundant assumes.
3623 Instruction *Next = II->getNextNode();
3624 if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
3625 return RemoveConditionFromAssume(Next);
3626
3627 // Canonicalize assume(a && b) -> assume(a); assume(b);
3628 // Note: New assumption intrinsics created here are registered by
3629 // the InstCombineIRInserter object.
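// Illustrative example: for %c = and i1 %a, %b,
//   call void @llvm.assume(i1 %c)
// becomes
//   call void @llvm.assume(i1 %a)
//   call void @llvm.assume(i1 %b)
// so each condition is visible to the assumption cache on its own.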
3630 FunctionType *AssumeIntrinsicTy = II->getFunctionType();
3631 Value *AssumeIntrinsic = II->getCalledOperand();
3632 Value *A, *B;
3633 if (match(IIOperand, m_LogicalAnd(m_Value(A), m_Value(B)))) {
3634 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,
3635 II->getName());
3636 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
3637 return eraseInstFromFunction(*II);
3638 }
3639 // assume(!(a || b)) -> assume(!a); assume(!b);
3640 if (match(IIOperand, m_Not(m_LogicalOr(m_Value(A), m_Value(B))))) {
3641 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
3642 Builder.CreateNot(A), OpBundles, II->getName());
3643 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
3644 Builder.CreateNot(B), II->getName());
3645 return eraseInstFromFunction(*II);
3646 }
3647
3648 for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
3649 OperandBundleUse OBU = II->getOperandBundleAt(Idx);
3650
3651 // Separate storage assumptions apply to the underlying allocations, not
3652 // any particular pointer within them. When evaluating the hints for AA
3653 // purposes we getUnderlyingObject them; by precomputing the answers here
3654 // we can avoid having to do so repeatedly there.
3655 if (OBU.getTagName() == "separate_storage") {
3656 assert(OBU.Inputs.size() == 2);
3657 auto MaybeSimplifyHint = [&](const Use &U) {
3658 Value *Hint = U.get();
3659 // Not having a limit is safe because InstCombine removes unreachable
3660 // code.
3661 Value *UnderlyingObject = getUnderlyingObject(Hint, /*MaxLookup*/ 0);
3662 if (Hint != UnderlyingObject)
3663 replaceUse(const_cast<Use &>(U), UnderlyingObject);
3664 };
3665 MaybeSimplifyHint(OBU.Inputs[0]);
3666 MaybeSimplifyHint(OBU.Inputs[1]);
3667 }
3668
3669 // Try to remove redundant alignment assumptions.
3670 if (OBU.getTagName() == "align" && OBU.Inputs.size() == 2) {
3671 RetainedKnowledge RK = getKnowledgeFromOperandInAssume(
3672 *cast<AssumeInst>(II), II->arg_size() + Idx);
3673 if (!RK || RK.AttrKind != Attribute::Alignment ||
3674 !isPowerOf2_64(RK.ArgValue))
3675 continue;
3676
3677 // Remove align 1 bundles; they don't add any useful information.
3678 if (RK.ArgValue == 1)
3679 return CallBase::removeOperandBundle(II, OBU.getTagID());
3680
3681 // Don't try to remove align assumptions for pointers derived from
3682 // arguments. We might lose information if the function gets inlined and
3683 // the align argument attribute disappears.
3684 Value *UO = getUnderlyingObject(RK.WasOn);
3685 if (!UO || isa<Argument>(UO))
3686 continue;
3687
3688 // Compute known bits for the pointer, passing nullptr as context to
3689 // avoid computeKnownBits using the assumption we are about to remove
3690 // for reasoning.
3691 KnownBits Known = computeKnownBits(RK.WasOn, /*CtxI=*/nullptr);
3692 unsigned TZ = std::min(Known.countMinTrailingZeros(),
3693 Value::MaxAlignmentExponent);
3694 if ((1ULL << TZ) < RK.ArgValue)
3695 continue;
3696 return CallBase::removeOperandBundle(II, OBU.getTagID());
3697 }
3698
3699 if (OBU.getTagName() == "nonnull" && OBU.Inputs.size() == 1) {
3700 RetainedKnowledge RK = getKnowledgeFromOperandInAssume(
3701 *cast<AssumeInst>(II), II->arg_size() + Idx);
3702 if (!RK || RK.AttrKind != Attribute::NonNull)
3703 continue;
3704
3705 // Drop assume if we can prove nonnull without it
3706 if (isKnownNonZero(RK.WasOn, getSimplifyQuery().getWithInstruction(II)))
3707 return CallBase::removeOperandBundle(II, OBU.getTagID());
3708
3709 // Fold the assume into metadata if it's valid at the load
3710 if (auto *LI = dyn_cast<LoadInst>(RK.WasOn);
3711 LI &&
3712 isValidAssumeForContext(II, LI, &DT, /*AllowEphemerals=*/true)) {
3713 MDNode *MD = MDNode::get(II->getContext(), {});
3714 LI->setMetadata(LLVMContext::MD_nonnull, MD);
3715 LI->setMetadata(LLVMContext::MD_noundef, MD);
3716 return CallBase::removeOperandBundle(II, OBU.getTagID());
3717 }
3718
3719 // TODO: apply nonnull return attributes to calls and invokes
3720 }
3721 }
3722
3723 // Convert nonnull assume like:
3724 // %A = icmp ne i32* %PTR, null
3725 // call void @llvm.assume(i1 %A)
3726 // into
3727 // call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ]
3728 if (match(IIOperand,
3729 m_SpecificICmp(ICmpInst::ICMP_NE, m_Value(A), m_Zero())) &&
3730 A->getType()->isPointerTy()) {
3731 if (auto *Replacement = buildAssumeFromKnowledge(
3732 {RetainedKnowledge{Attribute::NonNull, 0, A}}, Next, &AC, &DT)) {
3733
3734 InsertNewInstBefore(Replacement, Next->getIterator());
3735 AC.registerAssumption(Replacement);
3736 return RemoveConditionFromAssume(II);
3737 }
3738 }
3739
3740 // Convert alignment assume like:
3741 // %B = ptrtoint i32* %A to i64
3742 // %C = and i64 %B, Constant
3743 // %D = icmp eq i64 %C, 0
3744 // call void @llvm.assume(i1 %D)
3745 // into
3746 // call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 Constant + 1)]
3747 uint64_t AlignMask = 1;
3748 if ((match(IIOperand, m_Not(m_Trunc(m_Value(A)))) ||
3749 match(IIOperand,
3750 m_SpecificICmp(ICmpInst::ICMP_EQ,
3751 m_And(m_Value(A), m_ConstantInt(AlignMask)),
3752 m_Zero())))) {
3753 if (isPowerOf2_64(AlignMask + 1)) {
3754 uint64_t Offset = 0;
3755 match(A, m_Add(m_Value(A), m_ConstantInt(Offset)));
3756 if (match(A, m_PtrToIntOrAddr(m_Value(A)))) {
3757 /// Note: this doesn't preserve the offset information but merges
3758 /// offset and alignment.
3759 /// TODO: we can generate a GEP instead of merging the alignment with
3760 /// the offset.
3761 RetainedKnowledge RK{Attribute::Alignment,
3762 MinAlign(Offset, AlignMask + 1), A};
3763 if (auto *Replacement =
3764 buildAssumeFromKnowledge(RK, II, &AC, &DT)) {
3765
3766 Replacement->insertAfter(II->getIterator());
3767 AC.registerAssumption(Replacement);
3768 }
3769 return RemoveConditionFromAssume(II);
3770 }
3771 }
3772 }
3773
3774 /// Canonicalize Knowledge in operand bundles.
3775 if (EnableKnowledgeRetention && II->hasOperandBundles()) {
3776 for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
3777 auto &BOI = II->bundle_op_info_begin()[Idx];
3778 RetainedKnowledge RK =
3779 llvm::getKnowledgeFromBundle(cast<AssumeInst>(*II), BOI);
3780 if (BOI.End - BOI.Begin > 2)
3781 continue; // Prevent reducing knowledge in an align with offset since
3782 // extracting a RetainedKnowledge from them loses offset
3783 // information
3784 RetainedKnowledge CanonRK =
3785 llvm::simplifyRetainedKnowledge(cast<AssumeInst>(II), RK,
3786 &getAssumptionCache(),
3787 &getDominatorTree());
3788 if (CanonRK == RK)
3789 continue;
3790 if (!CanonRK) {
3791 if (BOI.End - BOI.Begin > 0) {
3792 Worklist.pushValue(II->op_begin()[BOI.Begin]);
3793 Value::dropDroppableUse(II->op_begin()[BOI.Begin]);
3794 }
3795 continue;
3796 }
3797 assert(RK.AttrKind == CanonRK.AttrKind);
3798 if (BOI.End - BOI.Begin > 0)
3799 II->op_begin()[BOI.Begin].set(CanonRK.WasOn);
3800 if (BOI.End - BOI.Begin > 1)
3801 II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
3802 Type::getInt64Ty(II->getContext()), CanonRK.ArgValue));
3803 if (RK.WasOn)
3804 Worklist.pushValue(RK.WasOn);
3805 return II;
3806 }
3807 }
3808
3809 // If there is a dominating assume with the same condition as this one,
3810 // then this one is redundant, and should be removed.
3811 KnownBits Known(1);
3812 computeKnownBits(IIOperand, Known, II);
3813 if (Known.isAllOnes() && isAssumeWithEmptyBundle(cast<AssumeInst>(*II)))
3814 return eraseInstFromFunction(*II);
3815
3816 // assume(false) is unreachable.
3817 if (match(IIOperand, m_CombineOr(m_Zero(), m_Undef()))) {
3818 CreateNonTerminatorUnreachable(II);
3819 return eraseInstFromFunction(*II);
3820 }
3821
3822 // Update the cache of affected values for this assumption (we might be
3823 // here because we just simplified the condition).
3824 AC.updateAffectedValues(cast<AssumeInst>(II));
3825 break;
3826 }
3827 case Intrinsic::experimental_guard: {
3828 // Is this guard followed by another guard? We scan forward over a small
3829 // fixed window of instructions to handle common cases with conditions
3830 // computed between guards.
3831 Instruction *NextInst = II->getNextNode();
3832 for (unsigned i = 0; i < GuardWideningWindow; i++) {
3833 // Note: Using context-free form to avoid compile time blow up
3834 if (!isSafeToSpeculativelyExecute(NextInst))
3835 break;
3836 NextInst = NextInst->getNextNode();
3837 }
3838 Value *NextCond = nullptr;
3839 if (match(NextInst,
3840 m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
3841 Value *CurrCond = II->getArgOperand(0);
3842
3843 // Remove a guard that it is immediately preceded by an identical guard.
3844 // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
3845 if (CurrCond != NextCond) {
3846 Instruction *MoveI = II->getNextNode();
3847 while (MoveI != NextInst) {
3848 auto *Temp = MoveI;
3849 MoveI = MoveI->getNextNode();
3850 Temp->moveBefore(II->getIterator());
3851 }
3852 replaceOperand(*II, 0, Builder.CreateAnd(CurrCond, NextCond));
3853 }
3854 eraseInstFromFunction(*NextInst);
3855 return II;
3856 }
3857 break;
3858 }
3859 case Intrinsic::vector_insert: {
3860 Value *Vec = II->getArgOperand(0);
3861 Value *SubVec = II->getArgOperand(1);
3862 Value *Idx = II->getArgOperand(2);
3863 auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
3864 auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
3865 auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());
3866
3867 // Only canonicalize if the destination vector, Vec, and SubVec are all
3868 // fixed vectors.
3869 if (DstTy && VecTy && SubVecTy) {
3870 unsigned DstNumElts = DstTy->getNumElements();
3871 unsigned VecNumElts = VecTy->getNumElements();
3872 unsigned SubVecNumElts = SubVecTy->getNumElements();
3873 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
3874
3875 // An insert that entirely overwrites Vec with SubVec is a nop.
3876 if (VecNumElts == SubVecNumElts)
3877 return replaceInstUsesWith(CI, SubVec);
3878
3879 // Widen SubVec into a vector of the same width as Vec, since
3880 // shufflevector requires the two input vectors to be the same width.
3881 // Elements beyond the bounds of SubVec within the widened vector are
3882 // undefined.
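// Worked example (illustrative): inserting <2 x i32> %sub into <4 x i32>
// %vec at index 2 widens %sub with WidenMask = <0, 1, poison, poison> and
// then selects Mask = <0, 1, 4, 5> over {%vec, widened %sub}, i.e. lanes
// 0-1 of %vec followed by both lanes of %sub.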
3883 SmallVector<int, 8> WidenMask;
3884 unsigned i;
3885 for (i = 0; i != SubVecNumElts; ++i)
3886 WidenMask.push_back(i);
3887 for (; i != VecNumElts; ++i)
3888 WidenMask.push_back(PoisonMaskElem);
3889
3890 Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask);
3891
3892 SmallVector<int, 8> Mask;
3893 for (unsigned i = 0; i != IdxN; ++i)
3894 Mask.push_back(i);
3895 for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
3896 Mask.push_back(i);
3897 for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
3898 Mask.push_back(i);
3899
3900 Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);
3901 return replaceInstUsesWith(CI, Shuffle);
3902 }
3903 break;
3904 }
3905 case Intrinsic::vector_extract: {
3906 Value *Vec = II->getArgOperand(0);
3907 Value *Idx = II->getArgOperand(1);
3908
3909 Type *ReturnType = II->getType();
3910 // (extract_vector (insert_vector InsertTuple, InsertValue, InsertIdx),
3911 // ExtractIdx)
3912 unsigned ExtractIdx = cast<ConstantInt>(Idx)->getZExtValue();
3913 Value *InsertTuple, *InsertIdx, *InsertValue;
3914 if (match(Vec, m_Intrinsic<Intrinsic::vector_insert>(m_Value(InsertTuple),
3915 m_Value(InsertValue),
3916 m_Value(InsertIdx))) &&
3917 InsertValue->getType() == ReturnType) {
3918 unsigned Index = cast<ConstantInt>(InsertIdx)->getZExtValue();
3919 // Case where we get the same index right after setting it.
3920 // extract.vector(insert.vector(InsertTuple, InsertValue, Idx), Idx) -->
3921 // InsertValue
3922 if (ExtractIdx == Index)
3923 return replaceInstUsesWith(CI, InsertValue);
3924 // If we are getting a different index than what was set in the
3925 // insert.vector intrinsic. We can just set the input tuple to the one up
3926 // in the chain. extract.vector(insert.vector(InsertTuple, InsertValue,
3927 // InsertIndex), ExtractIndex)
3928 // --> extract.vector(InsertTuple, ExtractIndex)
3929 else
3930 return replaceOperand(CI, 0, InsertTuple);
3931 }
3932
3933 ConstantInt *ALMUpperBound;
3934 if (match(Vec, m_Intrinsic<Intrinsic::get_active_lane_mask>(
3935 m_Value(), m_ConstantInt(ALMUpperBound)))) {
3936 const auto &Attrs = II->getFunction()->getAttributes().getFnAttrs();
3937 unsigned VScaleMin = Attrs.getVScaleRangeMin();
3938 unsigned ScaleFactor =
3939 cast<VectorType>(ReturnType)->isScalableTy() ? VScaleMin : 1;
3940 if (ExtractIdx * ScaleFactor >= ALMUpperBound->getZExtValue())
3941 return replaceInstUsesWith(CI,
3942 ConstantVector::getNullValue(ReturnType));
3943 }
3944
3945 auto *DstTy = dyn_cast<VectorType>(ReturnType);
3946 auto *VecTy = dyn_cast<VectorType>(Vec->getType());
3947
3948 if (DstTy && VecTy) {
3949 auto DstEltCnt = DstTy->getElementCount();
3950 auto VecEltCnt = VecTy->getElementCount();
3951 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
3952
3953 // Extracting the entirety of Vec is a nop.
3954 if (DstEltCnt == VecTy->getElementCount()) {
3955 replaceInstUsesWith(CI, Vec);
3956 return eraseInstFromFunction(CI);
3957 }
3958
3959 // Only canonicalize to shufflevector if the destination vector and
3960 // Vec are fixed vectors.
3961 if (VecEltCnt.isScalable() || DstEltCnt.isScalable())
3962 break;
3963
3965 for (unsigned i = 0; i != DstEltCnt.getKnownMinValue(); ++i)
3966 Mask.push_back(IdxN + i);
3967
3968 Value *Shuffle = Builder.CreateShuffleVector(Vec, Mask);
3969 return replaceInstUsesWith(CI, Shuffle);
3970 }
3971 break;
3972 }
3973 case Intrinsic::vp_load: {
3974 auto *VPI = cast<VPIntrinsic>(II);
3975 // Fold away bit casts of the loaded value by loading the desired type,
3976 // if the mask is all-ones.
3977 Value *Mask = VPI->getMaskParam();
3978 Value *EVL = VPI->getVectorLengthParam();
3979 if (!isa<Constant>(Mask) || !cast<Constant>(Mask)->isAllOnesValue() ||
3980 !II->hasOneUse())
3981 break;
3982
3983 const DataLayout &DL = II->getDataLayout();
3984 auto *Cast = dyn_cast<CastInst>(II->user_back());
3985 if (!Cast || !Cast->isNoopCast(DL) || !isa<VectorType>(Cast->getDestTy()))
3986 break;
3987 VectorType *OrigVecTy = cast<VectorType>(II->getType());
3988 Align OrigAlign =
3989 DL.getValueOrABITypeAlignment(VPI->getPointerAlignment(), OrigVecTy);
3990 ElementCount OrigVecCnt = OrigVecTy->getElementCount();
3991 VectorType *NewVecTy = cast<VectorType>(Cast->getDestTy());
3992 ElementCount NewVecCnt = NewVecTy->getElementCount();
3993
3994 // Right now we only support cases where the NewVec is longer, because for
3995 // cases where it's shorter, we have to be sure that EVL can be exactly
3996 // divided, otherwise it might yield incorrect results or even page faults
3997 // (if we round-up during the division).
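// Worked example (illustrative): a vp.load of <vscale x 2 x i64> whose
// result is bitcast to <vscale x 4 x i32> has Factor = 2, so the new load
// uses an all-true i32 mask and EVL * 2, covering exactly the same bytes
// as the original load.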
3998 if (OrigVecCnt.isScalable() == NewVecCnt.isScalable() &&
3999 NewVecCnt.hasKnownScalarFactor(OrigVecCnt)) {
4000 unsigned Factor = NewVecCnt.getKnownScalarFactor(OrigVecCnt);
4001 Value *NewEVL = Builder.CreateNUWMul(EVL, Builder.getInt32(Factor));
4002 Value *NewMask = Builder.CreateVectorSplat(NewVecCnt, Builder.getTrue());
4003 CallInst *NewVP = Builder.CreateIntrinsic(
4004 NewVecTy, Intrinsic::vp_load,
4005 {VPI->getMemoryPointerParam(), NewMask, NewEVL});
4006 // Preserve the original alignment.
4007 NewVP->addParamAttrs(
4008 0, AttrBuilder(VPI->getContext()).addAlignmentAttr(OrigAlign));
4009 replaceInstUsesWith(*Cast, NewVP);
4010 return eraseInstFromFunction(*Cast);
4011 }
4012 break;
4013 }
4014 case Intrinsic::experimental_vp_reverse: {
4015 Value *X;
4016 Value *Vec = II->getArgOperand(0);
4017 Value *Mask = II->getArgOperand(1);
4018 if (!match(Mask, m_AllOnes()))
4019 break;
4020 Value *EVL = II->getArgOperand(2);
4021 // TODO: Canonicalize experimental.vp.reverse after unop/binops?
4022 // rev(unop rev(X)) --> unop X
4023 if (match(Vec,
4024 m_OneUse(m_UnOp(m_Intrinsic<Intrinsic::experimental_vp_reverse>(
4025 m_Value(X), m_AllOnes(), m_Specific(EVL)))))) {
4026 auto *OldUnOp = cast<UnaryOperator>(Vec);
4027 auto *NewUnOp = UnaryOperator::CreateWithCopiedFlags(
4028 OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(),
4029 II->getIterator());
4030 return replaceInstUsesWith(CI, NewUnOp);
4031 }
4032 break;
4033 }
4034 case Intrinsic::vector_reduce_or:
4035 case Intrinsic::vector_reduce_and: {
4036 // Canonicalize logical or/and reductions:
4037 // Or reduction for i1 is represented as:
4038 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
4039 // %res = cmp ne iReduxWidth %val, 0
4040 // And reduction for i1 is represented as:
4041 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
4042 // %res = cmp eq iReduxWidth %val, -1 (all lanes set)
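// Illustrative example for an or-reduction of <4 x i1> %v:
//   %val = bitcast <4 x i1> %v to i4
//   %res = icmp ne i4 %val, 0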
4043 Value *Arg = II->getArgOperand(0);
4044 Value *Vect;
4045
4046 if (Value *NewOp =
4047 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
4048 replaceUse(II->getOperandUse(0), NewOp);
4049 return II;
4050 }
4051
4052 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
4053 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
4054 if (FTy->getElementType() == Builder.getInt1Ty()) {
4055 Value *Res = Builder.CreateBitCast(
4056 Vect, Builder.getIntNTy(FTy->getNumElements()));
4057 if (IID == Intrinsic::vector_reduce_and) {
4058 Res = Builder.CreateICmpEQ(
4059 Res, ConstantInt::getAllOnesValue(Res->getType()));
4060 } else {
4061 assert(IID == Intrinsic::vector_reduce_or &&
4062 "Expected or reduction.");
4063 Res = Builder.CreateIsNotNull(Res);
4064 }
4065 if (Arg != Vect)
4066 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
4067 II->getType());
4068 return replaceInstUsesWith(CI, Res);
4069 }
4070 }
4071 [[fallthrough]];
4072 }
4073 case Intrinsic::vector_reduce_add: {
4074 if (IID == Intrinsic::vector_reduce_add) {
4075 // Convert vector_reduce_add(ZExt(<n x i1>)) to
4076 // ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
4077 // Convert vector_reduce_add(SExt(<n x i1>)) to
4078 // -ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
4079 // Convert vector_reduce_add(<n x i1>) to
4080 // Trunc(ctpop(bitcast <n x i1> to in)).
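// Worked example (illustrative): for %e = zext <4 x i1> %v to <4 x i32>,
// vector_reduce_add(%e) is the number of set lanes:
//   %i = bitcast <4 x i1> %v to i4
//   %c = call i4 @llvm.ctpop.i4(i4 %i)
//   %r = zext i4 %c to i32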
4081 Value *Arg = II->getArgOperand(0);
4082 Value *Vect;
4083
4084 if (Value *NewOp =
4085 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
4086 replaceUse(II->getOperandUse(0), NewOp);
4087 return II;
4088 }
4089
4090 // vector.reduce.add.vNiM(splat(%x)) -> mul(%x, N)
4091 if (Value *Splat = getSplatValue(Arg)) {
4092 ElementCount VecToReduceCount =
4093 cast<VectorType>(Arg->getType())->getElementCount();
4094 if (VecToReduceCount.isFixed()) {
4095 unsigned VectorSize = VecToReduceCount.getFixedValue();
4096 return BinaryOperator::CreateMul(
4097 Splat,
4098 ConstantInt::get(Splat->getType(), VectorSize, /*IsSigned=*/false,
4099 /*ImplicitTrunc=*/true));
4100 }
4101 }
4102
4103 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
4104 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
4105 if (FTy->getElementType() == Builder.getInt1Ty()) {
4106 Value *V = Builder.CreateBitCast(
4107 Vect, Builder.getIntNTy(FTy->getNumElements()));
4108 Value *Res = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, V);
4109 Res = Builder.CreateZExtOrTrunc(Res, II->getType());
4110 if (Arg != Vect &&
4111 cast<Instruction>(Arg)->getOpcode() == Instruction::SExt)
4112 Res = Builder.CreateNeg(Res);
4113 return replaceInstUsesWith(CI, Res);
4114 }
4115 }
4116 }
4117 [[fallthrough]];
4118 }
4119 case Intrinsic::vector_reduce_xor: {
4120 if (IID == Intrinsic::vector_reduce_xor) {
4121 // Exclusive disjunction reduction over the vector with
4122 // (potentially-extended) i1 element type is actually a
4123 // (potentially-extended) arithmetic `add` reduction over the original
4124 // non-extended value:
4125 // vector_reduce_xor(?ext(<n x i1>))
4126 // -->
4127 // ?ext(vector_reduce_add(<n x i1>))
4128 Value *Arg = II->getArgOperand(0);
4129 Value *Vect;
4130
4131 if (Value *NewOp =
4132 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
4133 replaceUse(II->getOperandUse(0), NewOp);
4134 return II;
4135 }
4136
4137 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
4138 if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
4139 if (VTy->getElementType() == Builder.getInt1Ty()) {
4140 Value *Res = Builder.CreateAddReduce(Vect);
4141 if (Arg != Vect)
4142 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
4143 II->getType());
4144 return replaceInstUsesWith(CI, Res);
4145 }
4146 }
4147 }
4148 [[fallthrough]];
4149 }
4150 case Intrinsic::vector_reduce_mul: {
4151 if (IID == Intrinsic::vector_reduce_mul) {
4152 // Multiplicative reduction over the vector with (potentially-extended)
4153 // i1 element type is actually a (potentially zero-extended)
4154 // logical `and` reduction over the original non-extended value:
4155 // vector_reduce_mul(?ext(<n x i1>))
4156 // -->
4157 // zext(vector_reduce_and(<n x i1>))
4158 Value *Arg = II->getArgOperand(0);
4159 Value *Vect;
4160
4161 if (Value *NewOp =
4162 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
4163 replaceUse(II->getOperandUse(0), NewOp);
4164 return II;
4165 }
4166
4167 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
4168 if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
4169 if (VTy->getElementType() == Builder.getInt1Ty()) {
4170 Value *Res = Builder.CreateAndReduce(Vect);
4171 Res = Builder.CreateZExt(Res, II->getType());
4172 return replaceInstUsesWith(CI, Res);
4173 }
4174 }
4175 }
4176 [[fallthrough]];
4177 }
4178 case Intrinsic::vector_reduce_umin:
4179 case Intrinsic::vector_reduce_umax: {
4180 if (IID == Intrinsic::vector_reduce_umin ||
4181 IID == Intrinsic::vector_reduce_umax) {
4182 // UMin/UMax reduction over the vector with (potentially-extended)
4183 // i1 element type is actually a (potentially-extended)
4184 // logical `and`/`or` reduction over the original non-extended value:
4185 // vector_reduce_u{min,max}(?ext(<n x i1>))
4186 // -->
4187 // ?ext(vector_reduce_{and,or}(<n x i1>))
4188 Value *Arg = II->getArgOperand(0);
4189 Value *Vect;
4190
4191 if (Value *NewOp =
4192 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
4193 replaceUse(II->getOperandUse(0), NewOp);
4194 return II;
4195 }
4196
4197 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
4198 if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
4199 if (VTy->getElementType() == Builder.getInt1Ty()) {
4200 Value *Res = IID == Intrinsic::vector_reduce_umin
4201 ? Builder.CreateAndReduce(Vect)
4202 : Builder.CreateOrReduce(Vect);
4203 if (Arg != Vect)
4204 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
4205 II->getType());
4206 return replaceInstUsesWith(CI, Res);
4207 }
4208 }
4209 }
4210 [[fallthrough]];
4211 }
4212 case Intrinsic::vector_reduce_smin:
4213 case Intrinsic::vector_reduce_smax: {
4214 if (IID == Intrinsic::vector_reduce_smin ||
4215 IID == Intrinsic::vector_reduce_smax) {
4216 // SMin/SMax reduction over the vector with (potentially-extended)
4217 // i1 element type is actually a (potentially-extended)
4218 // logical `and`/`or` reduction over the original non-extended value:
4219 // vector_reduce_s{min,max}(<n x i1>)
4220 // -->
4221 // vector_reduce_{or,and}(<n x i1>)
4222 // and
4223 // vector_reduce_s{min,max}(sext(<n x i1>))
4224 // -->
4225 // sext(vector_reduce_{or,and}(<n x i1>))
4226 // and
4227 // vector_reduce_s{min,max}(zext(<n x i1>))
4228 // -->
4229 // zext(vector_reduce_{and,or}(<n x i1>))
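// Rationale: over i1 lanes, zext yields 0/1, where smin is 1 only if all
// lanes are set (an `and`), while sext yields 0/-1, where smin is -1 if
// any lane is set (an `or`); smax mirrors this.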
4230 Value *Arg = II->getArgOperand(0);
4231 Value *Vect;
4232
4233 if (Value *NewOp =
4234 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
4235 replaceUse(II->getOperandUse(0), NewOp);
4236 return II;
4237 }
4238
4239 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
4240 if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
4241 if (VTy->getElementType() == Builder.getInt1Ty()) {
4242 Instruction::CastOps ExtOpc = Instruction::CastOps::CastOpsEnd;
4243 if (Arg != Vect)
4244 ExtOpc = cast<CastInst>(Arg)->getOpcode();
4245 Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
4246 (ExtOpc == Instruction::CastOps::ZExt))
4247 ? Builder.CreateAndReduce(Vect)
4248 : Builder.CreateOrReduce(Vect);
4249 if (Arg != Vect)
4250 Res = Builder.CreateCast(ExtOpc, Res, II->getType());
4251 return replaceInstUsesWith(CI, Res);
4252 }
4253 }
4254 }
4255 [[fallthrough]];
4256 }
4257 case Intrinsic::vector_reduce_fmax:
4258 case Intrinsic::vector_reduce_fmin:
4259 case Intrinsic::vector_reduce_fadd:
4260 case Intrinsic::vector_reduce_fmul: {
4261 bool CanReorderLanes = (IID != Intrinsic::vector_reduce_fadd &&
4262 IID != Intrinsic::vector_reduce_fmul) ||
4263 II->hasAllowReassoc();
4264 const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||
4265 IID == Intrinsic::vector_reduce_fmul)
4266 ? 1
4267 : 0;
4268 Value *Arg = II->getArgOperand(ArgIdx);
4269 if (Value *NewOp = simplifyReductionOperand(Arg, CanReorderLanes)) {
4270 replaceUse(II->getOperandUse(ArgIdx), NewOp);
4271 return nullptr;
4272 }
4273 break;
4274 }
4275 case Intrinsic::is_fpclass: {
4276 if (Instruction *I = foldIntrinsicIsFPClass(*II))
4277 return I;
4278 break;
4279 }
4280 case Intrinsic::threadlocal_address: {
4281 Align MinAlign = getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
4282 MaybeAlign Align = II->getRetAlign();
4283 if (MinAlign > Align.valueOrOne()) {
4284 II->addRetAttr(Attribute::getWithAlignment(II->getContext(), MinAlign));
4285 return II;
4286 }
4287 break;
4288 }
4289 case Intrinsic::fptoui_sat:
4290 case Intrinsic::fptosi_sat:
4291 if (Instruction *I = foldItoFPtoI(*II))
4292 return I;
4293 break;
4294 case Intrinsic::frexp: {
4295 Value *X;
4296 // The first result is idempotent with the added complication of the struct
4297 // return, and the second result is zero because the value is already
4298 // normalized.
4299 if (match(II->getArgOperand(0), m_ExtractValue<0>(m_Value(X)))) {
4300 if (match(X, m_Intrinsic<Intrinsic::frexp>(m_Value()))) {
4301 X = Builder.CreateInsertValue(
4302 X, Constant::getNullValue(II->getType()->getStructElementType(1)),
4303 1);
4304 return replaceInstUsesWith(*II, X);
4305 }
4306 }
4307 break;
4308 }
4309 case Intrinsic::get_active_lane_mask: {
4310 const APInt *Op0, *Op1;
4311 if (match(II->getOperand(0), m_StrictlyPositive(Op0)) &&
4312 match(II->getOperand(1), m_APInt(Op1))) {
4313 Type *OpTy = II->getOperand(0)->getType();
4314 return replaceInstUsesWith(
4315 *II, Builder.CreateIntrinsic(
4316 II->getType(), Intrinsic::get_active_lane_mask,
4317 {Constant::getNullValue(OpTy),
4318 ConstantInt::get(OpTy, Op1->usub_sat(*Op0))}));
4319 }
4320 break;
4321 }
4322 case Intrinsic::experimental_get_vector_length: {
4323 // get.vector.length(Cnt, MaxLanes) --> Cnt when Cnt <= MaxLanes
4324 unsigned BitWidth =
4325 std::max(II->getArgOperand(0)->getType()->getScalarSizeInBits(),
4326 II->getType()->getScalarSizeInBits());
4327 ConstantRange Cnt =
4328 computeConstantRangeIncludingKnownBits(II->getArgOperand(0), false,
4329 SQ.getWithInstruction(II))
4330 .zextOrTrunc(BitWidth);
4331 ConstantRange MaxLanes = cast<ConstantInt>(II->getArgOperand(1))
4332 ->getValue()
4333 .zextOrTrunc(Cnt.getBitWidth());
4334 if (cast<ConstantInt>(II->getArgOperand(2))->isOne())
4335 MaxLanes = MaxLanes.multiply(
4336 getVScaleRange(II->getFunction(), Cnt.getBitWidth()));
4337
4338 if (Cnt.icmp(CmpInst::ICMP_ULE, MaxLanes))
4339 return replaceInstUsesWith(
4340 *II, Builder.CreateZExtOrTrunc(II->getArgOperand(0), II->getType()));
4341 return nullptr;
4342 }
4343 default: {
4344 // Handle target specific intrinsics
4345 std::optional<Instruction *> V = targetInstCombineIntrinsic(*II);
4346 if (V)
4347 return *V;
4348 break;
4349 }
4350 }
4351
4352 // Try to fold intrinsic into select/phi operands. This is legal if:
4353 // * The intrinsic is speculatable.
4354 // * The operand is one of the following:
4355 // - a phi.
4356 // - a select with a scalar condition.
4357 // - a select with a vector condition and II is not a cross lane operation.
4358 if (isSafeToSpeculativelyExecuteWithVariableReplaced(II)) {
4359 for (Value *Op : II->args()) {
4360 if (auto *Sel = dyn_cast<SelectInst>(Op)) {
4361 bool IsVectorCond = Sel->getCondition()->getType()->isVectorTy();
4362 if (IsVectorCond &&
4363 (!isNotCrossLaneOperation(II) || !II->getType()->isVectorTy()))
4364 continue;
4365 // Don't replace a scalar select with a more expensive vector select if
4366 // we can't simplify both arms of the select.
4367 bool SimplifyBothArms =
4368 !Op->getType()->isVectorTy() && II->getType()->isVectorTy();
4369 if (Instruction *R = foldOpIntoSelect(
4370 *II, Sel, /*FoldWithMultiUse=*/false, SimplifyBothArms))
4371 return R;
4372 }
4373 if (auto *Phi = dyn_cast<PHINode>(Op))
4374 if (Instruction *R = foldOpIntoPhi(*II, Phi))
4375 return R;
4376 }
4377 }
4378
4379 if (Instruction *Shuf = foldShuffledIntrinsicOperands(II))
4380 return Shuf;
4381
4382 if (Value *Reverse = foldReversedIntrinsicOperands(II))
4383 return replaceInstUsesWith(*II, Reverse);
4384
4386 return replaceInstUsesWith(*II, Res);
4387
4388 // Some intrinsics (like experimental_gc_statepoint) can be used in invoke
4389 // context, so it is handled in visitCallBase and we should trigger it.
4390 return visitCallBase(*II);
4391}
4392
4393 // Fence instruction simplification
4394 Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) {
4395 auto *NFI = dyn_cast<FenceInst>(FI.getNextNode());
4396 // This check is solely here to handle arbitrary target-dependent syncscopes.
4397 // TODO: Can be removed if it does not matter in practice.
4398 if (NFI && FI.isIdenticalTo(NFI))
4399 return eraseInstFromFunction(FI);
4400
4401 // Returns true if FI1 is an identical or stronger fence than FI2.
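// For example (illustrative): a seq_cst fence immediately followed by an
// acquire fence in the same scope makes the acquire fence redundant, since
// seq_cst is at least as strong as acquire.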
4402 auto isIdenticalOrStrongerFence = [](FenceInst *FI1, FenceInst *FI2) {
4403 auto FI1SyncScope = FI1->getSyncScopeID();
4404 // Consider same scope, where scope is global or single-thread.
4405 if (FI1SyncScope != FI2->getSyncScopeID() ||
4406 (FI1SyncScope != SyncScope::System &&
4407 FI1SyncScope != SyncScope::SingleThread))
4408 return false;
4409
4410 return isAtLeastOrStrongerThan(FI1->getOrdering(), FI2->getOrdering());
4411 };
4412 if (NFI && isIdenticalOrStrongerFence(NFI, &FI))
4413 return eraseInstFromFunction(FI);
4414
4415 if (auto *PFI = dyn_cast_or_null<FenceInst>(FI.getPrevNode()))
4416 if (isIdenticalOrStrongerFence(PFI, &FI))
4417 return eraseInstFromFunction(FI);
4418 return nullptr;
4419}
4420
4421 // InvokeInst simplification
4422 Instruction *InstCombinerImpl::visitInvokeInst(InvokeInst &II) {
4423 return visitCallBase(II);
4424}
4425
4426 // CallBrInst simplification
4427 Instruction *InstCombinerImpl::visitCallBrInst(CallBrInst &CBI) {
4428 return visitCallBase(CBI);
4429}
4430
4431 static Value *optimizeModularFormat(CallInst *CI, IRBuilderBase &B) {
4432 if (!CI->hasFnAttr("modular-format"))
4433 return nullptr;
4434
4435 SmallVector<StringRef> Args(
4436 llvm::split(CI->getFnAttr("modular-format").getValueAsString(), ','));
4437 // TODO: Make use of the first two arguments
4438 unsigned FirstArgIdx;
4439 [[maybe_unused]] bool Error;
4440 Error = Args[2].getAsInteger(10, FirstArgIdx);
4441 assert(!Error && "invalid first arg index");
4442 --FirstArgIdx;
4443 StringRef FnName = Args[3];
4444 StringRef ImplName = Args[4];
4445 ArrayRef<StringRef> AllAspects = ArrayRef(Args).drop_front(5);
4446
4447 if (AllAspects.empty())
4448 return nullptr;
4449
4450 SmallVector<StringRef> NeededAspects;
4451 for (StringRef Aspect : AllAspects) {
4452 if (Aspect == "float") {
4453 if (llvm::any_of(
4454 llvm::make_range(std::next(CI->arg_begin(), FirstArgIdx),
4455 CI->arg_end()),
4456 [](Value *V) { return V->getType()->isFloatingPointTy(); }))
4457 NeededAspects.push_back("float");
4458 } else {
4459 // Unknown aspects are always considered to be needed.
4460 NeededAspects.push_back(Aspect);
4461 }
4462 }
4463
4464 if (NeededAspects.size() == AllAspects.size())
4465 return nullptr;
4466
4467 Module *M = CI->getModule();
4468 LLVMContext &Ctx = M->getContext();
4469 Function *Callee = CI->getCalledFunction();
4470 FunctionCallee ModularFn = M->getOrInsertFunction(
4471 FnName, Callee->getFunctionType(),
4472 Callee->getAttributes().removeFnAttribute(Ctx, "modular-format"));
4473 CallInst *New = cast<CallInst>(CI->clone());
4474 New->setCalledFunction(ModularFn);
4475 New->removeFnAttr("modular-format");
4476 B.Insert(New);
4477
4478 const auto ReferenceAspect = [&](StringRef Aspect) {
4479 SmallString<20> Name = ImplName;
4480 Name += '_';
4481 Name += Aspect;
4482 Function *RelocNoneFn =
4483 Intrinsic::getOrInsertDeclaration(M, Intrinsic::reloc_none);
4484 B.CreateCall(RelocNoneFn,
4485 {MetadataAsValue::get(Ctx, MDString::get(Ctx, Name))});
4486 };
4487
4488 llvm::sort(NeededAspects);
4489 for (StringRef Request : NeededAspects)
4490 ReferenceAspect(Request);
4491
4492 return New;
4493}
4494
4495Instruction *InstCombinerImpl::tryOptimizeCall(CallInst *CI) {
4496 if (!CI->getCalledFunction()) return nullptr;
4497
4498 // Skip optimizing notail and musttail calls so
4499 // LibCallSimplifier::optimizeCall doesn't have to preserve those invariants.
4500 // LibCallSimplifier::optimizeCall should try to preserve tail calls though.
4501 if (CI->isMustTailCall() || CI->isNoTailCall())
4502 return nullptr;
4503
4504 auto InstCombineRAUW = [this](Instruction *From, Value *With) {
4505 replaceInstUsesWith(*From, With);
4506 };
4507 auto InstCombineErase = [this](Instruction *I) {
4508 eraseInstFromFunction(*I);
4509 };
4510 LibCallSimplifier Simplifier(DL, &TLI, &DT, &DC, &AC, ORE, BFI, PSI,
4511 InstCombineRAUW, InstCombineErase);
4512 if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
4513 ++NumSimplified;
4514 return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
4515 }
4516 if (Value *With = optimizeModularFormat(CI, Builder)) {
4517 ++NumSimplified;
4518 return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
4519 }
4520
4521 return nullptr;
4522}
4523
4524 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
4525 // Strip off at most one level of pointer casts, looking for an alloca. This
4526 // is good enough in practice and simpler than handling any number of casts.
4527 Value *Underlying = TrampMem->stripPointerCasts();
4528 if (Underlying != TrampMem &&
4529 (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
4530 return nullptr;
4531 if (!isa<AllocaInst>(Underlying))
4532 return nullptr;
4533
4534 IntrinsicInst *InitTrampoline = nullptr;
4535 for (User *U : TrampMem->users()) {
4536 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4537 if (!II)
4538 return nullptr;
4539 if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
4540 if (InitTrampoline)
4541 // More than one init_trampoline writes to this value. Give up.
4542 return nullptr;
4543 InitTrampoline = II;
4544 continue;
4545 }
4546 if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
4547 // Allow any number of calls to adjust.trampoline.
4548 continue;
4549 return nullptr;
4550 }
4551
4552 // No call to init.trampoline found.
4553 if (!InitTrampoline)
4554 return nullptr;
4555
4556 // Check that the alloca is being used in the expected way.
4557 if (InitTrampoline->getOperand(0) != TrampMem)
4558 return nullptr;
4559
4560 return InitTrampoline;
4561}
4562
4563 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
4564 Value *TrampMem) {
4565 // Visit all the previous instructions in the basic block, and try to find a
4566 // init.trampoline which has a direct path to the adjust.trampoline.
4567 for (BasicBlock::iterator I = AdjustTramp->getIterator(),
4568 E = AdjustTramp->getParent()->begin();
4569 I != E;) {
4570 Instruction *Inst = &*--I;
4571 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
4572 if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
4573 II->getOperand(0) == TrampMem)
4574 return II;
4575 if (Inst->mayWriteToMemory())
4576 return nullptr;
4577 }
4578 return nullptr;
4579}
4580
4581// Given a call to llvm.adjust.trampoline, find and return the corresponding
4582// call to llvm.init.trampoline if the call to the trampoline can be optimized
4583 // to a direct call to a function. Otherwise return NULL.
4584 static IntrinsicInst *findInitTrampoline(Value *Callee) {
4585 Callee = Callee->stripPointerCasts();
4586 IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
4587 if (!AdjustTramp ||
4588 AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
4589 return nullptr;
4590
4591 Value *TrampMem = AdjustTramp->getOperand(0);
4592
4594 return IT;
4595 if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
4596 return IT;
4597 return nullptr;
4598}
4599
4600Instruction *InstCombinerImpl::foldPtrAuthIntrinsicCallee(CallBase &Call) {
4601 const Value *Callee = Call.getCalledOperand();
4602 const auto *IPC = dyn_cast<IntToPtrInst>(Callee);
4603 if (!IPC || !IPC->isNoopCast(DL))
4604 return nullptr;
4605
4606 const auto *II = dyn_cast<IntrinsicInst>(IPC->getOperand(0));
4607 if (!II)
4608 return nullptr;
4609
4610 Intrinsic::ID IIID = II->getIntrinsicID();
4611 if (IIID != Intrinsic::ptrauth_resign && IIID != Intrinsic::ptrauth_sign)
4612 return nullptr;
4613
4614 // Isolate the ptrauth bundle from the others.
4615 std::optional<OperandBundleUse> PtrAuthBundleOrNone;
4616 SmallVector<OperandBundleDef, 2> NewBundles;
4617 for (unsigned BI = 0, BE = Call.getNumOperandBundles(); BI != BE; ++BI) {
4618 OperandBundleUse Bundle = Call.getOperandBundleAt(BI);
4619 if (Bundle.getTagID() == LLVMContext::OB_ptrauth)
4620 PtrAuthBundleOrNone = Bundle;
4621 else
4622 NewBundles.emplace_back(Bundle);
4623 }
4624
4625 if (!PtrAuthBundleOrNone)
4626 return nullptr;
4627
4628 Value *NewCallee = nullptr;
4629 switch (IIID) {
4630 // call(ptrauth.resign(p)), ["ptrauth"()] -> call p, ["ptrauth"()]
4631 // assuming the call bundle and the sign operands match.
4632 case Intrinsic::ptrauth_resign: {
4633 // Resign result key should match bundle.
4634 if (II->getOperand(3) != PtrAuthBundleOrNone->Inputs[0])
4635 return nullptr;
4636 // Resign result discriminator should match bundle.
4637 if (II->getOperand(4) != PtrAuthBundleOrNone->Inputs[1])
4638 return nullptr;
4639
4640 // Resign input (auth) key should also match: we can't change the key on
4641 // the new call we're generating, because we don't know what keys are valid.
4642 if (II->getOperand(1) != PtrAuthBundleOrNone->Inputs[0])
4643 return nullptr;
4644
4645 Value *NewBundleOps[] = {II->getOperand(1), II->getOperand(2)};
4646 NewBundles.emplace_back("ptrauth", NewBundleOps);
4647 NewCallee = II->getOperand(0);
4648 break;
4649 }
4650
4651 // call(ptrauth.sign(p)), ["ptrauth"()] -> call p
4652 // assuming the call bundle and the sign operands match.
4653 // Non-ptrauth indirect calls are undesirable, but so is ptrauth.sign.
4654 case Intrinsic::ptrauth_sign: {
4655 // Sign key should match bundle.
4656 if (II->getOperand(1) != PtrAuthBundleOrNone->Inputs[0])
4657 return nullptr;
4658 // Sign discriminator should match bundle.
4659 if (II->getOperand(2) != PtrAuthBundleOrNone->Inputs[1])
4660 return nullptr;
4661 NewCallee = II->getOperand(0);
4662 break;
4663 }
4664 default:
4665 llvm_unreachable("unexpected intrinsic ID");
4666 }
4667
4668 if (!NewCallee)
4669 return nullptr;
4670
4671 NewCallee = Builder.CreateBitOrPointerCast(NewCallee, Callee->getType());
4672 CallBase *NewCall = CallBase::Create(&Call, NewBundles);
4673 NewCall->setCalledOperand(NewCallee);
4674 return NewCall;
4675}
4676
4677 Instruction *InstCombinerImpl::foldPtrAuthConstantCallee(CallBase &Call) {
4678 auto *CPA = dyn_cast<ConstantPtrAuth>(Call.getCalledOperand());
4679 if (!CPA)
4680 return nullptr;
4681
4682 auto *CalleeF = dyn_cast<Function>(CPA->getPointer());
4683 // If the ptrauth constant isn't based on a function pointer, bail out.
4684 if (!CalleeF)
4685 return nullptr;
4686
4687 // Inspect the call ptrauth bundle to check it matches the ptrauth constant.
4688 auto PAB = Call.getOperandBundle(LLVMContext::OB_ptrauth);
4689 if (!PAB)
4690 return nullptr;
4691
4692 auto *Key = cast<ConstantInt>(PAB->Inputs[0]);
4693 Value *Discriminator = PAB->Inputs[1];
4694
4695 // If the bundle doesn't match, this is probably going to fail to auth.
4696 if (!CPA->isKnownCompatibleWith(Key, Discriminator, DL))
4697 return nullptr;
4698
4699 // If the bundle matches the constant, proceed in making this a direct call.
4700 auto *NewCall = CallBase::removeOperandBundle(&Call, LLVMContext::OB_ptrauth);
4701 NewCall->setCalledOperand(CalleeF);
4702 return NewCall;
4703}
4704
4705bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call,
4706 const TargetLibraryInfo *TLI) {
4707 // Note: We only handle cases which can't be driven from generic attributes
4708 // here. So, for example, nonnull and noalias (which are common properties
4709 // of some allocation functions) are expected to be handled via annotation
4710 // of the respective allocator declaration with generic attributes.
4711 bool Changed = false;
4712
4713 if (!Call.getType()->isPointerTy())
4714 return Changed;
4715
4716 std::optional<APInt> Size = getAllocSize(&Call, TLI);
4717 if (Size && *Size != 0) {
4718 // TODO: We really should just emit deref_or_null here and then
4719 // let the generic inference code combine that with nonnull.
4720 if (Call.hasRetAttr(Attribute::NonNull)) {
4721 Changed = !Call.hasRetAttr(Attribute::Dereferenceable);
4722 Call.addRetAttr(Attribute::getWithDereferenceableBytes(
4723 Call.getContext(), Size->getLimitedValue()));
4724 } else {
4725 Changed = !Call.hasRetAttr(Attribute::DereferenceableOrNull);
4726 Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
4727 Call.getContext(), Size->getLimitedValue()));
4728 }
4729 }
4730
4731 // Add alignment attribute if alignment is a power of two constant.
4732 Value *Alignment = getAllocAlignment(&Call, TLI);
4733 if (!Alignment)
4734 return Changed;
4735
4736 ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment);
4737 if (AlignOpC && AlignOpC->getValue().ult(llvm::Value::MaximumAlignment)) {
4738 uint64_t AlignmentVal = AlignOpC->getZExtValue();
4739 if (llvm::isPowerOf2_64(AlignmentVal)) {
4740 Align ExistingAlign = Call.getRetAlign().valueOrOne();
4741 Align NewAlign = Align(AlignmentVal);
4742 if (NewAlign > ExistingAlign) {
4743 Call.addRetAttr(
4744 Attribute::getWithAlignment(Call.getContext(), NewAlign));
4745 Changed = true;
4746 }
4747 }
4748 }
4749 return Changed;
4750}
4751
4752/// Improvements for call, callbr and invoke instructions.
4753Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
4754 bool Changed = annotateAnyAllocSite(Call, &TLI);
4755
4756 // Mark any parameters that are known to be non-null with the nonnull
4757 // attribute. This is helpful for inlining calls to functions with null
4758 // checks on their arguments.
4759 SmallVector<unsigned, 4> ArgNos;
4760 unsigned ArgNo = 0;
4761
4762 for (Value *V : Call.args()) {
4763 if (V->getType()->isPointerTy()) {
4764 // Simplify the nonnull operand if the parameter is known to be nonnull.
4765 // Otherwise, try to infer nonnull for it.
4766 bool HasDereferenceable = Call.getParamDereferenceableBytes(ArgNo) > 0;
4767 if (Call.paramHasAttr(ArgNo, Attribute::NonNull) ||
4768 (HasDereferenceable &&
4769 !NullPointerIsDefined(Call.getFunction(),
4770 V->getType()->getPointerAddressSpace()))) {
4771 if (Value *Res = simplifyNonNullOperand(V, HasDereferenceable)) {
4772 replaceOperand(Call, ArgNo, Res);
4773 Changed = true;
4774 }
4775 } else if (isKnownNonZero(V,
4776 getSimplifyQuery().getWithInstruction(&Call))) {
4777 ArgNos.push_back(ArgNo);
4778 }
4779 }
4780 ArgNo++;
4781 }
4782
4783 assert(ArgNo == Call.arg_size() && "Call arguments not processed correctly.");
4784
4785 if (!ArgNos.empty()) {
4786 AttributeList AS = Call.getAttributes();
4787 LLVMContext &Ctx = Call.getContext();
4788 AS = AS.addParamAttribute(Ctx, ArgNos,
4789 Attribute::get(Ctx, Attribute::NonNull));
4790 Call.setAttributes(AS);
4791 Changed = true;
4792 }
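// For example (illustrative, hypothetical IR): if %p is known non-null at
// the call site,
//   call void @use(ptr %p)
// is annotated as
//   call void @use(ptr nonnull %p)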
4793
4794 // If the callee is a pointer to a function, attempt to move any casts to the
4795 // arguments of the call/callbr/invoke.
4796 Value *Callee = Call.getCalledOperand();
4797 Function *CalleeF = dyn_cast<Function>(Callee);
4798 if ((!CalleeF || CalleeF->getFunctionType() != Call.getFunctionType()) &&
4799 transformConstExprCastCall(Call))
4800 return nullptr;
4801
4802 if (CalleeF) {
4803 // Remove the convergent attr on calls when the callee is not convergent.
4804 if (Call.isConvergent() && !CalleeF->isConvergent() &&
4805 !CalleeF->isIntrinsic()) {
4806 LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
4807 << "\n");
4808 Call.setNotConvergent();
4809 return &Call;
4810 }
4811
4812 // If the call and callee calling conventions don't match, and neither one
4813 // of the calling conventions is compatible with the C calling convention,
4814 // this call must be unreachable, as the call is undefined.
4815 if ((CalleeF->getCallingConv() != Call.getCallingConv() &&
4816 !(CalleeF->getCallingConv() == llvm::CallingConv::C &&
4817 TargetLibraryInfoImpl::isCallingConvCCompatible(&Call)) &&
4818 !(Call.getCallingConv() == llvm::CallingConv::C &&
4819 TargetLibraryInfoImpl::isCallingConvCCompatible(CalleeF))) &&
4820 // Only do this for calls to a function with a body. A prototype may
4821 // not actually end up matching the implementation's calling conv for a
4822 // variety of reasons (e.g. it may be written in assembly).
4823 !CalleeF->isDeclaration()) {
4824 Instruction *OldCall = &Call;
4825 CreateNonTerminatorUnreachable(OldCall);
4826 // If OldCall does not return void then replaceInstUsesWith poison.
4827 // This allows ValueHandlers and custom metadata to adjust themselves.
4828 if (!OldCall->getType()->isVoidTy())
4829 replaceInstUsesWith(*OldCall, PoisonValue::get(OldCall->getType()));
4830 if (isa<CallInst>(OldCall))
4831 return eraseInstFromFunction(*OldCall);
4832
4833 // We cannot remove an invoke or a callbr, because it would change the
4834 // CFG, just change the callee to a null pointer.
4835 cast<CallBase>(OldCall)->setCalledFunction(
4836 CalleeF->getFunctionType(),
4837 Constant::getNullValue(CalleeF->getType()));
4838 return nullptr;
4839 }
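// Illustrative scenario for the block above (hypothetical, not from this
// file): a function defined with fastcc but called through the default C
// convention is undefined, so the call's uses are replaced with poison and
// the call itself is erased behind an unreachable idiom (or, for an invoke
// or callbr, its callee is replaced with a null pointer).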
4840 }
4841
4842 // Calling a null function pointer is undefined if a null address isn't
4843 // dereferenceable.
4844 if ((isa<ConstantPointerNull>(Callee) &&
4845 !NullPointerIsDefined(Call.getFunction())) ||
4846 isa<UndefValue>(Callee)) {
4847 // If Call does not return void then replaceInstUsesWith poison.
4848 // This allows ValueHandlers and custom metadata to adjust themselves.
4849 if (!Call.getType()->isVoidTy())
4850 replaceInstUsesWith(Call, PoisonValue::get(Call.getType()));
4851
4852 if (Call.isTerminator()) {
4853 // Can't remove an invoke or callbr because we cannot change the CFG.
4854 return nullptr;
4855 }
4856
4857 // This instruction is not reachable, just remove it.
4858 CreateNonTerminatorUnreachable(&Call);
4859 return eraseInstFromFunction(Call);
4860 }
4861
4862 if (IntrinsicInst *II = findInitTrampoline(Callee))
4863 return transformCallThroughTrampoline(Call, *II);
4864
4865 // Combine calls involving pointer authentication intrinsics.
4866 if (Instruction *NewCall = foldPtrAuthIntrinsicCallee(Call))
4867 return NewCall;
4868
4869 // Combine calls to ptrauth constants.
4870 if (Instruction *NewCall = foldPtrAuthConstantCallee(Call))
4871 return NewCall;
4872
4873 if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
4874 InlineAsm *IA = cast<InlineAsm>(Callee);
4875 if (!IA->canThrow()) {
4876 // Normal inline asm calls cannot throw - mark them
4877 // 'nounwind'.
4878 Call.setDoesNotThrow();
4879 Changed = true;
4880 }
4881 }
4882
4883 // Try to optimize the call if possible; we require DataLayout for most of
4884 // this. None of these calls are seen as possibly dead so go ahead and
4885 // delete the instruction now.
4886 if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
4887 Instruction *I = tryOptimizeCall(CI);
4888 // If we changed something, return the result. Otherwise fall through
4889 // to the remaining checks.
4890 if (I) return eraseInstFromFunction(*I);
4891 }
4892
4893 if (!Call.use_empty() && !Call.isMustTailCall())
4894 if (Value *ReturnedArg = Call.getReturnedArgOperand()) {
4895 Type *CallTy = Call.getType();
4896 Type *RetArgTy = ReturnedArg->getType();
4897 if (RetArgTy->canLosslesslyBitCastTo(CallTy))
4898 return replaceInstUsesWith(
4899 Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));
4900 }
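// For example (illustrative, hypothetical IR): given
//   %r = call ptr @dup(ptr returned %p)
// every use of %r can be rewritten to use %p directly, inserting a
// bit-or-pointer cast first when the types differ.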
4901
4902 // Drop unnecessary callee_type metadata from calls that were converted
4903 // into direct calls.
4904 if (Call.getMetadata(LLVMContext::MD_callee_type) && !Call.isIndirectCall()) {
4905 Call.setMetadata(LLVMContext::MD_callee_type, nullptr);
4906 Changed = true;
4907 }
4908
4909 // Drop unnecessary kcfi operand bundles from calls that were converted
4910 // into direct calls.
4911 auto Bundle = Call.getOperandBundle(LLVMContext::OB_kcfi);
4912 if (Bundle && !Call.isIndirectCall()) {
4913 DEBUG_WITH_TYPE(DEBUG_TYPE "-kcfi", {
4914 if (CalleeF) {
4915 ConstantInt *FunctionType = nullptr;
4916 ConstantInt *ExpectedType = cast<ConstantInt>(Bundle->Inputs[0]);
4917
4918 if (MDNode *MD = CalleeF->getMetadata(LLVMContext::MD_kcfi_type))
4919 FunctionType = mdconst::extract<ConstantInt>(MD->getOperand(0));
4920
4921 if (FunctionType &&
4922 FunctionType->getZExtValue() != ExpectedType->getZExtValue())
4923 dbgs() << Call.getModule()->getName()
4924 << ": warning: kcfi: " << Call.getCaller()->getName()
4925 << ": call to " << CalleeF->getName()
4926 << " using a mismatching function pointer type\n";
4927 }
4928 });
4929
4930 return CallBase::removeOperandBundle(&Call, LLVMContext::OB_kcfi);
4931 }
4932
4933 if (isRemovableAlloc(&Call, &TLI))
4934 return visitAllocSite(Call);
4935
4936 // Handle intrinsics which can be used in both call and invoke context.
4937 switch (Call.getIntrinsicID()) {
4938 case Intrinsic::experimental_gc_statepoint: {
4939 GCStatepointInst &GCSP = *cast<GCStatepointInst>(&Call);
4940 SmallPtrSet<Value *, 32> LiveGcValues;
4941 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
4942 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
4943
4944 // Remove the relocation if unused.
4945 if (GCR.use_empty()) {
4946 eraseInstFromFunction(GCR);
4947 continue;
4948 }
4949
4950 Value *DerivedPtr = GCR.getDerivedPtr();
4951 Value *BasePtr = GCR.getBasePtr();
4952
4953 // Undef is undef, even after relocation.
4954 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
4955 replaceInstUsesWith(GCR, UndefValue::get(GCR.getType()));
4956 eraseInstFromFunction(GCR);
4957 continue;
4958 }
4959
4960 if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
4962 // The relocation of null will be null for almost any collector.
4963 // TODO: provide a hook for this in GCStrategy. There might be some
4964 // weird collector for which this property does not hold.
4964 if (isa<ConstantPointerNull>(DerivedPtr)) {
4965 // Use null-pointer of gc_relocate's type to replace it.
4966 replaceInstUsesWith(GCR, ConstantPointerNull::get(PT));
4967 eraseInstFromFunction(GCR);
4968 continue;
4969 }
4970
4971 // isKnownNonNull -> nonnull attribute
4972 if (!GCR.hasRetAttr(Attribute::NonNull) &&
4973 isKnownNonZero(DerivedPtr,
4974 getSimplifyQuery().getWithInstruction(&Call))) {
4975 GCR.addRetAttr(Attribute::NonNull);
4976 // We discovered a new fact, so re-check users.
4977 Worklist.pushUsersToWorkList(GCR);
4978 }
4979 }
4980
4981 // If we have two copies of the same pointer in the statepoint argument
4982 // list, canonicalize to one. This may let us common gc.relocates.
4983 if (GCR.getBasePtr() == GCR.getDerivedPtr() &&
4984 GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) {
4985 auto *OpIntTy = GCR.getOperand(2)->getType();
4986 GCR.setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex()));
4987 }
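// Illustrative effect (hypothetical indices): if statepoint gc-live slots 0
// and 3 hold the same pointer, a gc.relocate of (base 0, derived 3) is
// rewritten to (base 0, derived 0), letting identical relocates be commoned
// later.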
4988
4989 // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
4990 // Canonicalize on the type from the uses to the defs
4991
4992 // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
4993 LiveGcValues.insert(BasePtr);
4994 LiveGcValues.insert(DerivedPtr);
4995 }
4996 std::optional<OperandBundleUse> Bundle =
4997 GCSP.getOperandBundle(LLVMContext::OB_gc_live);
4998 unsigned NumOfGCLives = LiveGcValues.size();
4999 if (!Bundle || NumOfGCLives == Bundle->Inputs.size())
5000 break;
5001 // We can reduce the size of the gc-live bundle.
5002 DenseMap<Value *, unsigned> Val2Idx;
5003 std::vector<Value *> NewLiveGc;
5004 for (Value *V : Bundle->Inputs) {
5005 auto [It, Inserted] = Val2Idx.try_emplace(V);
5006 if (!Inserted)
5007 continue;
5008 if (LiveGcValues.count(V)) {
5009 It->second = NewLiveGc.size();
5010 NewLiveGc.push_back(V);
5011 } else
5012 It->second = NumOfGCLives;
5013 }
5014 // Update all gc.relocates
5015 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
5016 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
5017 Value *BasePtr = GCR.getBasePtr();
5018 assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
5019 "Missed live gc for base pointer");
5020 auto *OpIntTy1 = GCR.getOperand(1)->getType();
5021 GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));
5022 Value *DerivedPtr = GCR.getDerivedPtr();
5023 assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
5024 "Missed live gc for derived pointer");
5025 auto *OpIntTy2 = GCR.getOperand(2)->getType();
5026 GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));
5027 }
5028 // Create new statepoint instruction.
5029 OperandBundleDef NewBundle("gc-live", std::move(NewLiveGc));
5030 return CallBase::Create(&Call, NewBundle);
5031 }
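// Illustrative effect of the gc-live shrinking above (hypothetical): a
// bundle "gc-live"(%a, %b, %c) where only %a is still referenced by a
// gc.relocate is replaced by "gc-live"(%a), with the relocates' index
// operands remapped accordingly.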
5032 default: { break; }
5033 }
5034
5035 return Changed ? &Call : nullptr;
5036}
5037
5038/// If the callee is a constexpr cast of a function, attempt to move the cast to
5039/// the arguments of the call/invoke.
5040/// CallBrInst is not supported.
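/// For example (illustrative, assuming 64-bit pointers): a call such as
///   %r = call ptr @f(ptr %p)
/// to a function defined as 'i64 @f(i64)' can be rewritten to call @f
/// directly, using no-op ptrtoint/inttoptr casts on the argument and result.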
5041bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
5042 auto *Callee =
5043 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
5044 if (!Callee)
5045 return false;
5046
5047 assert(!isa<CallBrInst>(Call) &&
5048 "CallBr's don't have a single point after a def to insert at");
5049
5050 // Don't perform the transform for declarations, which may not be fully
5051 // accurate. For example, void @foo() is commonly used as a placeholder for
5052 // unknown prototypes.
5053 if (Callee->isDeclaration())
5054 return false;
5055
5056 // If this is a call to a thunk function, don't remove the cast. Thunks are
5057 // used to transparently forward all incoming parameters and outgoing return
5058 // values, so it's important to leave the cast in place.
5059 if (Callee->hasFnAttribute("thunk"))
5060 return false;
5061
5062 // If this is a call to a naked function, the assembly might be
5063 // using an argument, or otherwise rely on the frame layout, so
5064 // the function prototype will mismatch.
5065 if (Callee->hasFnAttribute(Attribute::Naked))
5066 return false;
5067
5068 // If this is a musttail call, the callee's prototype must match the caller's
5069 // prototype with the exception of pointee types. The code below doesn't
5070 // implement that, so we can't do this transform.
5071 // TODO: Do the transform if it only requires adding pointer casts.
5072 if (Call.isMustTailCall())
5073 return false;
5074
5075 Instruction *Caller = &Call;
5076 const AttributeList &CallerPAL = Call.getAttributes();
5077
5078 // Okay, this is a cast from a function to a different type. Unless doing so
5079 // would cause a type conversion of one of our arguments, change this call to
5080 // be a direct call with arguments casted to the appropriate types.
5081 FunctionType *FT = Callee->getFunctionType();
5082 Type *OldRetTy = Caller->getType();
5083 Type *NewRetTy = FT->getReturnType();
5084
5085 // Check to see if we are changing the return type...
5086 if (OldRetTy != NewRetTy) {
5087
5088 if (NewRetTy->isStructTy())
5089 return false; // TODO: Handle multiple return values.
5090
5091 if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
5092 if (!Caller->use_empty())
5093 return false; // Cannot transform this return value.
5094 }
5095
5096 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
5097 AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
5098 if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(
5099 NewRetTy, CallerPAL.getRetAttrs())))
5100 return false; // Attribute not compatible with transformed value.
5101 }
5102
5103 // If the callbase is an invoke instruction, and the return value is
5104 // used by a PHI node in a successor, we cannot change the return type of
5105 // the call because there is no place to put the cast instruction (without
5106 // breaking the critical edge). Bail out in this case.
5107 if (!Caller->use_empty()) {
5108 BasicBlock *PhisNotSupportedBlock = nullptr;
5109 if (auto *II = dyn_cast<InvokeInst>(Caller))
5110 PhisNotSupportedBlock = II->getNormalDest();
5111 if (PhisNotSupportedBlock)
5112 for (User *U : Caller->users())
5113 if (PHINode *PN = dyn_cast<PHINode>(U))
5114 if (PN->getParent() == PhisNotSupportedBlock)
5115 return false;
5116 }
5117 }
5118
5119 unsigned NumActualArgs = Call.arg_size();
5120 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
5121
5122 // Prevent us from turning:
5123 // declare void @takes_i32_inalloca(i32* inalloca)
5124 // call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
5125 //
5126 // into:
5127 // call void @takes_i32_inalloca(i32* null)
5128 //
5129 // Similarly, avoid folding away bitcasts of byval calls.
5130 if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
5131 Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))
5132 return false;
5133
5134 auto AI = Call.arg_begin();
5135 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
5136 Type *ParamTy = FT->getParamType(i);
5137 Type *ActTy = (*AI)->getType();
5138
5139 if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
5140 return false; // Cannot transform this parameter value.
5141
5142 // Check if there are any incompatible attributes we cannot drop safely.
5143 if (AttrBuilder(FT->getContext(), CallerPAL.getParamAttrs(i))
5144 .overlaps(AttributeFuncs::typeIncompatible(
5145 ParamTy, CallerPAL.getParamAttrs(i),
5146 AttributeFuncs::ASK_UNSAFE_TO_DROP)))
5147 return false; // Attribute not compatible with transformed value.
5148
5149 if (Call.isInAllocaArgument(i) ||
5150 CallerPAL.hasParamAttr(i, Attribute::Preallocated))
5151 return false; // Cannot transform to and from inalloca/preallocated.
5152
5153 if (CallerPAL.hasParamAttr(i, Attribute::SwiftError))
5154 return false;
5155
5156 if (CallerPAL.hasParamAttr(i, Attribute::ByVal) !=
5157 Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))
5158 return false; // Cannot transform to or from byval.
5159 }
5160
5161 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
5162 !CallerPAL.isEmpty()) {
5163 // In this case we have more arguments than the new function type, but we
5164 // won't be dropping them. Check that these extra arguments have attributes
5165 // that are compatible with being a vararg call argument.
5166 unsigned SRetIdx;
5167 if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
5168 SRetIdx - AttributeList::FirstArgIndex >= FT->getNumParams())
5169 return false;
5170 }
5171
5172 // Okay, we decided that this is a safe thing to do: go ahead and start
5173 // inserting cast instructions as necessary.
5174 SmallVector<Value *, 8> Args;
5175 SmallVector<AttributeSet, 8> ArgAttrs;
5176 Args.reserve(NumActualArgs);
5177 ArgAttrs.reserve(NumActualArgs);
5178
5179 // Get any return attributes.
5180 AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
5181
5182 // If the return value is not being used, the type may not be compatible
5183 // with the existing attributes. Wipe out any problematic attributes.
5184 RAttrs.remove(
5185 AttributeFuncs::typeIncompatible(NewRetTy, CallerPAL.getRetAttrs()));
5186
5187 LLVMContext &Ctx = Call.getContext();
5188 AI = Call.arg_begin();
5189 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
5190 Type *ParamTy = FT->getParamType(i);
5191
5192 Value *NewArg = *AI;
5193 if ((*AI)->getType() != ParamTy)
5194 NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
5195 Args.push_back(NewArg);
5196
5197 // Add any parameter attributes except the ones incompatible with the new
5198 // type. Note that we made sure all incompatible ones are safe to drop.
5199 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(
5200 ParamTy, CallerPAL.getParamAttrs(i), AttributeFuncs::ASK_SAFE_TO_DROP);
5201 ArgAttrs.push_back(
5202 CallerPAL.getParamAttrs(i).removeAttributes(Ctx, IncompatibleAttrs));
5203 }
5204
5205 // If the function takes more arguments than the call was taking, add them
5206 // now.
5207 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
5208 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
5209 ArgAttrs.push_back(AttributeSet());
5210 }
5211
5212 // If we are removing arguments to the function, emit an obnoxious warning.
5213 if (FT->getNumParams() < NumActualArgs) {
5214 // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
5215 if (FT->isVarArg()) {
5216 // Add all of the arguments in their promoted form to the arg list.
5217 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
5218 Type *PTy = getPromotedType((*AI)->getType());
5219 Value *NewArg = *AI;
5220 if (PTy != (*AI)->getType()) {
5221 // Must promote to pass through va_arg area!
5222 Instruction::CastOps opcode =
5223 CastInst::getCastOpcode(*AI, false, PTy, false);
5224 NewArg = Builder.CreateCast(opcode, *AI, PTy);
5225 }
5226 Args.push_back(NewArg);
5227
5228 // Add any parameter attributes.
5229 ArgAttrs.push_back(CallerPAL.getParamAttrs(i));
5230 }
5231 }
5232 }
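// For example (illustrative): an extra 'i8 %c' argument passed to a varargs
// callee is first promoted to i32 here, mirroring C's default argument
// promotions for the va_arg area.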
5233
5234 AttributeSet FnAttrs = CallerPAL.getFnAttrs();
5235
5236 if (NewRetTy->isVoidTy())
5237 Caller->setName(""); // Void type should not have a name.
5238
5239 assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
5240 "missing argument attributes");
5241 AttributeList NewCallerPAL = AttributeList::get(
5242 Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
5243
5244 SmallVector<OperandBundleDef, 1> OpBundles;
5246 Call.getOperandBundlesAsDefs(OpBundles);
5246
5247 CallBase *NewCall;
5248 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
5249 NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
5250 II->getUnwindDest(), Args, OpBundles);
5251 } else {
5252 NewCall = Builder.CreateCall(Callee, Args, OpBundles);
5253 cast<CallInst>(NewCall)->setTailCallKind(
5254 cast<CallInst>(Caller)->getTailCallKind());
5255 }
5256 NewCall->takeName(Caller);
5257 NewCall->setCallingConv(Call.getCallingConv());
5258 NewCall->setAttributes(NewCallerPAL);
5259
5260 // Preserve prof metadata if any.
5261 NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});
5262
5263 // Insert a cast of the return type as necessary.
5264 Instruction *NC = NewCall;
5265 Value *NV = NC;
5266 if (OldRetTy != NV->getType() && !Caller->use_empty()) {
5267 assert(!NV->getType()->isVoidTy());
5268 NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
5269 NC->setDebugLoc(Caller->getDebugLoc());
5270
5271 auto OptInsertPt = NewCall->getInsertionPointAfterDef();
5272 assert(OptInsertPt && "No place to insert cast");
5273 InsertNewInstBefore(NC, *OptInsertPt);
5274 Worklist.pushUsersToWorkList(*Caller);
5275 }
5276
5277 if (!Caller->use_empty())
5278 replaceInstUsesWith(*Caller, NV);
5279 else if (Caller->hasValueHandle()) {
5280 if (OldRetTy == NV->getType())
5281 ValueHandleBase::ValueIsRAUWd(Caller, NV);
5282 else
5283 // We cannot call ValueIsRAUWd with a different type, and the
5284 // actual tracked value will disappear.
5285 ValueHandleBase::ValueIsDeleted(Caller);
5286 }
5287
5288 eraseInstFromFunction(*Caller);
5289 return true;
5290}
5291
5292/// Turn a call to a function created by init_trampoline / adjust_trampoline
5293/// intrinsic pair into a direct call to the underlying function.
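/// For example (illustrative, hypothetical IR):
///   call void @llvm.init.trampoline(ptr %tramp, ptr @f, ptr %nval)
///   %fp = call ptr @llvm.adjust.trampoline(ptr %tramp)
///   call void %fp(i32 %x)
/// becomes a direct call that passes the chain explicitly, assuming @f marks
/// its first parameter 'nest':
///   call void @f(ptr nest %nval, i32 %x)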
5294 Instruction *
5295 InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,
5296 IntrinsicInst &Tramp) {
5297 FunctionType *FTy = Call.getFunctionType();
5298 AttributeList Attrs = Call.getAttributes();
5299
5300 // If the call already has the 'nest' attribute somewhere then give up -
5301 // otherwise 'nest' would occur twice after splicing in the chain.
5302 if (Attrs.hasAttrSomewhere(Attribute::Nest))
5303 return nullptr;
5304
5305 Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
5306 FunctionType *NestFTy = NestF->getFunctionType();
5307
5308 AttributeList NestAttrs = NestF->getAttributes();
5309 if (!NestAttrs.isEmpty()) {
5310 unsigned NestArgNo = 0;
5311 Type *NestTy = nullptr;
5312 AttributeSet NestAttr;
5313
5314 // Look for a parameter marked with the 'nest' attribute.
5315 for (FunctionType::param_iterator I = NestFTy->param_begin(),
5316 E = NestFTy->param_end();
5317 I != E; ++NestArgNo, ++I) {
5318 AttributeSet AS = NestAttrs.getParamAttrs(NestArgNo);
5319 if (AS.hasAttribute(Attribute::Nest)) {
5320 // Record the parameter type and any other attributes.
5321 NestTy = *I;
5322 NestAttr = AS;
5323 break;
5324 }
5325 }
5326
5327 if (NestTy) {
5328 std::vector<Value*> NewArgs;
5329 std::vector<AttributeSet> NewArgAttrs;
5330 NewArgs.reserve(Call.arg_size() + 1);
5331 NewArgAttrs.reserve(Call.arg_size());
5332
5333 // Insert the nest argument into the call argument list, which may
5334 // mean appending it. Likewise for attributes.
5335
5336 {
5337 unsigned ArgNo = 0;
5338 auto I = Call.arg_begin(), E = Call.arg_end();
5339 do {
5340 if (ArgNo == NestArgNo) {
5341 // Add the chain argument and attributes.
5342 Value *NestVal = Tramp.getArgOperand(2);
5343 if (NestVal->getType() != NestTy)
5344 NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
5345 NewArgs.push_back(NestVal);
5346 NewArgAttrs.push_back(NestAttr);
5347 }
5348
5349 if (I == E)
5350 break;
5351
5352 // Add the original argument and attributes.
5353 NewArgs.push_back(*I);
5354 NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
5355
5356 ++ArgNo;
5357 ++I;
5358 } while (true);
5359 }
5360
5361 // The trampoline may have been bitcast to a bogus type (FTy).
5362 // Handle this by synthesizing a new function type, equal to FTy
5363 // with the chain parameter inserted.
5364
5365 std::vector<Type*> NewTypes;
5366 NewTypes.reserve(FTy->getNumParams()+1);
5367
5368 // Insert the chain's type into the list of parameter types, which may
5369 // mean appending it.
5370 {
5371 unsigned ArgNo = 0;
5372 FunctionType::param_iterator I = FTy->param_begin(),
5373 E = FTy->param_end();
5374
5375 do {
5376 if (ArgNo == NestArgNo)
5377 // Add the chain's type.
5378 NewTypes.push_back(NestTy);
5379
5380 if (I == E)
5381 break;
5382
5383 // Add the original type.
5384 NewTypes.push_back(*I);
5385
5386 ++ArgNo;
5387 ++I;
5388 } while (true);
5389 }
5390
5391 // Replace the trampoline call with a direct call. Let the generic
5392 // code sort out any function type mismatches.
5393 FunctionType *NewFTy =
5394 FunctionType::get(FTy->getReturnType(), NewTypes, FTy->isVarArg());
5395 AttributeList NewPAL =
5396 AttributeList::get(FTy->getContext(), Attrs.getFnAttrs(),
5397 Attrs.getRetAttrs(), NewArgAttrs);
5398
5399 SmallVector<OperandBundleDef, 1> OpBundles;
5400 Call.getOperandBundlesAsDefs(OpBundles);
5401
5402 Instruction *NewCaller;
5403 if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
5404 NewCaller = InvokeInst::Create(NewFTy, NestF, II->getNormalDest(),
5405 II->getUnwindDest(), NewArgs, OpBundles);
5406 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
5407 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
5408 } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
5409 NewCaller =
5410 CallBrInst::Create(NewFTy, NestF, CBI->getDefaultDest(),
5411 CBI->getIndirectDests(), NewArgs, OpBundles);
5412 cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
5413 cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
5414 } else {
5415 NewCaller = CallInst::Create(NewFTy, NestF, NewArgs, OpBundles);
5416 cast<CallInst>(NewCaller)->setTailCallKind(
5417 cast<CallInst>(Call).getTailCallKind());
5418 cast<CallInst>(NewCaller)->setCallingConv(
5419 cast<CallInst>(Call).getCallingConv());
5420 cast<CallInst>(NewCaller)->setAttributes(NewPAL);
5421 }
5422 NewCaller->setDebugLoc(Call.getDebugLoc());
5423
5424 return NewCaller;
5425 }
5426 }
5427
5428 // Replace the trampoline call with a direct call. Since there is no 'nest'
5429 // parameter, there is no need to adjust the argument list. Let the generic
5430 // code sort out any function type mismatches.
5431 Call.setCalledFunction(FTy, NestF);
5432 return &Call;
5433}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
@ Scaled
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
#define X(NUM, ENUM, NAME)
Definition ELF.h:851
BitTracker BT
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static SDValue foldBitOrderCrossLogicOp(SDNode *N, SelectionDAG &DAG)
#define Check(C,...)
#define DEBUG_TYPE
IRTranslator LLVM IR MI
static Type * getPromotedType(Type *Ty)
Return the specified type promoted as it would be to pass though a va_arg area.
static Instruction * createOverflowTuple(IntrinsicInst *II, Value *Result, Constant *Overflow)
Creates a result tuple for an overflow intrinsic II with a given Result and a constant Overflow value...
static IntrinsicInst * findInitTrampolineFromAlloca(Value *TrampMem)
static bool removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC, std::function< bool(const IntrinsicInst &)> IsStart)
static bool inputDenormalIsDAZ(const Function &F, const Type *Ty)
static Instruction * reassociateMinMaxWithConstantInOperand(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
If this min/max has a matching min/max operand with a constant, try to push the constant operand into...
static bool isIdempotentBinaryIntrinsic(Intrinsic::ID IID)
Helper to match idempotent binary intrinsics, namely, intrinsics where f(f(x, y), y) == f(x,...
static bool signBitMustBeTheSame(Value *Op0, Value *Op1, const SimplifyQuery &SQ)
Return true if two values Op0 and Op1 are known to have the same sign.
static Value * optimizeModularFormat(CallInst *CI, IRBuilderBase &B)
static Instruction * moveAddAfterMinMax(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0.
static Instruction * simplifyInvariantGroupIntrinsic(IntrinsicInst &II, InstCombinerImpl &IC)
This function transforms launder.invariant.group and strip.invariant.group like: launder(launder(x)) ...
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E, unsigned NumOperands)
static std::optional< bool > getKnownSign(Value *Op, const SimplifyQuery &SQ)
static cl::opt< unsigned > GuardWideningWindow("instcombine-guard-widening-window", cl::init(3), cl::desc("How wide an instruction window to bypass looking for " "another guard"))
static bool hasUndefSource(AnyMemTransferInst *MI)
Recognize a memcpy/memmove from a trivially otherwise unused alloca.
static Instruction * factorizeMinMaxTree(IntrinsicInst *II)
Reduce a sequence of min/max intrinsics with a common operand.
static Instruction * foldClampRangeOfTwo(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
If we have a clamp pattern like max (min X, 42), 41 – where the output can only be one of two possibl...
static Value * simplifyReductionOperand(Value *Arg, bool CanReorderLanes)
static IntrinsicInst * findInitTrampolineFromBB(IntrinsicInst *AdjustTramp, Value *TrampMem)
static Value * foldIntrinsicUsingDistributiveLaws(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
static std::optional< bool > getKnownSignOrZero(Value *Op, const SimplifyQuery &SQ)
static Value * foldMinimumOverTrailingOrLeadingZeroCount(Value *I0, Value *I1, const DataLayout &DL, InstCombiner::BuilderTy &Builder)
Fold an unsigned minimum of trailing or leading zero bits counts: umin(cttz(CtOp1,...
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "(X ROp Y) LOp Z" is always equal to "(X LOp Z) ROp (Y LOp Z)".
static Value * foldIdempotentBinaryIntrinsicRecurrence(InstCombinerImpl &IC, IntrinsicInst *II)
Attempt to simplify value-accumulating recurrences of kind: umax.acc = phi i8 [ umax,...
static Instruction * foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC)
static Instruction * simplifyNeonTbl(IntrinsicInst &II, InstCombiner &IC, bool IsExtension)
Convert tbl/tbx intrinsics to shufflevector if the mask is constant, and at most two source operands ...
static Instruction * foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC)
static IntrinsicInst * findInitTrampoline(Value *Callee)
static FCmpInst::Predicate fpclassTestIsFCmp0(FPClassTest Mask, const Function &F, Type *Ty)
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
static Value * reassociateMinMaxWithConstants(IntrinsicInst *II, IRBuilderBase &Builder, const SimplifyQuery &SQ)
If this min/max has a constant operand and an operand that is a matching min/max with a constant oper...
static CallInst * canonicalizeConstantArg0ToArg1(CallInst &Call)
static Instruction * foldNeonShift(IntrinsicInst *II, InstCombinerImpl &IC)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
static bool hasNoSignedWrap(BinaryOperator &I)
static bool inputDenormalIsIEEE(DenormalMode Mode)
Return true if it's possible to assume IEEE treatment of input denormals in F for Val.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
static const Function * getCalledFunction(const Value *V)
This file contains the declarations for metadata subclasses.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
if(auto Err=PB.parsePassPipeline(MPM, Passes)) return wrap(std MPM run * Mod
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
This file implements the SmallBitVector class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:114
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition Debug.h:72
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:344
static LLVM_ABI bool hasSignBitInMSB(const fltSemantics &)
Definition APFloat.cpp:260
bool isNegative() const
Definition APFloat.h:1516
void clearSign()
Definition APFloat.h:1353
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
Definition APFloat.h:1143
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
Definition APInt.h:230
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1208
LLVM_ABI APInt usub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1999
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Definition APInt.h:1189
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition APInt.cpp:1708
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1511
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition APInt.h:1118
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1979
LLVM_ABI APInt uadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1986
static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
Definition APInt.cpp:651
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition APInt.h:220
LLVM_ABI APInt uadd_sat(const APInt &RHS) const
Definition APInt.cpp:2087
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:335
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1992
static APSInt getMinValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the minimum integer value with the given bit width and signedness.
Definition APSInt.h:310
static APSInt getMaxValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the maximum integer value with the given bit width and signedness.
Definition APSInt.h:302
This class represents any memset intrinsic.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition ArrayRef.h:195
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
static LLVM_ABI AttributeSet get(LLVMContext &C, const AttrBuilder &B)
static LLVM_ABI Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
static LLVM_ABI Attribute getWithDereferenceableBytes(LLVMContext &Context, uint64_t Bytes)
static LLVM_ABI Attribute getWithDereferenceableOrNullBytes(LLVMContext &Context, uint64_t Bytes)
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
static LLVM_ABI Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
InstListType::reverse_iterator reverse_iterator
Definition BasicBlock.h:172
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI bool isSigned() const
Whether the intrinsic is signed or unsigned.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
static BinaryOperator * CreateFAddFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
Definition InstrTypes.h:236
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
static BinaryOperator * CreateNSW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition InstrTypes.h:279
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition InstrTypes.h:294
static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
Definition InstrTypes.h:244
static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
Definition InstrTypes.h:248
static BinaryOperator * CreateFSubFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
Definition InstrTypes.h:240
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
void setDoesNotThrow()
MaybeAlign getRetAlign() const
Extract the alignment of the return value.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool isInAllocaArgument(unsigned ArgNo) const
Determine whether this argument is passed in an alloca.
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
uint64_t getParamDereferenceableBytes(unsigned i) const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
void setNotConvergent()
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
Attribute getFnAttr(StringRef Kind) const
Get the attribute of a given kind for the function.
bool doesNotThrow() const
Determine if the call cannot unwind.
void addRetAttr(Attribute::AttrKind Kind)
Adds the attribute to the return value.
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
Value * getReturnedArgOperand() const
If one of the arguments has the 'returned' attribute, returns its operand value.
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
void setCalledOperand(Value *V)
void addParamAttrs(unsigned ArgNo, const AttrBuilder &B)
Adds attributes to the indicated argument.
static LLVM_ABI CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
void setCalledFunction(Function *Fn)
Sets the function called, including updating the function type.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This class represents a function call, abstracting a target machine's calling convention.
bool isNoTailCall() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
bool isMustTailCall() const
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
Predicate getUnorderedPredicate() const
Definition InstrTypes.h:811
static LLVM_ABI ConstantAggregateZero * get(Type *Ty)
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getInfinity(Type *Ty, bool Negative=false)
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the l...
Definition Constants.h:269
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static LLVM_ABI ConstantPtrAuth * get(Constant *Ptr, ConstantInt *Key, ConstantInt *Disc, Constant *AddrDisc, Constant *DeactivationSymbol)
Return a pointer signed with the specified parameters.
This class represents a range of values.
LLVM_ABI ConstantRange multiply(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
LLVM_ABI ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Record of a variable value-assignment, aka a non instruction representation of the dbg....
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:256
unsigned size() const
Definition DenseMap.h:110
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition DenseMap.h:174
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Lightweight error class with error context and mandatory checking.
Definition Error.h:159
static FMFSource intersect(Value *A, Value *B)
Intersect the FMF from two instructions.
Definition IRBuilder.h:107
This class represents an extension of floating point types.
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
bool allowReassoc() const
Flag queries.
Definition FMF.h:67
An instruction for ordering other memory operations.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Class to represent function types.
Type::subtype_iterator param_iterator
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
bool isConvergent() const
Determine if the call is convergent.
Definition Function.h:618
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition Function.h:354
bool doesNotThrow() const
Determine if the function cannot unwind.
Definition Function.h:602
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition Function.h:251
LLVM_ABI Value * getBasePtr() const
unsigned getBasePtrIndex() const
The index into the associate statepoint's argument list which contains the base pointer of the pointe...
LLVM_ABI Value * getDerivedPtr() const
unsigned getDerivedPtrIndex() const
The index into the associate statepoint's argument list which contains the pointer whose relocation t...
std::vector< const GCRelocateInst * > getGCRelocates() const
Get list of all gc reloactes linked to this statepoint May contain several relocations for the same b...
Definition Statepoint.h:206
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this GlobalObject.
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:337
PointerType * getType() const
Global values are always pointers.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
LLVM_ABI Value * CreateLaunderInvariantGroup(Value *Ptr)
Create a launder.invariant.group intrinsic call.
ConstantInt * getTrue()
Get the constant value for i1 true.
Definition IRBuilder.h:509
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > OverloadTypes, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using OverloadTypes.
LLVM_ABI Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1460
LLVM_ABI CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition IRBuilder.h:2113
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Definition IRBuilder.h:2642
ConstantInt * getFalse()
Get the constant value for i1 false.
Definition IRBuilder.h:514
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2477
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2240
LLVM_ABI Value * CreateStripInvariantGroup(Value *Ptr)
Create a strip.invariant.group intrinsic call.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible,...
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * SimplifyAnyMemSet(AnyMemSetInst *MI)
Instruction * foldItoFPtoI(FPToIntTy &FI)
fpto{s/u}i.sat --> X or zext(X) or sext(X) or trunc(X) This is safe if the intermediate type has enou...
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitCallBrInst(CallBrInst &CBI)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Value * foldReversedIntrinsicOperands(IntrinsicInst *II)
If all arguments of the intrinsic are reverses, try to pull the reverse after the intrinsic.
Value * tryGetLog2(Value *Op, bool AssumeNonZero)
Instruction * visitFenceInst(FenceInst &FI)
Instruction * foldShuffledIntrinsicOperands(IntrinsicInst *II)
If all arguments of the intrinsic are unary shuffles with the same mask, try to shuffle after the int...
Instruction * visitInvokeInst(InvokeInst &II)
bool SimplifyDemandedInstructionBits(Instruction &Inst)
Tries to simplify operands to an integer instruction based on its demanded bits.
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Instruction * visitVAEndInst(VAEndInst &I)
Instruction * matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps, bool MatchBitReversals)
Given an initial instruction, check to see if it is the root of a bswap/bitreverse idiom.
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC that has property: shuffle(NewC, ShMask) = C Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * SimplifyAnyMemTransfer(AnyMemTransferInst *MI)
OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const
Instruction * visitCallInst(CallInst &CI)
CallInst simplification.
The core instruction combiner logic.
SimplifyQuery SQ
unsigned ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
DominatorTree & getDominatorTree() const
BlockFrequencyInfo * BFI
TargetLibraryInfo & TLI
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
const DataLayout & DL
DomConditionCache DC
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
AssumptionCache & AC
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const Instruction *CxtI=nullptr, unsigned Depth=0) const
DominatorTree & DT
ProfileSummaryInfo * PSI
BuilderTy & Builder
AssumptionCache & getAssumptionCache() const
OptimizationRemarkEmitter & ORE
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
LLVM_ABI Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
LLVM_ABI void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI bool mayWriteToMemory() const LLVM_READONLY
Return true if this instruction may modify memory.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
LLVM_ABI void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
bool isTerminator() const
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
LLVM_ABI std::optional< InstListType::iterator > getInsertionPointAfterDef()
Get the first insertion point at which the result of this instruction is defined.
LLVM_ABI bool isIdenticalTo(const Instruction *I) const LLVM_READONLY
Return true if the specified instruction is exactly identical to the current one.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
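A short sketch of the wrapper in use; the predicate below is illustrative only, not part of this file:

#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// Returns true if CI is one of the integer min/max intrinsics.
static bool isIntMinMaxIntrinsic(const CallInst &CI) {
  if (const auto *II = dyn_cast<IntrinsicInst>(&CI))
    switch (II->getIntrinsicID()) {
    case Intrinsic::smax:
    case Intrinsic::smin:
    case Intrinsic::umax:
    case Intrinsic::umin:
      return true;
    default:
      break;
    }
  return false;
}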
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Metadata node.
Definition Metadata.h:1080
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:614
static LLVM_ABI MetadataAsValue * get(LLVMContext &Context, Metadata *MD)
Definition Metadata.cpp:110
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
bool isSigned() const
Whether the intrinsic is signed or unsigned.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
StringRef getName() const
Get a short "name" for the module.
Definition Module.h:269
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Definition Operator.h:43
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition Operator.h:78
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition Operator.h:111
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition Operator.h:105
bool isCommutative() const
Return true if the instruction is commutative.
Definition Operator.h:128
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Represents a saturating add/sub intrinsic.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is small.
SmallBitVector & set()
bool test(unsigned Idx) const
bool all() const
Returns true if all bits are set.
size_type size() const
Definition SmallPtrSet.h:99
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better as a string.
Definition SmallString.h:26
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Class to represent struct types.
static LLVM_ABI bool isCallingConvCCompatible(CallBase *CI)
Returns true if call site / callee has cdecl-compatible calling conventions.
Provides information about what library functions are available for the current target.
This class represents a truncation of integer types.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
Definition Type.cpp:314
LLVM_ABI unsigned getIntegerBitWidth() const
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:263
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Definition Type.cpp:157
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:278
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitWidth, whilst keeping the old number of lanes.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:110
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
static UnaryOperator * CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Definition InstrTypes.h:139
static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Definition InstrTypes.h:147
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI unsigned getOperandNo() const
Return the operand # of this use in its User.
Definition Use.cpp:35
void setOperand(unsigned i, Value *Val)
Definition User.h:212
Value * getOperand(unsigned i) const
Definition User.h:207
This represents the llvm.va_end intrinsic.
static LLVM_ABI void ValueIsDeleted(Value *V)
Definition Value.cpp:1232
static LLVM_ABI void ValueIsRAUWd(Value *Old, Value *New)
Definition Value.cpp:1285
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
static constexpr uint64_t MaximumAlignment
Definition Value.h:808
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
iterator_range< user_iterator > users()
Definition Value.h:426
static LLVM_ABI void dropDroppableUse(Use &U)
Remove the droppable use U.
Definition Value.cpp:222
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:709
bool use_empty() const
Definition Value.h:346
static constexpr unsigned MaxAlignmentExponent
The maximum alignment for instructions.
Definition Value.h:807
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:399
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
Definition TypeSize.h:269
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr ScalarTy getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns a value X where RHS.multiplyCoefficientBy(X) will result in a value whose quantity matches ou...
Definition TypeSize.h:277
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:171
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > OverloadTys={})
Look up the Function declaration of the intrinsic id in the Module M.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
match_combine_and< Ty... > m_CombineAnd(const Ty &...Ps)
Combine pattern matchers matching all of Ps patterns.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
auto m_PtrToIntOrAddr(const OpTy &Op)
Matches PtrToInt or PtrToAddr.
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
auto m_Poison()
Match an arbitrary poison constant.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
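Sketch of m_APInt in use: it binds a scalar ConstantInt or a splat vector constant alike, so one pattern covers both forms. The helper name is made up for illustration:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

using namespace llvm;
using namespace llvm::PatternMatch;

// Matches `shl %x, C` where C is exactly half the bit width, e.g. shl i32 %x, 16.
static bool isShiftByHalfWidth(Value *V) {
  const APInt *ShAmt;
  return match(V, m_Shl(m_Value(), m_APInt(ShAmt))) &&
         *ShAmt == ShAmt->getBitWidth() / 2;
}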
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
match_bind< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
match_deferred< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match() expression.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat.
auto match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
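Building on the usage shown above, a hedged sketch of an idempotence fold, fabs(fabs(X)) -> fabs(X); the function is hypothetical:

#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;
using namespace llvm::PatternMatch;

static Value *stripRedundantFAbs(IntrinsicInst &II) {
  Value *X;
  // The inner fabs already yields a value with a cleared sign bit.
  if (II.getIntrinsicID() == Intrinsic::fabs &&
      match(II.getArgOperand(0), m_Intrinsic<Intrinsic::fabs>(m_Value(X))))
    return II.getArgOperand(0);
  return nullptr;
}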
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()
Match a floating-point negative zero.
auto m_BinOp()
Match an arbitrary binary operation and ignore it.
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
auto m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
auto m_Constant()
Match an arbitrary Constant and ignore it.
match_combine_or< match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > >, OpTy > m_ZExtOrSExtOrSelf(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
cst_pred_ty< is_strictlypositive > m_StrictlyPositive()
Match an integer or vector of strictly positive values.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match an integer or vector negated power-of-2.
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
auto m_MaxOrMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
auto m_c_MaxOrMin(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
Exact_match< T > m_Exact(const T &SubPattern)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
auto m_UnOp()
Match an arbitrary unary operation and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
BinOpPred_match< LHS, RHS, is_bitwiselogic_op > m_BitwiseLogic(const LHS &L, const RHS &R)
Matches bitwise logic operations.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty m_CopySign(const Opnd0 &Op0, const Opnd1 &Op1)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
auto m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Definition LLVMContext.h:55
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
Return a range of dbg_assign records for which Inst performs the assignment they encode.
Definition DebugInfo.h:203
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
constexpr double e
DiagnosticInfoOptimizationBase::Argument NV
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI cl::opt< bool > EnableKnowledgeRetention
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition MathExtras.h:344
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
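These enumerators are what computeOverflow (listed earlier) reports; a minimal sketch of acting on the result, assuming the combiner object exposes computeOverflow as listed above:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"

using namespace llvm;

// Illustrative only: tag an add with nsw once signed overflow is disproven.
static void addNSWIfProven(InstCombiner &IC, BinaryOperator &Add) {
  if (IC.computeOverflow(Instruction::Add, /*IsSigned=*/true,
                         Add.getOperand(0), Add.getOperand(1),
                         &Add) == OverflowResult::NeverOverflows)
    Add.setHasNoSignedWrap(); // nsw is sound only when signed wrap is impossible
}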
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth=0)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
LLVM_ABI Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
LLVM_ABI APInt possiblyDemandedEltsInMask(Value *Mask)
Given a mask vector of the form <Y x i1>, return an APInt (of bitwidth Y) for each lane which may be active.
LLVM_ABI RetainedKnowledge simplifyRetainedKnowledge(AssumeInst *Assume, RetainedKnowledge RK, AssumptionCache *AC, DominatorTree *DT)
canonicalize the RetainedKnowledge RK.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
LLVM_ABI Value * getAllocAlignment(const CallBase *V, const TargetLibraryInfo *TLI)
Gets the alignment argument for an aligned_alloc-like function, using either built-in knowledge based...
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI RetainedKnowledge getKnowledgeFromOperandInAssume(AssumeInst &Assume, unsigned Idx)
Retrieve the information held by Assume on the operand at index Idx.
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
Definition APFloat.h:1710
LLVM_ABI Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Given a callsite, callee, and arguments, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value that is Skew mod Align.
Definition MathExtras.h:546
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume don't contain any valuable information.
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not have undefined behavior.
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
constexpr T MinAlign(U A, V B)
A and B are either alignments or offsets.
Definition MathExtras.h:357
LLVM_ABI RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI)
This extracts the Knowledge from an element of an operand bundle.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Definition Local.h:252
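A hedged sketch of the usual pattern with getKnownAlignment: query the analysis and only ever raise the alignment already stated in the IR. The helper is hypothetical:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

static void raiseLoadAlignment(LoadInst &LI, const DataLayout &DL) {
  Align Known = getKnownAlignment(LI.getPointerOperand(), DL, &LI);
  if (Known > LI.getAlign())
    LI.setAlignment(Known); // never weakens an existing, stronger guarantee
}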
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 maxNum semantics.
Definition APFloat.h:1665
LLVM_ABI FPClassTest fneg(FPClassTest Mask)
Return the test mask which returns true if the value's sign bit is flipped.
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
LLVM_ABI Constant * getLosslessUnsignedTrunc(Constant *C, Type *DestTy, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1636
LLVM_READONLY APFloat minimumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimumNumber semantics.
Definition APFloat.h:1696
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
Returns: X * 2^Exp for integral exponents.
Definition APFloat.h:1610
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI bool matchSimpleBinaryIntrinsicRecurrence(const IntrinsicInst *I, PHINode *&P, Value *&Init, Value *&OtherOp)
Attempt to match a simple value-accumulating recurrence of the form: llvm.intrinsic....
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
auto find_if_not(R &&Range, UnaryPredicate P)
Definition STLExtras.h:1777
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1753
bool isAtLeastOrStrongerThan(AtomicOrdering AO, AtomicOrdering Other)
LLVM_ABI Constant * getLosslessSignedTrunc(Constant *C, Type *DestTy, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
LLVM_ABI AssumeInst * buildAssumeFromKnowledge(ArrayRef< RetainedKnowledge > Knowledge, Instruction *CtxI, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Build and return a new assume created from the provided knowledge if the knowledge in the assume is f...
LLVM_ABI FPClassTest inverse_fabs(FPClassTest Mask)
Return the test mask which returns true after fabs is applied to the value.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range function attribute.
iterator_range< SplittingIterator > split(StringRef Str, StringRef Separator)
Split the specified string over a separator and return a range-compatible iterable over its partition...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
LLVM_ABI bool maskIsAllOneOrUndef(Value *Mask)
Given a mask vector of i1, return true if all of the elements of this predicate mask are known to be true or undef.
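Sketch of how this query feeds a simplification: a masked load whose mask is all-ones (or undef) behaves like a plain load. This is a simplified rendering of the in-tree combine, not a drop-in replacement:

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

static Value *simplifyMaskedLoadSketch(IntrinsicInst &II, IRBuilder<> &B) {
  // Operands of @llvm.masked.load: (pointer, alignment, mask, passthru).
  if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
    Align Alignment = cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
    return B.CreateAlignedLoad(II.getType(), II.getArgOperand(0), Alignment,
                               "unmaskedload");
  }
  return nullptr;
}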
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
@ Mod
The access may modify the value stored in memory.
Definition ModRef.h:34
LLVM_ABI Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for the multiplication of a FMA, fold the result or return null.
@ Other
Any other memory.
Definition ModRef.h:68
FunctionAddr VTableAddr uintptr_t uintptr_t Data
Definition InstrProf.h:221
LLVM_ABI Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)
Given a constrained FP intrinsic call, tries to compute its simplified version.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 minNum semantics.
Definition APFloat.h:1646
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
@ Add
Sum of integers.
LLVM_ABI bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic has a scalar operand.
LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)
Combine constant ranges from computeConstantRange() and computeKnownBits().
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
A variant of isSafeToSpeculativelyExecute that doesn't use information from the instruction's non-constant operands.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if this is always a dereferenceable pointer.
Definition Loads.cpp:249
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined valu...
LLVM_ABI bool maskIsAllZeroOrUndef(Value *Mask)
Given a mask vector of i1, return true if all of the elements of this predicate mask are known to be false or undef.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
LLVM_ABI std::optional< APInt > getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI, function_ref< const Value *(const Value *)> Mapper=[](const Value *V) { return V;})
Return the size of the requested allocation.
unsigned Log2(Align A)
Returns the log2 of the alignment.
Definition Alignment.h:197
LLVM_ABI bool maskContainsAllOneOrUndef(Value *Mask)
Given a mask vector of i1, return true if any of the elements of this predicate mask are known to be true or undef.
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on dominating conditions.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
Definition APFloat.h:1683
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negations of each other.
LLVM_READONLY APFloat maximumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximumNumber semantics.
Definition APFloat.h:1723
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
LLVM_ABI bool isTriviallyVectorizable(Intrinsic::ID ID)
Identify if the intrinsic is trivially vectorizable.
LLVM_ABI std::optional< bool > computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return false if we can prove that the specified FP value's sign bit is 0.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define NC
Definition regutils.h:42
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:763
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
@ IEEE
IEEE-754 denormal numbers preserved.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition KnownBits.h:106
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition KnownBits.h:256
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
Definition KnownBits.h:288
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition KnownBits.h:303
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
bool isNonZero() const
Returns true if this value is known to be non-zero.
Definition KnownBits.h:109
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:262
bool isNegative() const
Returns true if this value is known to be negative.
Definition KnownBits.h:103
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
Definition KnownBits.h:294
unsigned countMinPopulation() const
Returns the number of bits known to be one.
Definition KnownBits.h:300
bool isAllOnes() const
Returns true if value is all one bits.
Definition KnownBits.h:81
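A minimal sketch of consulting these accessors via the free computeKnownBits listed earlier; the predicate is illustrative:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

// Is bit 0 of V provably zero, i.e. is V known to be even?
static bool isKnownEven(const Value *V, const DataLayout &DL) {
  KnownBits Known(V->getType()->getScalarSizeInBits());
  computeKnownBits(V, Known, DL); // populates Known.Zero / Known.One
  return Known.countMinTrailingZeros() >= 1;
}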
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
Matching combinators.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition Alignment.h:130
A lightweight accessor for an operand bundle meant to be passed around by value.
StringRef getTagName() const
Return the tag of this operand bundle as a string.
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
ArrayRef< Use > Inputs
Represent one information held inside an operand bundle of an llvm.assume.
Attribute::AttrKind AttrKind
SelectPatternFlavor Flavor
const DataLayout & DL
const Instruction * CxtI
SimplifyQuery getWithInstruction(const Instruction *I) const