//===- InstCombineCalls.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall, visitInvoke, and visitCallBr functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window",
    cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));

/// Return the specified type promoted as it would be to pass through a va_arg
/// area.
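/// For example, an i8 or i16 argument is promoted to i32; i32 and wider
/// integers, and all non-integer types, are returned unchanged.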
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// Return a constant boolean vector that has true elements in all positions
/// where the input constant data vector has an element with the sign bit set.
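/// For example (illustrative), <4 x i32> <i32 -1, i32 7, i32 -3, i32 0>
/// maps to <4 x i1> <i1 true, i1 false, i1 true, i1 false>.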
static Constant *getNegativeIsTrueBoolVec(ConstantDataVector *V) {
  SmallVector<Constant *, 32> BoolVec;
  IntegerType *BoolTy = Type::getInt1Ty(V->getContext());
  for (unsigned I = 0, E = V->getNumElements(); I != E; ++I) {
    Constant *Elt = V->getElementAsConstant(I);
    assert((isa<ConstantInt>(Elt) || isa<ConstantFP>(Elt)) &&
           "Unexpected constant data vector element type");
    bool Sign = V->getElementType()->isIntegerTy()
                    ? cast<ConstantInt>(Elt)->isNegative()
                    : cast<ConstantFP>(Elt)->isNegative();
    BoolVec.push_back(ConstantInt::get(BoolTy, Sign));
  }
  return ConstantVector::get(BoolVec);
}

Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
  unsigned CopyDstAlign = MI->getDestAlignment();
  if (CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);
    return MI;
  }

  unsigned SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
  unsigned CopySrcAlign = MI->getSourceAlignment();
  if (CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(MI->getDest())) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // If the MemCpyInst's length is 1/2/4/8 bytes, replace the memcpy with a
  // load/store pair.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
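  //
  // For example (illustrative IR), a 4-byte copy such as
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 4, i1 false)
  // becomes
  //   %s32 = bitcast i8* %s to i32*
  //   %d32 = bitcast i8* %d to i32*
  //   %v   = load i32, i32* %s32
  //   store i32 %v, i32* %d32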
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size&(Size-1)))
    return nullptr; // If not 1/2/4/8 bytes, exit.

  // If this is an atomic transfer and the alignment is less than the size, we
  // would introduce an unaligned memory access, which CodeGen would later
  // turn into a libcall. That is no evident performance gain, so disable the
  // transform for now.
  if (isa<AtomicMemTransferInst>(MI))
    if (CopyDstAlign < Size || CopySrcAlign < Size)
      return nullptr;

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // If the memcpy has metadata describing the members, see if we can get the
  // TBAA tag describing our copy.
  MDNode *CopyMD = nullptr;
  if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
    CopyMD = M;
  } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
    if (M->getNumOperands() == 3 && M->getOperand(0) &&
        mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
        mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
        M->getOperand(1) &&
        mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
        mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
            Size &&
        M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
      CopyMD = cast<MDNode>(M->getOperand(2));
  }

  Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder.CreateLoad(IntType, Src);
  // Alignment from the mem intrinsic will be better, so use it.
  L->setAlignment(
      MaybeAlign(CopySrcAlign)); // FIXME: Check if we can use Align instead.
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  MDNode *LoopMemParallelMD =
    MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
  if (AccessGroupMD)
    L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  StoreInst *S = Builder.CreateStore(L, Dest);
  // Alignment from the mem intrinsic will be better, so use it.
  S->setAlignment(
      MaybeAlign(CopyDstAlign)); // FIXME: Check if we can use Align instead.
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  if (AccessGroupMD)
    S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    // non-atomics can be volatile
    L->setVolatile(MT->isVolatile());
    S->setVolatile(MT->isVolatile());
  }
  if (isa<AtomicMemTransferInst>(MI)) {
    // atomics have to be unordered
    L->setOrdering(AtomicOrdering::Unordered);
    S->setOrdering(AtomicOrdering::Unordered);
  }

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setLength(Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyAnyMemSet(AnyMemSetInst *MI) {
  const unsigned KnownAlignment =
      getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  if (MI->getDestAlignment() < KnownAlignment) {
    MI->setDestAlignment(KnownAlignment);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(MI->getDest())) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // Extract the length and fill value if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  const uint64_t Len = LenC->getLimitedValue();
  assert(Len && "0-sized memory setting should be removed already.");
  const Align Alignment = assumeAligned(MI->getDestAlignment());

  // If this is an atomic memset and the alignment is less than the size, we
  // would introduce an unaligned memory access, which CodeGen would later
  // turn into a libcall. That is no evident performance gain, so disable the
  // transform for now.
  if (isa<AtomicMemSetInst>(MI))
    if (Alignment < Len)
      return nullptr;

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
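  // For example (illustrative), memset(p, 0xAB, 4) becomes 'store i32
  // 0xABABABAB, i32* %p'; multiplying the i8 fill value by
  // 0x0101010101010101 splats the byte across the wider integer before the
  // constant is truncated to ITy.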
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                       MI->isVolatile());
    S->setAlignment(Alignment);
    if (isa<AtomicMemSetInst>(MI))
      S->setOrdering(AtomicOrdering::Unordered);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}

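// Attempt to simplify SSE2/AVX2/AVX512 packed shift intrinsics to a generic
// IR shift when the shift amount is constant. For example (illustrative):
//   call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %x, i32 3)
// becomes
//   shl <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
// A shift by zero returns %x unchanged; a logical shift by >= the element
// bit width folds to zero, and an arithmetic shift clamps to BitWidth - 1.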
static Value *simplifyX86immShift(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  bool LogicalShift = false;
  bool ShiftLeft = false;

  switch (II.getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::x86_sse2_psra_d:
  case Intrinsic::x86_sse2_psra_w:
  case Intrinsic::x86_sse2_psrai_d:
  case Intrinsic::x86_sse2_psrai_w:
  case Intrinsic::x86_avx2_psra_d:
  case Intrinsic::x86_avx2_psra_w:
  case Intrinsic::x86_avx2_psrai_d:
  case Intrinsic::x86_avx2_psrai_w:
  case Intrinsic::x86_avx512_psra_q_128:
  case Intrinsic::x86_avx512_psrai_q_128:
  case Intrinsic::x86_avx512_psra_q_256:
  case Intrinsic::x86_avx512_psrai_q_256:
  case Intrinsic::x86_avx512_psra_d_512:
  case Intrinsic::x86_avx512_psra_q_512:
  case Intrinsic::x86_avx512_psra_w_512:
  case Intrinsic::x86_avx512_psrai_d_512:
  case Intrinsic::x86_avx512_psrai_q_512:
  case Intrinsic::x86_avx512_psrai_w_512:
    LogicalShift = false; ShiftLeft = false;
    break;
  case Intrinsic::x86_sse2_psrl_d:
  case Intrinsic::x86_sse2_psrl_q:
  case Intrinsic::x86_sse2_psrl_w:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_avx2_psrl_d:
  case Intrinsic::x86_avx2_psrl_q:
  case Intrinsic::x86_avx2_psrl_w:
  case Intrinsic::x86_avx2_psrli_d:
  case Intrinsic::x86_avx2_psrli_q:
  case Intrinsic::x86_avx2_psrli_w:
  case Intrinsic::x86_avx512_psrl_d_512:
  case Intrinsic::x86_avx512_psrl_q_512:
  case Intrinsic::x86_avx512_psrl_w_512:
  case Intrinsic::x86_avx512_psrli_d_512:
  case Intrinsic::x86_avx512_psrli_q_512:
  case Intrinsic::x86_avx512_psrli_w_512:
    LogicalShift = true; ShiftLeft = false;
    break;
  case Intrinsic::x86_sse2_psll_d:
  case Intrinsic::x86_sse2_psll_q:
  case Intrinsic::x86_sse2_psll_w:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_avx2_psll_d:
  case Intrinsic::x86_avx2_psll_q:
  case Intrinsic::x86_avx2_psll_w:
  case Intrinsic::x86_avx2_pslli_d:
  case Intrinsic::x86_avx2_pslli_q:
  case Intrinsic::x86_avx2_pslli_w:
  case Intrinsic::x86_avx512_psll_d_512:
  case Intrinsic::x86_avx512_psll_q_512:
  case Intrinsic::x86_avx512_psll_w_512:
  case Intrinsic::x86_avx512_pslli_d_512:
  case Intrinsic::x86_avx512_pslli_q_512:
  case Intrinsic::x86_avx512_pslli_w_512:
    LogicalShift = true; ShiftLeft = true;
    break;
  }
  assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");

  // Simplify if count is constant.
  auto Arg1 = II.getArgOperand(1);
  auto CAZ = dyn_cast<ConstantAggregateZero>(Arg1);
  auto CDV = dyn_cast<ConstantDataVector>(Arg1);
  auto CInt = dyn_cast<ConstantInt>(Arg1);
  if (!CAZ && !CDV && !CInt)
    return nullptr;

  APInt Count(64, 0);
  if (CDV) {
    // SSE2/AVX2 uses all of the first 64 bits of the 128-bit vector
    // operand to compute the shift amount.
    auto VT = cast<VectorType>(CDV->getType());
    unsigned BitWidth = VT->getElementType()->getPrimitiveSizeInBits();
    assert((64 % BitWidth) == 0 && "Unexpected packed shift size");
    unsigned NumSubElts = 64 / BitWidth;

    // Concatenate the sub-elements to create the 64-bit value.
    for (unsigned i = 0; i != NumSubElts; ++i) {
      unsigned SubEltIdx = (NumSubElts - 1) - i;
      auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
      Count <<= BitWidth;
      Count |= SubElt->getValue().zextOrTrunc(64);
    }
  }
  else if (CInt)
    Count = CInt->getValue();

  auto Vec = II.getArgOperand(0);
  auto VT = cast<VectorType>(Vec->getType());
  auto SVT = VT->getElementType();
  unsigned VWidth = VT->getNumElements();
  unsigned BitWidth = SVT->getPrimitiveSizeInBits();

  // If shift-by-zero then just return the original value.
  if (Count.isNullValue())
    return Vec;

  // Handle cases when Shift >= BitWidth.
  if (Count.uge(BitWidth)) {
    // If LogicalShift - just return zero.
    if (LogicalShift)
      return ConstantAggregateZero::get(VT);

    // If ArithmeticShift - clamp Shift to (BitWidth - 1).
    Count = APInt(64, BitWidth - 1);
  }

  // Get a constant vector of the same type as the first operand.
  auto ShiftAmt = ConstantInt::get(SVT, Count.zextOrTrunc(BitWidth));
  auto ShiftVec = Builder.CreateVectorSplat(VWidth, ShiftAmt);

  if (ShiftLeft)
    return Builder.CreateShl(Vec, ShiftVec);

  if (LogicalShift)
    return Builder.CreateLShr(Vec, ShiftVec);

  return Builder.CreateAShr(Vec, ShiftVec);
}

// Attempt to simplify AVX2 per-element shift intrinsics to a generic IR shift.
// Unlike the generic IR shifts, the intrinsics have defined behaviour for out
// of range shift amounts (logical - set to zero, arithmetic - splat sign bit).
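//
// For example (illustrative):
//   call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %x,
//                                         <4 x i32> <i32 1, i32 2, i32 3, i32 4>)
// becomes
//   lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
// whereas an amount of 32 or more in any lane must produce zero (logical) or
// a sign splat (arithmetic), which generic IR shifts leave undefined.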
static Value *simplifyX86varShift(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  bool LogicalShift = false;
  bool ShiftLeft = false;

  switch (II.getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::x86_avx2_psrav_d:
  case Intrinsic::x86_avx2_psrav_d_256:
  case Intrinsic::x86_avx512_psrav_q_128:
  case Intrinsic::x86_avx512_psrav_q_256:
  case Intrinsic::x86_avx512_psrav_d_512:
  case Intrinsic::x86_avx512_psrav_q_512:
  case Intrinsic::x86_avx512_psrav_w_128:
  case Intrinsic::x86_avx512_psrav_w_256:
  case Intrinsic::x86_avx512_psrav_w_512:
    LogicalShift = false;
    ShiftLeft = false;
    break;
  case Intrinsic::x86_avx2_psrlv_d:
  case Intrinsic::x86_avx2_psrlv_d_256:
  case Intrinsic::x86_avx2_psrlv_q:
  case Intrinsic::x86_avx2_psrlv_q_256:
  case Intrinsic::x86_avx512_psrlv_d_512:
  case Intrinsic::x86_avx512_psrlv_q_512:
  case Intrinsic::x86_avx512_psrlv_w_128:
  case Intrinsic::x86_avx512_psrlv_w_256:
  case Intrinsic::x86_avx512_psrlv_w_512:
    LogicalShift = true;
    ShiftLeft = false;
    break;
  case Intrinsic::x86_avx2_psllv_d:
  case Intrinsic::x86_avx2_psllv_d_256:
  case Intrinsic::x86_avx2_psllv_q:
  case Intrinsic::x86_avx2_psllv_q_256:
  case Intrinsic::x86_avx512_psllv_d_512:
  case Intrinsic::x86_avx512_psllv_q_512:
  case Intrinsic::x86_avx512_psllv_w_128:
  case Intrinsic::x86_avx512_psllv_w_256:
  case Intrinsic::x86_avx512_psllv_w_512:
    LogicalShift = true;
    ShiftLeft = true;
    break;
  }
  assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");

  // Simplify if all shift amounts are constant/undef.
  auto *CShift = dyn_cast<Constant>(II.getArgOperand(1));
  if (!CShift)
    return nullptr;

  auto Vec = II.getArgOperand(0);
  auto VT = cast<VectorType>(II.getType());
  auto SVT = VT->getVectorElementType();
  int NumElts = VT->getNumElements();
  int BitWidth = SVT->getIntegerBitWidth();

  // Collect each element's shift amount.
  // We also collect special cases: UNDEF = -1, OUT-OF-RANGE = BitWidth.
  bool AnyOutOfRange = false;
  SmallVector<int, 8> ShiftAmts;
  for (int I = 0; I < NumElts; ++I) {
    auto *CElt = CShift->getAggregateElement(I);
    if (CElt && isa<UndefValue>(CElt)) {
      ShiftAmts.push_back(-1);
      continue;
    }

    auto *COp = dyn_cast_or_null<ConstantInt>(CElt);
    if (!COp)
      return nullptr;

    // Handle out of range shifts.
    // If LogicalShift - set to BitWidth (special case).
    // If ArithmeticShift - set to (BitWidth - 1) (sign splat).
    APInt ShiftVal = COp->getValue();
    if (ShiftVal.uge(BitWidth)) {
      AnyOutOfRange = LogicalShift;
      ShiftAmts.push_back(LogicalShift ? BitWidth : BitWidth - 1);
      continue;
    }

    ShiftAmts.push_back((int)ShiftVal.getZExtValue());
  }

  // If all elements out of range or UNDEF, return vector of zeros/undefs.
  // ArithmeticShift should only hit this if they are all UNDEF.
  auto OutOfRange = [&](int Idx) { return (Idx < 0) || (BitWidth <= Idx); };
  if (llvm::all_of(ShiftAmts, OutOfRange)) {
    SmallVector<Constant *, 8> ConstantVec;
    for (int Idx : ShiftAmts) {
      if (Idx < 0) {
        ConstantVec.push_back(UndefValue::get(SVT));
      } else {
        assert(LogicalShift && "Logical shift expected");
        ConstantVec.push_back(ConstantInt::getNullValue(SVT));
      }
    }
    return ConstantVector::get(ConstantVec);
  }

  // We can't handle only some out of range values with generic logical shifts.
  if (AnyOutOfRange)
    return nullptr;

  // Build the shift amount constant vector.
  SmallVector<Constant *, 8> ShiftVecAmts;
  for (int Idx : ShiftAmts) {
    if (Idx < 0)
      ShiftVecAmts.push_back(UndefValue::get(SVT));
    else
      ShiftVecAmts.push_back(ConstantInt::get(SVT, Idx));
  }
  auto ShiftVec = ConstantVector::get(ShiftVecAmts);

  if (ShiftLeft)
    return Builder.CreateShl(Vec, ShiftVec);

  if (LogicalShift)
    return Builder.CreateLShr(Vec, ShiftVec);

  return Builder.CreateAShr(Vec, ShiftVec);
}

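// Attempt to simplify SSE2/AVX2/AVX512 PACKSS/PACKUS intrinsics to a clamp
// (compare + select), a lane-wise shuffle, and a truncate. For example
// (illustrative), packing two <8 x i16> vectors to <16 x i8> with signed
// saturation clamps every i16 element to [-128, 127], concatenates the
// clamped operands within each 128-bit lane, and truncates each element
// to i8.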
static Value *simplifyX86pack(IntrinsicInst &II,
                              InstCombiner::BuilderTy &Builder, bool IsSigned) {
  Value *Arg0 = II.getArgOperand(0);
  Value *Arg1 = II.getArgOperand(1);
  Type *ResTy = II.getType();

  // Fast all undef handling.
  if (isa<UndefValue>(Arg0) && isa<UndefValue>(Arg1))
    return UndefValue::get(ResTy);

  Type *ArgTy = Arg0->getType();
  unsigned NumLanes = ResTy->getPrimitiveSizeInBits() / 128;
  unsigned NumSrcElts = ArgTy->getVectorNumElements();
  assert(ResTy->getVectorNumElements() == (2 * NumSrcElts) &&
         "Unexpected packing types");

  unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
  unsigned DstScalarSizeInBits = ResTy->getScalarSizeInBits();
  unsigned SrcScalarSizeInBits = ArgTy->getScalarSizeInBits();
  assert(SrcScalarSizeInBits == (2 * DstScalarSizeInBits) &&
         "Unexpected packing types");

  // Constant folding.
  if (!isa<Constant>(Arg0) || !isa<Constant>(Arg1))
    return nullptr;

  // Clamp Values - signed/unsigned both use signed clamp values, but they
  // differ on the min/max values.
  APInt MinValue, MaxValue;
  if (IsSigned) {
    // PACKSS: Truncate signed value with signed saturation.
    // Source values less than dst minint are saturated to minint.
    // Source values greater than dst maxint are saturated to maxint.
    MinValue =
        APInt::getSignedMinValue(DstScalarSizeInBits).sext(SrcScalarSizeInBits);
    MaxValue =
        APInt::getSignedMaxValue(DstScalarSizeInBits).sext(SrcScalarSizeInBits);
  } else {
    // PACKUS: Truncate signed value with unsigned saturation.
    // Source values less than zero are saturated to zero.
    // Source values greater than dst maxuint are saturated to maxuint.
    MinValue = APInt::getNullValue(SrcScalarSizeInBits);
    MaxValue = APInt::getLowBitsSet(SrcScalarSizeInBits, DstScalarSizeInBits);
  }

  auto *MinC = Constant::getIntegerValue(ArgTy, MinValue);
  auto *MaxC = Constant::getIntegerValue(ArgTy, MaxValue);
  Arg0 = Builder.CreateSelect(Builder.CreateICmpSLT(Arg0, MinC), MinC, Arg0);
  Arg1 = Builder.CreateSelect(Builder.CreateICmpSLT(Arg1, MinC), MinC, Arg1);
  Arg0 = Builder.CreateSelect(Builder.CreateICmpSGT(Arg0, MaxC), MaxC, Arg0);
  Arg1 = Builder.CreateSelect(Builder.CreateICmpSGT(Arg1, MaxC), MaxC, Arg1);

  // Shuffle clamped args together at the lane level.
  SmallVector<unsigned, 32> PackMask;
  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
    for (unsigned Elt = 0; Elt != NumSrcEltsPerLane; ++Elt)
      PackMask.push_back(Elt + (Lane * NumSrcEltsPerLane));
    for (unsigned Elt = 0; Elt != NumSrcEltsPerLane; ++Elt)
      PackMask.push_back(Elt + (Lane * NumSrcEltsPerLane) + NumSrcElts);
  }
  auto *Shuffle = Builder.CreateShuffleVector(Arg0, Arg1, PackMask);

  // Truncate to dst size.
  return Builder.CreateTrunc(Shuffle, ResTy);
}

static Value *simplifyX86movmsk(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  Value *Arg = II.getArgOperand(0);
  Type *ResTy = II.getType();
  Type *ArgTy = Arg->getType();

  // movmsk(undef) -> zero as we must ensure the upper bits are zero.
  if (isa<UndefValue>(Arg))
    return Constant::getNullValue(ResTy);

  // We can't easily peek through x86_mmx types.
  if (!ArgTy->isVectorTy())
    return nullptr;

  // Expand MOVMSK to compare/bitcast/zext:
  // e.g. PMOVMSKB(v16i8 x):
  //   %cmp = icmp slt <16 x i8> %x, zeroinitializer
  //   %int = bitcast <16 x i1> %cmp to i16
  //   %res = zext i16 %int to i32
  unsigned NumElts = ArgTy->getVectorNumElements();
  Type *IntegerVecTy = VectorType::getInteger(cast<VectorType>(ArgTy));
  Type *IntegerTy = Builder.getIntNTy(NumElts);

  Value *Res = Builder.CreateBitCast(Arg, IntegerVecTy);
  Res = Builder.CreateICmpSLT(Res, Constant::getNullValue(IntegerVecTy));
  Res = Builder.CreateBitCast(Res, IntegerTy);
  Res = Builder.CreateZExtOrTrunc(Res, ResTy);
  return Res;
}

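// Fold an x86 addcarry intrinsic with a zero carry-in to the target-generic
// @llvm.uadd.with.overflow intrinsic. For example (illustrative):
//   %r = call { i8, i32 } @llvm.x86.addcarry.32(i8 0, i32 %a, i32 %b)
// becomes a call to @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) whose sum
// and overflow bit (zext'd from i1 to i8) are repacked into the { i8, i32 }
// struct the x86 intrinsic returns.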
static Value *simplifyX86addcarry(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  Value *CarryIn = II.getArgOperand(0);
  Value *Op1 = II.getArgOperand(1);
  Value *Op2 = II.getArgOperand(2);
  Type *RetTy = II.getType();
  Type *OpTy = Op1->getType();
  assert(RetTy->getStructElementType(0)->isIntegerTy(8) &&
         RetTy->getStructElementType(1) == OpTy && OpTy == Op2->getType() &&
         "Unexpected types for x86 addcarry");

  // If carry-in is zero, this is just an unsigned add with overflow.
  if (match(CarryIn, m_ZeroInt())) {
    Value *UAdd = Builder.CreateIntrinsic(Intrinsic::uadd_with_overflow, OpTy,
                                          { Op1, Op2 });
    // The types have to be adjusted to match the x86 call types.
    Value *UAddResult = Builder.CreateExtractValue(UAdd, 0);
    Value *UAddOV = Builder.CreateZExt(Builder.CreateExtractValue(UAdd, 1),
                                       Builder.getInt8Ty());
    Value *Res = UndefValue::get(RetTy);
    Res = Builder.CreateInsertValue(Res, UAddOV, 0);
    return Builder.CreateInsertValue(Res, UAddResult, 1);
  }

  return nullptr;
}

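// Attempt to simplify an SSE4.1 insertps intrinsic with a constant control
// byte to a shufflevector (or to a zero vector when every lane is
// zero-masked). For example (illustrative), an immediate of 0x10 (source
// lane 0, destination lane 1, empty zero mask) becomes
//   shufflevector <4 x float> %dst, <4 x float> %src,
//                 <4 x i32> <i32 0, i32 4, i32 2, i32 3>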
static Value *simplifyX86insertps(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2));
  if (!CInt)
    return nullptr;

  VectorType *VecTy = cast<VectorType>(II.getType());
  assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");

  // The immediate permute control byte looks like this:
  //   [3:0] - zero mask for each 32-bit lane
  //   [5:4] - select one 32-bit destination lane
  //   [7:6] - select one 32-bit source lane

  uint8_t Imm = CInt->getZExtValue();
  uint8_t ZMask = Imm & 0xf;
  uint8_t DestLane = (Imm >> 4) & 0x3;
  uint8_t SourceLane = (Imm >> 6) & 0x3;

  ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);

  // If all zero mask bits are set, this was just a weird way to
  // generate a zero vector.
  if (ZMask == 0xf)
    return ZeroVector;

  // Initialize by passing all of the first source bits through.
  uint32_t ShuffleMask[4] = { 0, 1, 2, 3 };

  // We may replace the second operand with the zero vector.
  Value *V1 = II.getArgOperand(1);

  if (ZMask) {
    // If the zero mask is being used with a single input or the zero mask
    // overrides the destination lane, this is a shuffle with the zero vector.
    if ((II.getArgOperand(0) == II.getArgOperand(1)) ||
        (ZMask & (1 << DestLane))) {
      V1 = ZeroVector;
      // We may still move 32-bits of the first source vector from one lane
      // to another.
      ShuffleMask[DestLane] = SourceLane;
      // The zero mask may override the previous insert operation.
      for (unsigned i = 0; i < 4; ++i)
        if ((ZMask >> i) & 0x1)
          ShuffleMask[i] = i + 4;
    } else {
      // TODO: Model this case as 2 shuffles or a 'logical and' plus shuffle?
      return nullptr;
    }
  } else {
    // Replace the selected destination lane with the selected source lane.
    ShuffleMask[DestLane] = SourceLane + 4;
  }

  return Builder.CreateShuffleVector(II.getArgOperand(0), V1, ShuffleMask);
}

/// Attempt to simplify SSE4A EXTRQ/EXTRQI instructions using constant folding
/// or conversion to a shuffle vector.
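/// For example (illustrative), EXTRQI with Length = 16 and Index = 8
/// extracts bits [23:8] of the low 64-bit element; since both values are
/// byte-aligned, that case is rewritten as a <16 x i8> shufflevector.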
static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
                               ConstantInt *CILength, ConstantInt *CIIndex,
                               InstCombiner::BuilderTy &Builder) {
  auto LowConstantHighUndef = [&](uint64_t Val) {
    Type *IntTy64 = Type::getInt64Ty(II.getContext());
    Constant *Args[] = {ConstantInt::get(IntTy64, Val),
                        UndefValue::get(IntTy64)};
    return ConstantVector::get(Args);
  };

  // See if we're dealing with constant values.
  Constant *C0 = dyn_cast<Constant>(Op0);
  ConstantInt *CI0 =
      C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
         : nullptr;

  // Attempt to constant fold.
  if (CILength && CIIndex) {
    // From AMD documentation: "The bit index and field length are each six
    // bits in length; other bits of the field are ignored."
    APInt APIndex = CIIndex->getValue().zextOrTrunc(6);
    APInt APLength = CILength->getValue().zextOrTrunc(6);

    unsigned Index = APIndex.getZExtValue();

    // From AMD documentation: "a value of zero in the field length is
    // defined as length of 64".
    unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();

    // From AMD documentation: "If the sum of the bit index + length field
    // is greater than 64, the results are undefined".
    unsigned End = Index + Length;

    // Note that both field index and field length are 8-bit quantities.
    // Since variables 'Index' and 'Length' are unsigned values
    // obtained from zero-extending field index and field length
    // respectively, their sum should never wrap around.
    if (End > 64)
      return UndefValue::get(II.getType());

    // If we are extracting whole bytes, we can convert this to a shuffle.
    // Lowering can recognize EXTRQI shuffle masks.
    if ((Length % 8) == 0 && (Index % 8) == 0) {
      // Convert bit indices to byte indices.
      Length /= 8;
      Index /= 8;

      Type *IntTy8 = Type::getInt8Ty(II.getContext());
      Type *IntTy32 = Type::getInt32Ty(II.getContext());
      VectorType *ShufTy = VectorType::get(IntTy8, 16);

      SmallVector<Constant *, 16> ShuffleMask;
      for (int i = 0; i != (int)Length; ++i)
        ShuffleMask.push_back(
            Constant::getIntegerValue(IntTy32, APInt(32, i + Index)));
      for (int i = Length; i != 8; ++i)
        ShuffleMask.push_back(
            Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
      for (int i = 8; i != 16; ++i)
        ShuffleMask.push_back(UndefValue::get(IntTy32));

      Value *SV = Builder.CreateShuffleVector(
          Builder.CreateBitCast(Op0, ShufTy),
          ConstantAggregateZero::get(ShufTy), ConstantVector::get(ShuffleMask));
      return Builder.CreateBitCast(SV, II.getType());
    }

    // Constant Fold - shift Index'th bit to lowest position and mask off
    // Length bits.
    if (CI0) {
      APInt Elt = CI0->getValue();
      Elt.lshrInPlace(Index);
      Elt = Elt.zextOrTrunc(Length);
      return LowConstantHighUndef(Elt.getZExtValue());
    }

    // If we were an EXTRQ call, we'll save registers if we convert to EXTRQI.
    if (II.getIntrinsicID() == Intrinsic::x86_sse4a_extrq) {
      Value *Args[] = {Op0, CILength, CIIndex};
      Module *M = II.getModule();
      Function *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_extrqi);
      return Builder.CreateCall(F, Args);
    }
  }

  // Constant Fold - extraction from zero is always {zero, undef}.
  if (CI0 && CI0->isZero())
    return LowConstantHighUndef(0);

  return nullptr;
}

/// Attempt to simplify SSE4A INSERTQ/INSERTQI instructions using constant
/// folding or conversion to a shuffle vector.
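/// For example (illustrative), INSERTQI with Length = 8 and Index = 16
/// inserts the low 8 bits of the second operand into bits [23:16] of the
/// first; byte-aligned cases like this are rewritten as <16 x i8> shuffles.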
static Value *simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1,
                                 APInt APLength, APInt APIndex,
                                 InstCombiner::BuilderTy &Builder) {
  // From AMD documentation: "The bit index and field length are each six bits
  // in length; other bits of the field are ignored."
  APIndex = APIndex.zextOrTrunc(6);
  APLength = APLength.zextOrTrunc(6);

  // Attempt to constant fold.
  unsigned Index = APIndex.getZExtValue();

  // From AMD documentation: "a value of zero in the field length is
  // defined as length of 64".
  unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();

  // From AMD documentation: "If the sum of the bit index + length field
  // is greater than 64, the results are undefined".
  unsigned End = Index + Length;

  // Note that both field index and field length are 8-bit quantities.
  // Since variables 'Index' and 'Length' are unsigned values
  // obtained from zero-extending field index and field length
  // respectively, their sum should never wrap around.
  if (End > 64)
    return UndefValue::get(II.getType());

  // If we are inserting whole bytes, we can convert this to a shuffle.
  // Lowering can recognize INSERTQI shuffle masks.
  if ((Length % 8) == 0 && (Index % 8) == 0) {
    // Convert bit indices to byte indices.
    Length /= 8;
    Index /= 8;

    Type *IntTy8 = Type::getInt8Ty(II.getContext());
    Type *IntTy32 = Type::getInt32Ty(II.getContext());
    VectorType *ShufTy = VectorType::get(IntTy8, 16);

    SmallVector<Constant *, 16> ShuffleMask;
    for (int i = 0; i != (int)Index; ++i)
      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
    for (int i = 0; i != (int)Length; ++i)
      ShuffleMask.push_back(
          Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
    for (int i = Index + Length; i != 8; ++i)
      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
    for (int i = 8; i != 16; ++i)
      ShuffleMask.push_back(UndefValue::get(IntTy32));

    Value *SV = Builder.CreateShuffleVector(Builder.CreateBitCast(Op0, ShufTy),
                                            Builder.CreateBitCast(Op1, ShufTy),
                                            ConstantVector::get(ShuffleMask));
    return Builder.CreateBitCast(SV, II.getType());
  }

  // See if we're dealing with constant values.
  Constant *C0 = dyn_cast<Constant>(Op0);
  Constant *C1 = dyn_cast<Constant>(Op1);
  ConstantInt *CI00 =
      C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
         : nullptr;
  ConstantInt *CI10 =
      C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
         : nullptr;

  // Constant Fold - insert bottom Length bits starting at the Index'th bit.
  if (CI00 && CI10) {
    APInt V00 = CI00->getValue();
    APInt V10 = CI10->getValue();
    APInt Mask = APInt::getLowBitsSet(64, Length).shl(Index);
    V00 = V00 & ~Mask;
    V10 = V10.zextOrTrunc(Length).zextOrTrunc(64).shl(Index);
    APInt Val = V00 | V10;
    Type *IntTy64 = Type::getInt64Ty(II.getContext());
    Constant *Args[] = {ConstantInt::get(IntTy64, Val.getZExtValue()),
                        UndefValue::get(IntTy64)};
    return ConstantVector::get(Args);
  }

  // If we were an INSERTQ call, we'll save demanded elements if we convert to
  // INSERTQI.
  if (II.getIntrinsicID() == Intrinsic::x86_sse4a_insertq) {
    Type *IntTy8 = Type::getInt8Ty(II.getContext());
    Constant *CILength = ConstantInt::get(IntTy8, Length, false);
    Constant *CIIndex = ConstantInt::get(IntTy8, Index, false);

    Value *Args[] = {Op0, Op1, CILength, CIIndex};
    Module *M = II.getModule();
    Function *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
    return Builder.CreateCall(F, Args);
  }

  return nullptr;
}

/// Attempt to convert pshufb* to shufflevector if the mask is constant.
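/// For example (illustrative), a v16i8 pshufb whose mask byte 0 is 0x05
/// selects source byte 5 into result byte 0, while a mask byte with its sign
/// bit set (e.g. 0x80) selects from the all-zeros second shuffle operand.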
static Value *simplifyX86pshufb(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  auto *V = dyn_cast<Constant>(II.getArgOperand(1));
  if (!V)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
  unsigned NumElts = VecTy->getNumElements();
  assert((NumElts == 16 || NumElts == 32 || NumElts == 64) &&
         "Unexpected number of elements in shuffle mask!");

  // Construct a shuffle mask from constant integers or UNDEFs.
  Constant *Indexes[64] = {nullptr};

  // Each byte in the shuffle control mask forms an index to permute the
  // corresponding byte in the destination operand.
  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = V->getAggregateElement(I);
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = UndefValue::get(MaskEltTy);
      continue;
    }

    int8_t Index = cast<ConstantInt>(COp)->getValue().getZExtValue();

    // If the most significant bit (bit[7]) of each byte of the shuffle
    // control mask is set, then zero is written in the result byte.
    // The zero vector is in the right-hand side of the resulting
    // shufflevector.

    // The value of each index for the high 128-bit lane is the least
    // significant 4 bits of the respective shuffle control byte.
    Index = ((Index < 0) ? NumElts : Index & 0x0F) + (I & 0xF0);
    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
  }

  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
  auto V1 = II.getArgOperand(0);
  auto V2 = Constant::getNullValue(VecTy);
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// Attempt to convert vpermilvar* to shufflevector if the mask is constant.
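/// For example (illustrative), vpermilvar.ps with a constant mask
/// <4 x i32> <i32 3, i32 2, i32 1, i32 0> becomes a shufflevector that
/// reverses the four floats; only the low two bits of each mask element are
/// read for the PS variants (bit 1 alone for PD).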
static Value *simplifyX86vpermilvar(const IntrinsicInst &II,
                                    InstCombiner::BuilderTy &Builder) {
  auto *V = dyn_cast<Constant>(II.getArgOperand(1));
  if (!V)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
  unsigned NumElts = VecTy->getVectorNumElements();
  bool IsPD = VecTy->getScalarType()->isDoubleTy();
  unsigned NumLaneElts = IsPD ? 2 : 4;
  assert(NumElts == 16 || NumElts == 8 || NumElts == 4 || NumElts == 2);

  // Construct a shuffle mask from constant integers or UNDEFs.
  Constant *Indexes[16] = {nullptr};

  // The intrinsics only read one or two bits, clear the rest.
  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = V->getAggregateElement(I);
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = UndefValue::get(MaskEltTy);
      continue;
    }

    APInt Index = cast<ConstantInt>(COp)->getValue();
    Index = Index.zextOrTrunc(32).getLoBits(2);

    // The PD variants use bit 1 to select a per-lane element index, so
    // shift down to convert to a generic shuffle mask index.
    if (IsPD)
      Index.lshrInPlace(1);

    // The _256 variants are a bit trickier since the mask bits always index
    // into the corresponding 128-bit half. In order to convert to a generic
    // shuffle, we have to make that explicit.
    Index += APInt(32, (I / NumLaneElts) * NumLaneElts);

    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
  }

  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
  auto V1 = II.getArgOperand(0);
  auto V2 = UndefValue::get(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// Attempt to convert vpermd/vpermps to shufflevector if the mask is constant.
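/// For example (illustrative), vpermd with the constant mask
/// <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
/// becomes a full-width shufflevector; unlike vpermilvar, the indices may
/// cross 128-bit lanes, and each index is reduced modulo the element count.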
static Value *simplifyX86vpermv(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  auto *V = dyn_cast<Constant>(II.getArgOperand(1));
  if (!V)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
  unsigned Size = VecTy->getNumElements();
  assert((Size == 4 || Size == 8 || Size == 16 || Size == 32 || Size == 64) &&
         "Unexpected shuffle mask size");

  // Construct a shuffle mask from constant integers or UNDEFs.
  Constant *Indexes[64] = {nullptr};

  for (unsigned I = 0; I < Size; ++I) {
    Constant *COp = V->getAggregateElement(I);
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = UndefValue::get(MaskEltTy);
      continue;
    }

    uint32_t Index = cast<ConstantInt>(COp)->getZExtValue();
    Index &= Size - 1;
    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
  }

  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, Size));
  auto V1 = II.getArgOperand(0);
  auto V2 = UndefValue::get(VecTy);
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

// TODO, Obvious Missing Transforms:
// * Narrow width by halves excluding zero/undef lanes
Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {
  Value *LoadPtr = II.getArgOperand(0);
  unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();

  // If the mask is all ones or undefs, this is a plain vector load of the 1st
  // argument.
  if (maskIsAllOneOrUndef(II.getArgOperand(2)))
    return Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                     "unmaskedload");

  // If we can unconditionally load from this address, replace with a
  // load/select idiom. TODO: use DT for context sensitive query
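  //
  // For example (illustrative IR):
  //   %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(
  //            <4 x i32>* %p, i32 4, <4 x i1> %m, <4 x i32> %pt)
  // becomes, when %p is known dereferenceable and aligned,
  //   %l = load <4 x i32>, <4 x i32>* %p, align 4
  //   %v = select <4 x i1> %m, <4 x i32> %l, <4 x i32> %pt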
  if (isDereferenceableAndAlignedPointer(LoadPtr, II.getType(), Alignment,
                                         II.getModule()->getDataLayout(),
                                         &II, nullptr)) {
    Value *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                          "unmaskedload");
    return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Narrow width by halves excluding zero/undef lanes
Instruction *InstCombiner::simplifyMaskedStore(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, this instruction does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // If the mask is all ones, this is a plain vector store of the 1st argument.
  if (ConstMask->isAllOnesValue()) {
    Value *StorePtr = II.getArgOperand(1);
    unsigned Alignment = cast<ConstantInt>(II.getArgOperand(2))->getZExtValue();
    return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
  }

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
                                            DemandedElts, UndefElts)) {
    II.setOperand(0, V);
    return &II;
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane load -> load
// * Dereferenceable address & few lanes -> scalarize speculative load/selects
// * Adjacent vector addresses -> masked.load
// * Narrow width by halves excluding zero/undef lanes
// * Vector splat address w/known mask -> scalar load
// * Vector incrementing address -> vector masked load
Instruction *InstCombiner::simplifyMaskedGather(IntrinsicInst &II) {
  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Adjacent vector addresses -> masked.store
// * Narrow store width by halves excluding zero/undef lanes
// * Vector splat address w/known mask -> scalar store
// * Vector incrementing address -> vector masked store
Instruction *InstCombiner::simplifyMaskedScatter(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, a scatter does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
                                            DemandedElts, UndefElts)) {
    II.setOperand(0, V);
    return &II;
  }
  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(1),
                                            DemandedElts, UndefElts)) {
    II.setOperand(1, V);
    return &II;
  }

  return nullptr;
}

/// This function transforms launder.invariant.group and strip.invariant.group
/// like:
///   launder(launder(%x)) -> launder(%x)  (the result is not the argument)
///   launder(strip(%x))   -> launder(%x)
///   strip(strip(%x))     -> strip(%x)    (the result is not the argument)
///   strip(launder(%x))   -> strip(%x)
/// This is legal because it preserves the most recent information about
/// the presence or absence of invariant.group.
static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
                                                    InstCombiner &IC) {
  auto *Arg = II.getArgOperand(0);
  auto *StrippedArg = Arg->stripPointerCasts();
  auto *StrippedInvariantGroupsArg = Arg->stripPointerCastsAndInvariantGroups();
  if (StrippedArg == StrippedInvariantGroupsArg)
    return nullptr; // No launders/strips to remove.

  Value *Result = nullptr;

  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
  else
    llvm_unreachable(
        "simplifyInvariantGroupIntrinsic only handles launder and strip");
  if (Result->getType()->getPointerAddressSpace() !=
      II.getType()->getPointerAddressSpace())
    Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
  if (Result->getType() != II.getType())
    Result = IC.Builder.CreateBitCast(Result, II.getType());

  return cast<Instruction>(Result);
}
1180 
1182  assert((II.getIntrinsicID() == Intrinsic::cttz ||
1183  II.getIntrinsicID() == Intrinsic::ctlz) &&
1184  "Expected cttz or ctlz intrinsic");
1185  bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
1186  Value *Op0 = II.getArgOperand(0);
1187  Value *X;
1188  // ctlz(bitreverse(x)) -> cttz(x)
1189  // cttz(bitreverse(x)) -> ctlz(x)
1190  if (match(Op0, m_BitReverse(m_Value(X)))) {
1191  Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
1193  return CallInst::Create(F, {X, II.getArgOperand(1)});
1194  }
1195 
1196  if (IsTZ) {
1197  // cttz(-x) -> cttz(x)
1198  if (match(Op0, m_Neg(m_Value(X)))) {
1199  II.setOperand(0, X);
1200  return &II;
1201  }
1202 
1203  // cttz(abs(x)) -> cttz(x)
1204  // cttz(nabs(x)) -> cttz(x)
1205  Value *Y;
1207  if (SPF == SPF_ABS || SPF == SPF_NABS) {
1208  II.setOperand(0, X);
1209  return &II;
1210  }
1211  }
1212 
  KnownBits Known = IC.computeKnownBits(Op0, 0, &II);

  // Create a mask for bits above (ctlz) or below (cttz) the first known one.
  unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
                                : Known.countMaxLeadingZeros();
  unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
                                : Known.countMinLeadingZeros();

  // If all bits above (ctlz) or below (cttz) the first known one are known
  // zero, this value is constant.
  // FIXME: This should be in InstSimplify because we're replacing an
  // instruction with a constant.
  if (PossibleZeros == DefiniteZeros) {
    auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
    return IC.replaceInstUsesWith(II, C);
  }

  // If the input to cttz/ctlz is known to be non-zero,
  // then change the 'ZeroIsUndef' parameter to 'true'
  // because we know the zero behavior can't affect the result.
  if (!Known.One.isNullValue() ||
      isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
                     &IC.getDominatorTree())) {
    if (!match(II.getArgOperand(1), m_One())) {
      II.setOperand(1, IC.Builder.getTrue());
      return &II;
    }
  }

  // Add range metadata since known bits can't completely reflect what we know.
  // TODO: Handle splat vectors.
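  //
  // For example, a cttz of an i32 known to have at least 4 and at most 10
  // trailing zeros gets '!range !{i32 4, i32 11}' (the upper bound is
  // exclusive).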
  auto *IT = dyn_cast<IntegerType>(Op0->getType());
  if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
        ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

static Instruction *foldCtpop(IntrinsicInst &II, InstCombiner &IC) {
  assert(II.getIntrinsicID() == Intrinsic::ctpop &&
         "Expected ctpop intrinsic");
  Value *Op0 = II.getArgOperand(0);
  Value *X;
  // ctpop(bitreverse(x)) -> ctpop(x)
  // ctpop(bswap(x))      -> ctpop(x)
  if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X)))) {
    II.setOperand(0, X);
    return &II;
  }

  // FIXME: Try to simplify vectors of integers.
  auto *IT = dyn_cast<IntegerType>(Op0->getType());
  if (!IT)
    return nullptr;

  unsigned BitWidth = IT->getBitWidth();
  KnownBits Known(BitWidth);
  IC.computeKnownBits(Op0, Known, 0, &II);

  unsigned MinCount = Known.countMinPopulation();
  unsigned MaxCount = Known.countMaxPopulation();

  // Add range metadata since known bits can't completely reflect what we know.
  if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
        ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

// TODO: If the x86 backend knew how to convert a bool vector mask back to an
// XMM register mask efficiently, we could transform all x86 masked intrinsics
// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
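//
// For example (illustrative), @llvm.x86.avx.maskload.ps with a constant sign
// mask becomes an @llvm.masked.load.v4f32 call with an i1 mask derived from
// each element's sign bit and a zeroinitializer pass-through.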
static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
  Value *Ptr = II.getOperand(0);
  Value *Mask = II.getOperand(1);
  Constant *ZeroVec = Constant::getNullValue(II.getType());

  // Special case a zero mask since that's not a ConstantDataVector.
  // This masked load instruction creates a zero vector.
  if (isa<ConstantAggregateZero>(Mask))
    return IC.replaceInstUsesWith(II, ZeroVec);

  auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
  if (!ConstMask)
    return nullptr;

  // The mask is constant. Convert this x86 intrinsic to the LLVM intrinsic
  // to allow target-independent optimizations.

  // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
  // the LLVM intrinsic definition for the pointer argument.
  unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
  PointerType *VecPtrTy = PointerType::get(II.getType(), AddrSpace);
  Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");

  // Second, convert the x86 XMM integer vector mask to a vector of bools based
  // on each element's most significant bit (the sign bit).
  Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);

  // The pass-through vector for an x86 masked load is a zero vector.
  CallInst *NewMaskedLoad =
      IC.Builder.CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
  return IC.replaceInstUsesWith(II, NewMaskedLoad);
}

// TODO: If the x86 backend knew how to convert a bool vector mask back to an
// XMM register mask efficiently, we could transform all x86 masked intrinsics
// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC) {
  Value *Ptr = II.getOperand(0);
  Value *Mask = II.getOperand(1);
  Value *Vec = II.getOperand(2);

  // Special case a zero mask since that's not a ConstantDataVector:
  // this masked store instruction does nothing.
  if (isa<ConstantAggregateZero>(Mask)) {
    IC.eraseInstFromFunction(II);
    return true;
  }

  // The SSE2 version is too weird (e.g., unaligned but non-temporal) to do
  // anything else at this level.
  if (II.getIntrinsicID() == Intrinsic::x86_sse2_maskmov_dqu)
    return false;

  auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
  if (!ConstMask)
    return false;

  // The mask is constant. Convert this x86 intrinsic to the LLVM intrinsic
  // to allow target-independent optimizations.

  // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
  // the LLVM intrinsic definition for the pointer argument.
  unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
  PointerType *VecPtrTy = PointerType::get(Vec->getType(), AddrSpace);
  Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");

  // Second, convert the x86 XMM integer vector mask to a vector of bools based
  // on each element's most significant bit (the sign bit).
  Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);

  IC.Builder.CreateMaskedStore(Vec, PtrCast, 1, BoolMask);

  // 'Replace uses' doesn't work for stores. Erase the original masked store.
  IC.eraseInstFromFunction(II);
  return true;
}

// Constant fold llvm.amdgcn.fmed3 intrinsics for standard inputs.
//
// A single NaN input is folded to minnum, so we rely on that folding for
// handling NaNs.
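//
// For example, fmed3(1.0, 3.0, 2.0): Max3 = 3.0 compares equal to Src1, so
// the result is maxnum(Src0, Src2) = maxnum(1.0, 2.0) = 2.0, the median.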
static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,
                           const APFloat &Src2) {
  APFloat Max3 = maxnum(maxnum(Src0, Src1), Src2);

  APFloat::cmpResult Cmp0 = Max3.compare(Src0);
  assert(Cmp0 != APFloat::cmpUnordered && "nans handled separately");
  if (Cmp0 == APFloat::cmpEqual)
    return maxnum(Src1, Src2);

  APFloat::cmpResult Cmp1 = Max3.compare(Src1);
  assert(Cmp1 != APFloat::cmpUnordered && "nans handled separately");
  if (Cmp1 == APFloat::cmpEqual)
    return maxnum(Src0, Src2);

  return maxnum(Src0, Src1);
}

/// Convert a table lookup to shufflevector if the mask is constant.
/// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
/// which case we could lower the shufflevector with rev64 instructions
/// as it's actually a byte reverse.
static Value *simplifyNeonTbl1(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder) {
  // Bail out if the mask is not a constant.
  auto *C = dyn_cast<Constant>(II.getArgOperand(1));
  if (!C)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  unsigned NumElts = VecTy->getNumElements();

  // Only perform this transformation for <8 x i8> vector types.
  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
    return nullptr;

  uint32_t Indexes[8];

  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = C->getAggregateElement(I);

    if (!COp || !isa<ConstantInt>(COp))
      return nullptr;

    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();

    // Make sure the mask indices are in range.
    if (Indexes[I] >= NumElts)
      return nullptr;
  }

  auto *ShuffleMask = ConstantDataVector::get(II.getContext(),
                                              makeArrayRef(Indexes));
  auto *V1 = II.getArgOperand(0);
  auto *V2 = Constant::getNullValue(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// Convert a vector load intrinsic into a simple llvm load instruction.
/// This is beneficial when the underlying object being addressed comes
/// from a constant, since we get constant-folding for free.
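/// For example (illustrative; the exact intrinsic mangling may differ):
///   %v = call <4 x i32> @llvm.arm.neon.vld1.v4i32.p0i8(i8* %p, i32 4)
/// becomes a plain 'load <4 x i32>' through a bitcast of %p, using the
/// larger of the intrinsic's alignment operand and the caller-supplied
/// MemAlign.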
static Value *simplifyNeonVld1(const IntrinsicInst &II,
                               unsigned MemAlign,
                               InstCombiner::BuilderTy &Builder) {
  auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));

  if (!IntrAlign)
    return nullptr;

  unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign ?
                       MemAlign : IntrAlign->getLimitedValue();

  if (!isPowerOf2_32(Alignment))
    return nullptr;

  auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
                                          PointerType::get(II.getType(), 0));
  return Builder.CreateAlignedLoad(II.getType(), BCastInst, Alignment);
}

// Returns true iff the 2 intrinsics have the same operands, limiting the
// comparison to the first NumOperands.
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.getNumArgOperands() >= NumOperands && "Not enough operands");
  assert(E.getNumArgOperands() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
      return false;
  return true;
}

// Remove trivially empty start/end intrinsic ranges, i.e. a start
// immediately followed by an end (ignoring debuginfo or other
// start/end intrinsics in between). As this handles only the most trivial
// cases, tracking the nesting level is not needed:
//
//   call @llvm.foo.start(i1 0) ; &I
//   call @llvm.foo.start(i1 0)
//   call @llvm.foo.end(i1 0)   ; This one won't be skipped: it will be removed
//   call @llvm.foo.end(i1 0)
static bool removeTriviallyEmptyRange(IntrinsicInst &I, unsigned StartID,
                                      unsigned EndID, InstCombiner &IC) {
  assert(I.getIntrinsicID() == StartID &&
         "Start intrinsic does not have expected ID");
  BasicBlock::iterator BI(I), BE(I.getParent()->end());
  for (++BI; BI != BE; ++BI) {
    if (auto *E = dyn_cast<IntrinsicInst>(BI)) {
      if (isa<DbgInfoIntrinsic>(E) || E->getIntrinsicID() == StartID)
        continue;
      if (E->getIntrinsicID() == EndID &&
          haveSameOperands(I, *E, E->getNumArgOperands())) {
        IC.eraseInstFromFunction(*E);
        IC.eraseInstFromFunction(I);
        return true;
      }
    }
    break;
  }

  return false;
}

1500 // Convert NVVM intrinsics to target-generic LLVM code where possible.
1501 static Instruction *SimplifyNVVMIntrinsic(IntrinsicInst *II, InstCombiner &IC) {
1502  // Each NVVM intrinsic we can simplify can be replaced with one of:
1503  //
1504  // * an LLVM intrinsic,
1505  // * an LLVM cast operation,
1506  // * an LLVM binary operation, or
1507  // * ad-hoc LLVM IR for the particular operation.
1508 
1509  // Some transformations are only valid when the module's
1510  // flush-denormals-to-zero (ftz) setting is true/false, whereas other
1511  // transformations are valid regardless of the module's ftz setting.
1512  enum FtzRequirementTy {
1513  FTZ_Any, // Any ftz setting is ok.
1514  FTZ_MustBeOn, // Transformation is valid only if ftz is on.
1515  FTZ_MustBeOff, // Transformation is valid only if ftz is off.
1516  };
1517  // Classes of NVVM intrinsics that can't be replaced one-to-one with a
1518  // target-generic intrinsic, cast op, or binary op but that we can nonetheless
1519  // simplify.
1520  enum SpecialCase {
1521  SPC_Reciprocal,
1522  };
1523 
1524  // SimplifyAction is a poor-man's variant (plus an additional flag) that
1525  // represents how to replace an NVVM intrinsic with target-generic LLVM IR.
1526  struct SimplifyAction {
1527  // Invariant: At most one of these Optionals has a value.
1528  Optional<Intrinsic::ID> IID;
1529  Optional<Instruction::CastOps> CastOp;
1530  Optional<Instruction::BinaryOps> BinaryOp;
1531  Optional<SpecialCase> Special;
1532 
1533  FtzRequirementTy FtzRequirement = FTZ_Any;
1534 
1535  SimplifyAction() = default;
1536 
1537  SimplifyAction(Intrinsic::ID IID, FtzRequirementTy FtzReq)
1538  : IID(IID), FtzRequirement(FtzReq) {}
1539 
1540  // Cast operations don't have anything to do with FTZ, so we skip that
1541  // argument.
1542  SimplifyAction(Instruction::CastOps CastOp) : CastOp(CastOp) {}
1543 
1544  SimplifyAction(Instruction::BinaryOps BinaryOp, FtzRequirementTy FtzReq)
1545  : BinaryOp(BinaryOp), FtzRequirement(FtzReq) {}
1546 
1547  SimplifyAction(SpecialCase Special, FtzRequirementTy FtzReq)
1548  : Special(Special), FtzRequirement(FtzReq) {}
1549  };
1550 
1551  // Try to generate a SimplifyAction describing how to replace our
1552  // IntrinsicInstr with target-generic LLVM IR.
1553  const SimplifyAction Action = [II]() -> SimplifyAction {
1554  switch (II->getIntrinsicID()) {
1555  // NVVM intrinsics that map directly to LLVM intrinsics.
1556  case Intrinsic::nvvm_ceil_d:
1557  return {Intrinsic::ceil, FTZ_Any};
1558  case Intrinsic::nvvm_ceil_f:
1559  return {Intrinsic::ceil, FTZ_MustBeOff};
1560  case Intrinsic::nvvm_ceil_ftz_f:
1561  return {Intrinsic::ceil, FTZ_MustBeOn};
1562  case Intrinsic::nvvm_fabs_d:
1563  return {Intrinsic::fabs, FTZ_Any};
1564  case Intrinsic::nvvm_fabs_f:
1565  return {Intrinsic::fabs, FTZ_MustBeOff};
1566  case Intrinsic::nvvm_fabs_ftz_f:
1567  return {Intrinsic::fabs, FTZ_MustBeOn};
1568  case Intrinsic::nvvm_floor_d:
1569  return {Intrinsic::floor, FTZ_Any};
1570  case Intrinsic::nvvm_floor_f:
1571  return {Intrinsic::floor, FTZ_MustBeOff};
1572  case Intrinsic::nvvm_floor_ftz_f:
1573  return {Intrinsic::floor, FTZ_MustBeOn};
1574  case Intrinsic::nvvm_fma_rn_d:
1575  return {Intrinsic::fma, FTZ_Any};
1576  case Intrinsic::nvvm_fma_rn_f:
1577  return {Intrinsic::fma, FTZ_MustBeOff};
1578  case Intrinsic::nvvm_fma_rn_ftz_f:
1579  return {Intrinsic::fma, FTZ_MustBeOn};
1580  case Intrinsic::nvvm_fmax_d:
1581  return {Intrinsic::maxnum, FTZ_Any};
1582  case Intrinsic::nvvm_fmax_f:
1583  return {Intrinsic::maxnum, FTZ_MustBeOff};
1584  case Intrinsic::nvvm_fmax_ftz_f:
1585  return {Intrinsic::maxnum, FTZ_MustBeOn};
1586  case Intrinsic::nvvm_fmin_d:
1587  return {Intrinsic::minnum, FTZ_Any};
1588  case Intrinsic::nvvm_fmin_f:
1589  return {Intrinsic::minnum, FTZ_MustBeOff};
1590  case Intrinsic::nvvm_fmin_ftz_f:
1591  return {Intrinsic::minnum, FTZ_MustBeOn};
1592  case Intrinsic::nvvm_round_d:
1593  return {Intrinsic::round, FTZ_Any};
1594  case Intrinsic::nvvm_round_f:
1595  return {Intrinsic::round, FTZ_MustBeOff};
1596  case Intrinsic::nvvm_round_ftz_f:
1597  return {Intrinsic::round, FTZ_MustBeOn};
1598  case Intrinsic::nvvm_sqrt_rn_d:
1599  return {Intrinsic::sqrt, FTZ_Any};
1600  case Intrinsic::nvvm_sqrt_f:
1601  // nvvm_sqrt_f is a special case. For most intrinsics, foo_ftz_f is the
1602  // ftz version, and foo_f is the non-ftz version. But nvvm_sqrt_f adopts
1603  // the ftz-ness of the surrounding code. sqrt_rn_f and sqrt_rn_ftz_f are
1604  // the versions with explicit ftz-ness.
1605  return {Intrinsic::sqrt, FTZ_Any};
1606  case Intrinsic::nvvm_sqrt_rn_f:
1607  return {Intrinsic::sqrt, FTZ_MustBeOff};
1608  case Intrinsic::nvvm_sqrt_rn_ftz_f:
1609  return {Intrinsic::sqrt, FTZ_MustBeOn};
1610  case Intrinsic::nvvm_trunc_d:
1611  return {Intrinsic::trunc, FTZ_Any};
1612  case Intrinsic::nvvm_trunc_f:
1613  return {Intrinsic::trunc, FTZ_MustBeOff};
1614  case Intrinsic::nvvm_trunc_ftz_f:
1615  return {Intrinsic::trunc, FTZ_MustBeOn};
1616 
1617  // NVVM intrinsics that map to LLVM cast operations.
1618  //
1619  // Note that llvm's target-generic conversion operators correspond to the rz
1620  // (round to zero) versions of the nvvm conversion intrinsics, even though
1621  // almost everything else here uses the rn (round to nearest even) nvvm ops.
1622  case Intrinsic::nvvm_d2i_rz:
1623  case Intrinsic::nvvm_f2i_rz:
1624  case Intrinsic::nvvm_d2ll_rz:
1625  case Intrinsic::nvvm_f2ll_rz:
1626  return {Instruction::FPToSI};
1627  case Intrinsic::nvvm_d2ui_rz:
1628  case Intrinsic::nvvm_f2ui_rz:
1629  case Intrinsic::nvvm_d2ull_rz:
1630  case Intrinsic::nvvm_f2ull_rz:
1631  return {Instruction::FPToUI};
1632  case Intrinsic::nvvm_i2d_rz:
1633  case Intrinsic::nvvm_i2f_rz:
1634  case Intrinsic::nvvm_ll2d_rz:
1635  case Intrinsic::nvvm_ll2f_rz:
1636  return {Instruction::SIToFP};
1637  case Intrinsic::nvvm_ui2d_rz:
1638  case Intrinsic::nvvm_ui2f_rz:
1639  case Intrinsic::nvvm_ull2d_rz:
1640  case Intrinsic::nvvm_ull2f_rz:
1641  return {Instruction::UIToFP};
1642 
1643  // NVVM intrinsics that map to LLVM binary ops.
1644  case Intrinsic::nvvm_add_rn_d:
1645  return {Instruction::FAdd, FTZ_Any};
1646  case Intrinsic::nvvm_add_rn_f:
1647  return {Instruction::FAdd, FTZ_MustBeOff};
1648  case Intrinsic::nvvm_add_rn_ftz_f:
1649  return {Instruction::FAdd, FTZ_MustBeOn};
1650  case Intrinsic::nvvm_mul_rn_d:
1651  return {Instruction::FMul, FTZ_Any};
1652  case Intrinsic::nvvm_mul_rn_f:
1653  return {Instruction::FMul, FTZ_MustBeOff};
1654  case Intrinsic::nvvm_mul_rn_ftz_f:
1655  return {Instruction::FMul, FTZ_MustBeOn};
1656  case Intrinsic::nvvm_div_rn_d:
1657  return {Instruction::FDiv, FTZ_Any};
1658  case Intrinsic::nvvm_div_rn_f:
1659  return {Instruction::FDiv, FTZ_MustBeOff};
1660  case Intrinsic::nvvm_div_rn_ftz_f:
1661  return {Instruction::FDiv, FTZ_MustBeOn};
1662 
1663  // The remainder of cases are NVVM intrinsics that map to LLVM idioms, but
1664  // need special handling.
1665  //
1666  // We seem to be missing intrinsics for rcp.approx.{ftz.}f32, which is just
1667  // as well.
1668  case Intrinsic::nvvm_rcp_rn_d:
1669  return {SPC_Reciprocal, FTZ_Any};
1670  case Intrinsic::nvvm_rcp_rn_f:
1671  return {SPC_Reciprocal, FTZ_MustBeOff};
1672  case Intrinsic::nvvm_rcp_rn_ftz_f:
1673  return {SPC_Reciprocal, FTZ_MustBeOn};
1674 
1675  // We do not currently simplify intrinsics that give an approximate answer.
1676  // These include:
1677  //
1678  // - nvvm_cos_approx_{f,ftz_f}
1679  // - nvvm_ex2_approx_{d,f,ftz_f}
1680  // - nvvm_lg2_approx_{d,f,ftz_f}
1681  // - nvvm_sin_approx_{f,ftz_f}
1682  // - nvvm_sqrt_approx_{f,ftz_f}
1683  // - nvvm_rsqrt_approx_{d,f,ftz_f}
1684  // - nvvm_div_approx_{ftz_d,ftz_f,f}
1685  // - nvvm_rcp_approx_ftz_d
1686  //
1687  // Ideally we'd encode them as e.g. "fast call @llvm.cos", where "fast"
1688  // means that fastmath is enabled in the intrinsic. Unfortunately only
1689  // binary operators (currently) have a fastmath bit in SelectionDAG, so this
1690  // information gets lost and we can't select on it.
1691  //
1692  // TODO: div and rcp are lowered to a binary op, so these we could in theory
1693  // lower them to "fast fdiv".
1694 
1695  default:
1696  return {};
1697  }
1698  }();
1699 
1700  // If Action.FtzRequirementTy is not satisfied by the module's ftz state, we
1701  // can bail out now. (Notice that in the case that IID is not an NVVM
1702  // intrinsic, we don't have to look up any module metadata, as
1703  // FtzRequirementTy will be FTZ_Any.)
1704  if (Action.FtzRequirement != FTZ_Any) {
1705  bool FtzEnabled =
1706  II->getFunction()->getFnAttribute("nvptx-f32ftz").getValueAsString() ==
1707  "true";
1708 
1709  if (FtzEnabled != (Action.FtzRequirement == FTZ_MustBeOn))
1710  return nullptr;
1711  }
1712 
1713  // Simplify to target-generic intrinsic.
1714  if (Action.IID) {
1715  SmallVector<Value *, 4> Args(II->arg_operands());
1716  // All the target-generic intrinsics currently of interest to us have one
1717  // type argument, equal to that of the nvvm intrinsic's argument.
1718  Type *Tys[] = {II->getArgOperand(0)->getType()};
1719  return CallInst::Create(
1720  Intrinsic::getDeclaration(II->getModule(), *Action.IID, Tys), Args);
1721  }
1722 
1723  // Simplify to target-generic binary op.
1724  if (Action.BinaryOp)
1725  return BinaryOperator::Create(*Action.BinaryOp, II->getArgOperand(0),
1726  II->getArgOperand(1), II->getName());
1727 
1728  // Simplify to target-generic cast op.
1729  if (Action.CastOp)
1730  return CastInst::Create(*Action.CastOp, II->getArgOperand(0), II->getType(),
1731  II->getName());
1732 
1733  // All that's left are the special cases.
1734  if (!Action.Special)
1735  return nullptr;
1736 
1737  switch (*Action.Special) {
1738  case SPC_Reciprocal:
1739  // Simplify reciprocal.
1740  return BinaryOperator::Create(
1741  Instruction::FDiv, ConstantFP::get(II->getArgOperand(0)->getType(), 1),
1742  II->getArgOperand(0), II->getName());
1743  }
1744  llvm_unreachable("All SpecialCase enumerators should be handled in switch.");
1745 }
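 // Illustrative sketch (added for exposition): under the table above, a call
 // such as
 //
 //   %r = call float @llvm.nvvm.ceil.f(float %x)
 //
 // is rewritten to the target-generic
 //
 //   %r = call float @llvm.ceil.f32(float %x)
 //
 // but only after checking that the function's "nvptx-f32ftz" attribute
 // satisfies the FTZ_MustBeOff requirement recorded for nvvm_ceil_f.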
1746 
1747 Instruction *InstCombiner::visitVAStartInst(VAStartInst &I) {
1748  removeTriviallyEmptyRange(I, Intrinsic::vastart, Intrinsic::vaend, *this);
1749  return nullptr;
1750 }
1751 
1752 Instruction *InstCombiner::visitVACopyInst(VACopyInst &I) {
1753  removeTriviallyEmptyRange(I, Intrinsic::vacopy, Intrinsic::vaend, *this);
1754  return nullptr;
1755 }
1756 
1757 static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
1758  assert(Call.getNumArgOperands() > 1 && "Need at least 2 args to swap");
1759  Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
1760  if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
1761  Call.setArgOperand(0, Arg1);
1762  Call.setArgOperand(1, Arg0);
1763  return &Call;
1764  }
1765  return nullptr;
1766 }
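 // Illustrative sketch (added for exposition): this helper normalizes
 // commutative calls so later folds only need to match one operand order,
 // e.g.
 //
 //   %r = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 42, i32 %x)
 //   ; becomes
 //   %r = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 42)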
1767 
1768 Instruction *InstCombiner::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
1769  WithOverflowInst *WO = cast<WithOverflowInst>(II);
1770  Value *OperationResult = nullptr;
1771  Constant *OverflowResult = nullptr;
1772  if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
1773  WO->getRHS(), *WO, OperationResult, OverflowResult))
1774  return CreateOverflowTuple(WO, OperationResult, OverflowResult);
1775  return nullptr;
1776 }
1777 
1778 /// CallInst simplification. This mostly only handles folding of intrinsic
1779 /// instructions. For normal calls, it allows visitCallBase to do the heavy
1780 /// lifting.
1781 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
1782  if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
1783  return replaceInstUsesWith(CI, V);
1784 
1785  if (isFreeCall(&CI, &TLI))
1786  return visitFree(CI);
1787 
1788  // If the caller function is nounwind, mark the call as nounwind, even if the
1789  // callee isn't.
1790  if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
1791  CI.setDoesNotThrow();
1792  return &CI;
1793  }
1794 
1795  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
1796  if (!II) return visitCallBase(CI);
1797 
1798  // Intrinsics cannot occur in an invoke or a callbr, so handle them here
1799  // instead of in visitCallBase.
1800  if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
1801  bool Changed = false;
1802 
1803  // memmove/cpy/set of zero bytes is a noop.
1804  if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
1805  if (NumBytes->isNullValue())
1806  return eraseInstFromFunction(CI);
1807 
1808  if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
1809  if (CI->getZExtValue() == 1) {
1810  // Replace the instruction with just byte operations. We would
1811  // transform other cases to loads/stores, but we don't know if
1812  // alignment is sufficient.
1813  }
1814  }
1815 
1816  // No other transformations apply to volatile transfers.
1817  if (auto *M = dyn_cast<MemIntrinsic>(MI))
1818  if (M->isVolatile())
1819  return nullptr;
1820 
1821  // If we have a memmove and the source operation is a constant global,
1822  // then the source and dest pointers can't alias, so we can change this
1823  // into a call to memcpy.
1824  if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
1825  if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
1826  if (GVSrc->isConstant()) {
1827  Module *M = CI.getModule();
1828  Intrinsic::ID MemCpyID =
1829  isa<AtomicMemMoveInst>(MMI)
1830  ? Intrinsic::memcpy_element_unordered_atomic
1831  : Intrinsic::memcpy;
1832  Type *Tys[3] = { CI.getArgOperand(0)->getType(),
1833  CI.getArgOperand(1)->getType(),
1834  CI.getArgOperand(2)->getType() };
1835  CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
1836  Changed = true;
1837  }
1838  }
1839 
1840  if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1841  // memmove(x,x,size) -> noop.
1842  if (MTI->getSource() == MTI->getDest())
1843  return eraseInstFromFunction(CI);
1844  }
1845 
1846  // If we can determine a pointer alignment that is bigger than currently
1847  // set, update the alignment.
1848  if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1849  if (Instruction *I = SimplifyAnyMemTransfer(MTI))
1850  return I;
1851  } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
1852  if (Instruction *I = SimplifyAnyMemSet(MSI))
1853  return I;
1854  }
1855 
1856  if (Changed) return II;
1857  }
1858 
1859  // For vector result intrinsics, use the generic demanded vector support.
1860  if (II->getType()->isVectorTy()) {
1861  auto VWidth = II->getType()->getVectorNumElements();
1862  APInt UndefElts(VWidth, 0);
1863  APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
1864  if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
1865  if (V != II)
1866  return replaceInstUsesWith(*II, V);
1867  return II;
1868  }
1869  }
1870 
1871  if (Instruction *I = SimplifyNVVMIntrinsic(II, *this))
1872  return I;
1873 
1874  auto SimplifyDemandedVectorEltsLow = [this](Value *Op, unsigned Width,
1875  unsigned DemandedWidth) {
1876  APInt UndefElts(Width, 0);
1877  APInt DemandedElts = APInt::getLowBitsSet(Width, DemandedWidth);
1878  return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
1879  };
1880 
1881  Intrinsic::ID IID = II->getIntrinsicID();
1882  switch (IID) {
1883  default: break;
1884  case Intrinsic::objectsize:
1885  if (Value *V = lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
1886  return replaceInstUsesWith(CI, V);
1887  return nullptr;
1888  case Intrinsic::bswap: {
1889  Value *IIOperand = II->getArgOperand(0);
1890  Value *X = nullptr;
1891 
1892  // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
1893  if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
1894  unsigned C = X->getType()->getPrimitiveSizeInBits() -
1895  IIOperand->getType()->getPrimitiveSizeInBits();
1896  Value *CV = ConstantInt::get(X->getType(), C);
1897  Value *V = Builder.CreateLShr(X, CV);
1898  return new TruncInst(V, IIOperand->getType());
1899  }
1900  break;
1901  }
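 // Illustrative sketch (added for exposition): with %x : i64 truncated to
 // i32, C = 64 - 32 = 32, so the sequence
 //
 //   %a = call i64 @llvm.bswap.i64(i64 %x)
 //   %b = trunc i64 %a to i32
 //   %r = call i32 @llvm.bswap.i32(i32 %b)
 //
 // folds to
 //
 //   %s = lshr i64 %x, 32
 //   %r = trunc i64 %s to i32
 //
 // because the two byte swaps cancel and only the high half of %x survives.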
1902  case Intrinsic::masked_load:
1903  if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
1904  return replaceInstUsesWith(CI, SimplifiedMaskedOp);
1905  break;
1906  case Intrinsic::masked_store:
1907  return simplifyMaskedStore(*II);
1908  case Intrinsic::masked_gather:
1909  return simplifyMaskedGather(*II);
1910  case Intrinsic::masked_scatter:
1911  return simplifyMaskedScatter(*II);
1912  case Intrinsic::launder_invariant_group:
1913  case Intrinsic::strip_invariant_group:
1914  if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
1915  return replaceInstUsesWith(*II, SkippedBarrier);
1916  break;
1917  case Intrinsic::powi:
1918  if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
1919  // 0 and 1 are handled in instsimplify
1920 
1921  // powi(x, -1) -> 1/x
1922  if (Power->isMinusOne())
1923  return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
1924  II->getArgOperand(0));
1925  // powi(x, 2) -> x*x
1926  if (Power->equalsInt(2))
1927  return BinaryOperator::CreateFMul(II->getArgOperand(0),
1928  II->getArgOperand(0));
1929  }
1930  break;
1931 
1932  case Intrinsic::cttz:
1933  case Intrinsic::ctlz:
1934  if (auto *I = foldCttzCtlz(*II, *this))
1935  return I;
1936  break;
1937 
1938  case Intrinsic::ctpop:
1939  if (auto *I = foldCtpop(*II, *this))
1940  return I;
1941  break;
1942 
1943  case Intrinsic::fshl:
1944  case Intrinsic::fshr: {
1945  Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
1946  Type *Ty = II->getType();
1947  unsigned BitWidth = Ty->getScalarSizeInBits();
1948  Constant *ShAmtC;
1949  if (match(II->getArgOperand(2), m_Constant(ShAmtC)) &&
1950  !isa<ConstantExpr>(ShAmtC) && !ShAmtC->containsConstantExpression()) {
1951  // Canonicalize a shift amount constant operand to modulo the bit-width.
1952  Constant *WidthC = ConstantInt::get(Ty, BitWidth);
1953  Constant *ModuloC = ConstantExpr::getURem(ShAmtC, WidthC);
1954  if (ModuloC != ShAmtC) {
1955  II->setArgOperand(2, ModuloC);
1956  return II;
1957  }
1958  assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) ==
1960  "Shift amount expected to be modulo bitwidth");
1961 
1962  // Canonicalize funnel shift right by constant to funnel shift left. This
1963  // is not entirely arbitrary. For historical reasons, the backend may
1964  // recognize rotate left patterns but miss rotate right patterns.
1965  if (IID == Intrinsic::fshr) {
1966  // fshr X, Y, C --> fshl X, Y, (BitWidth - C)
1967  Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
1968  Module *Mod = II->getModule();
1969  Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty);
1970  return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
1971  }
1972  assert(IID == Intrinsic::fshl &&
1973  "All funnel shifts by simple constants should go left");
1974 
1975  // fshl(X, 0, C) --> shl X, C
1976  // fshl(X, undef, C) --> shl X, C
1977  if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef()))
1978  return BinaryOperator::CreateShl(Op0, ShAmtC);
1979 
1980  // fshl(0, X, C) --> lshr X, (BW-C)
1981  // fshl(undef, X, C) --> lshr X, (BW-C)
1982  if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef()))
1983  return BinaryOperator::CreateLShr(Op1,
1984  ConstantExpr::getSub(WidthC, ShAmtC));
1985 
1986  // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form)
1987  if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) {
1988  Module *Mod = II->getModule();
1989  Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty);
1990  return CallInst::Create(Bswap, { Op0 });
1991  }
1992  }
1993 
1994  // Left or right might be masked.
1995  if (SimplifyDemandedInstructionBits(*II))
1996  return &CI;
1997 
1998  // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
1999  // so only the low bits of the shift amount are demanded if the bitwidth is
2000  // a power-of-2.
2001  if (!isPowerOf2_32(BitWidth))
2002  break;
2003  APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
2004  KnownBits Op2Known(BitWidth);
2005  if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
2006  return &CI;
2007  break;
2008  }
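 // Illustrative sketch (added for exposition): on i32 the canonicalizations
 // above compose as
 //
 //   fshr i32 %x, %y, 35  -->  fshr i32 %x, %y, 3   ; shift amount mod 32
 //                        -->  fshl i32 %x, %y, 29  ; fshr C -> fshl (BW - C)
 //
 // and a 16-bit rotate spelled fshl i16 %x, %x, 8 is reduced to
 // bswap i16 %x.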
2009  case Intrinsic::uadd_with_overflow:
2010  case Intrinsic::sadd_with_overflow: {
2011  if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2012  return I;
2013  if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2014  return I;
2015 
2016  // Given 2 constant operands whose sum does not overflow:
2017  // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
2018  // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
2019  Value *X;
2020  const APInt *C0, *C1;
2021  Value *Arg0 = II->getArgOperand(0);
2022  Value *Arg1 = II->getArgOperand(1);
2023  bool IsSigned = IID == Intrinsic::sadd_with_overflow;
2024  bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
2025  : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
2026  if (HasNWAdd && match(Arg1, m_APInt(C1))) {
2027  bool Overflow;
2028  APInt NewC =
2029  IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
2030  if (!Overflow)
2031  return replaceInstUsesWith(
2032  *II, Builder.CreateBinaryIntrinsic(
2033  IID, X, ConstantInt::get(Arg1->getType(), NewC)));
2034  }
2035  break;
2036  }
2037 
2038  case Intrinsic::umul_with_overflow:
2039  case Intrinsic::smul_with_overflow:
2040  if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2041  return I;
2042  LLVM_FALLTHROUGH;
2043 
2044  case Intrinsic::usub_with_overflow:
2045  if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2046  return I;
2047  break;
2048 
2049  case Intrinsic::ssub_with_overflow: {
2050  if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2051  return I;
2052 
2053  Constant *C;
2054  Value *Arg0 = II->getArgOperand(0);
2055  Value *Arg1 = II->getArgOperand(1);
2056  // Given a constant C that is not the minimum signed value
2057  // for an integer of a given bit width:
2058  //
2059  // ssubo X, C -> saddo X, -C
2060  if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) {
2061  Value *NegVal = ConstantExpr::getNeg(C);
2062  // Build a saddo call that is equivalent to the discovered
2063  // ssubo call.
2064  return replaceInstUsesWith(
2065  *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
2066  Arg0, NegVal));
2067  }
2068 
2069  break;
2070  }
2071 
2072  case Intrinsic::uadd_sat:
2073  case Intrinsic::sadd_sat:
2074  if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2075  return I;
2076  LLVM_FALLTHROUGH;
2077  case Intrinsic::usub_sat:
2078  case Intrinsic::ssub_sat: {
2079  SaturatingInst *SI = cast<SaturatingInst>(II);
2080  Type *Ty = SI->getType();
2081  Value *Arg0 = SI->getLHS();
2082  Value *Arg1 = SI->getRHS();
2083 
2084  // Make use of known overflow information.
2085  OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(),
2086  Arg0, Arg1, SI);
2087  switch (OR) {
2088  case OverflowResult::MayOverflow:
2089  break;
2090  case OverflowResult::NeverOverflows:
2091  if (SI->isSigned())
2092  return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1);
2093  else
2094  return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1);
2095  case OverflowResult::AlwaysOverflowsLow: {
2096  unsigned BitWidth = Ty->getScalarSizeInBits();
2097  APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned());
2098  return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min));
2099  }
2100  case OverflowResult::AlwaysOverflowsHigh: {
2101  unsigned BitWidth = Ty->getScalarSizeInBits();
2102  APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned());
2103  return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max));
2104  }
2105  }
2106 
2107  // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
2108  Constant *C;
2109  if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
2110  C->isNotMinSignedValue()) {
2111  Value *NegVal = ConstantExpr::getNeg(C);
2112  return replaceInstUsesWith(
2113  *II, Builder.CreateBinaryIntrinsic(
2114  Intrinsic::sadd_sat, Arg0, NegVal));
2115  }
2116 
2117  // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
2118  // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
2119  // if Val and Val2 have the same sign
2120  if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
2121  Value *X;
2122  const APInt *Val, *Val2;
2123  APInt NewVal;
2124  bool IsUnsigned =
2125  IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
2126  if (Other->getIntrinsicID() == IID &&
2127  match(Arg1, m_APInt(Val)) &&
2128  match(Other->getArgOperand(0), m_Value(X)) &&
2129  match(Other->getArgOperand(1), m_APInt(Val2))) {
2130  if (IsUnsigned)
2131  NewVal = Val->uadd_sat(*Val2);
2132  else if (Val->isNonNegative() == Val2->isNonNegative()) {
2133  bool Overflow;
2134  NewVal = Val->sadd_ov(*Val2, Overflow);
2135  if (Overflow) {
2136  // Both adds together may add more than SignedMaxValue
2137  // without saturating the final result.
2138  break;
2139  }
2140  } else {
2141  // Cannot fold saturated addition with different signs.
2142  break;
2143  }
2144 
2145  return replaceInstUsesWith(
2146  *II, Builder.CreateBinaryIntrinsic(
2147  IID, X, ConstantInt::get(II->getType(), NewVal)));
2148  }
2149  }
2150  break;
2151  }
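 // Illustrative sketch (added for exposition): the nested-saturation fold
 // above combines, e.g.,
 //
 //   sadd.sat(sadd.sat(%x, 20), 30)  -->  sadd.sat(%x, 50)
 //
 // This is sound because 20 and 30 share a sign and their sum does not
 // overflow; with mixed signs, the inner clamp could fire where the merged
 // constant would not, so the fold bails out.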
2152 
2153  case Intrinsic::minnum:
2154  case Intrinsic::maxnum:
2155  case Intrinsic::minimum:
2156  case Intrinsic::maximum: {
2157  if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2158  return I;
2159  Value *Arg0 = II->getArgOperand(0);
2160  Value *Arg1 = II->getArgOperand(1);
2161  Value *X, *Y;
2162  if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
2163  (Arg0->hasOneUse() || Arg1->hasOneUse())) {
2164  // If both operands are negated, invert the call and negate the result:
2165  // min(-X, -Y) --> -(max(X, Y))
2166  // max(-X, -Y) --> -(min(X, Y))
2167  Intrinsic::ID NewIID;
2168  switch (IID) {
2169  case Intrinsic::maxnum:
2170  NewIID = Intrinsic::minnum;
2171  break;
2172  case Intrinsic::minnum:
2173  NewIID = Intrinsic::maxnum;
2174  break;
2175  case Intrinsic::maximum:
2176  NewIID = Intrinsic::minimum;
2177  break;
2178  case Intrinsic::minimum:
2179  NewIID = Intrinsic::maximum;
2180  break;
2181  default:
2182  llvm_unreachable("unexpected intrinsic ID");
2183  }
2184  Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
2185  Instruction *FNeg = BinaryOperator::CreateFNeg(NewCall);
2186  FNeg->copyIRFlags(II);
2187  return FNeg;
2188  }
2189 
2190  // m(m(X, C2), C1) -> m(X, C)
2191  const APFloat *C1, *C2;
2192  if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
2193  if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
2194  ((match(M->getArgOperand(0), m_Value(X)) &&
2195  match(M->getArgOperand(1), m_APFloat(C2))) ||
2196  (match(M->getArgOperand(1), m_Value(X)) &&
2197  match(M->getArgOperand(0), m_APFloat(C2))))) {
2198  APFloat Res(0.0);
2199  switch (IID) {
2200  case Intrinsic::maxnum:
2201  Res = maxnum(*C1, *C2);
2202  break;
2203  case Intrinsic::minnum:
2204  Res = minnum(*C1, *C2);
2205  break;
2206  case Intrinsic::maximum:
2207  Res = maximum(*C1, *C2);
2208  break;
2209  case Intrinsic::minimum:
2210  Res = minimum(*C1, *C2);
2211  break;
2212  default:
2213  llvm_unreachable("unexpected intrinsic ID");
2214  }
2215  Instruction *NewCall = Builder.CreateBinaryIntrinsic(
2216  IID, X, ConstantFP::get(Arg0->getType(), Res));
2217  NewCall->copyIRFlags(II);
2218  return replaceInstUsesWith(*II, NewCall);
2219  }
2220  }
2221 
2222  break;
2223  }
2224  case Intrinsic::fmuladd: {
2225  // Canonicalize fast fmuladd to the separate fmul + fadd.
2226  if (II->isFast()) {
2227  BuilderTy::FastMathFlagGuard Guard(Builder);
2228  Builder.setFastMathFlags(II->getFastMathFlags());
2229  Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
2230  II->getArgOperand(1));
2231  Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
2232  Add->takeName(II);
2233  return replaceInstUsesWith(*II, Add);
2234  }
2235 
2236  // Try to simplify the underlying FMul.
2237  if (Value *V = SimplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1),
2238  II->getFastMathFlags(),
2239  SQ.getWithInstruction(II))) {
2240  auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
2241  FAdd->copyFastMathFlags(II);
2242  return FAdd;
2243  }
2244 
2245  LLVM_FALLTHROUGH;
2246  }
2247  case Intrinsic::fma: {
2248  if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
2249  return I;
2250 
2251  // fma fneg(x), fneg(y), z -> fma x, y, z
2252  Value *Src0 = II->getArgOperand(0);
2253  Value *Src1 = II->getArgOperand(1);
2254  Value *X, *Y;
2255  if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
2256  II->setArgOperand(0, X);
2257  II->setArgOperand(1, Y);
2258  return II;
2259  }
2260 
2261  // fma fabs(x), fabs(x), z -> fma x, x, z
2262  if (match(Src0, m_FAbs(m_Value(X))) &&
2263  match(Src1, m_FAbs(m_Specific(X)))) {
2264  II->setArgOperand(0, X);
2265  II->setArgOperand(1, X);
2266  return II;
2267  }
2268 
2269  // Try to simplify the underlying FMul. We can only apply simplifications
2270  // that do not require rounding.
2271  if (Value *V = SimplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1),
2272  II->getFastMathFlags(),
2273  SQ.getWithInstruction(II))) {
2274  auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
2275  FAdd->copyFastMathFlags(II);
2276  return FAdd;
2277  }
2278 
2279  break;
2280  }
2281  case Intrinsic::fabs: {
2282  Value *Cond;
2283  Constant *LHS, *RHS;
2284  if (match(II->getArgOperand(0),
2285  m_Select(m_Value(Cond), m_Constant(LHS), m_Constant(RHS)))) {
2286  CallInst *Call0 = Builder.CreateCall(II->getCalledFunction(), {LHS});
2287  CallInst *Call1 = Builder.CreateCall(II->getCalledFunction(), {RHS});
2288  return SelectInst::Create(Cond, Call0, Call1);
2289  }
2290 
2291  LLVM_FALLTHROUGH;
2292  }
2293  case Intrinsic::ceil:
2294  case Intrinsic::floor:
2295  case Intrinsic::round:
2296  case Intrinsic::nearbyint:
2297  case Intrinsic::rint:
2298  case Intrinsic::trunc: {
2299  Value *ExtSrc;
2300  if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
2301  // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
2302  Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);
2303  return new FPExtInst(NarrowII, II->getType());
2304  }
2305  break;
2306  }
2307  case Intrinsic::cos:
2308  case Intrinsic::amdgcn_cos: {
2309  Value *X;
2310  Value *Src = II->getArgOperand(0);
2311  if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) {
2312  // cos(-x) -> cos(x)
2313  // cos(fabs(x)) -> cos(x)
2314  II->setArgOperand(0, X);
2315  return II;
2316  }
2317  break;
2318  }
2319  case Intrinsic::sin: {
2320  Value *X;
2321  if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
2322  // sin(-x) --> -sin(x)
2323  Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II);
2324  Instruction *FNeg = BinaryOperator::CreateFNeg(NewSin);
2325  FNeg->copyFastMathFlags(II);
2326  return FNeg;
2327  }
2328  break;
2329  }
2330  case Intrinsic::ppc_altivec_lvx:
2331  case Intrinsic::ppc_altivec_lvxl:
2332  // Turn PPC lvx -> load if the pointer is known aligned.
2333  if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
2334  &DT) >= 16) {
2335  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2336  PointerType::getUnqual(II->getType()));
2337  return new LoadInst(II->getType(), Ptr);
2338  }
2339  break;
2340  case Intrinsic::ppc_vsx_lxvw4x:
2341  case Intrinsic::ppc_vsx_lxvd2x: {
2342  // Turn PPC VSX loads into normal loads.
2343  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2344  PointerType::getUnqual(II->getType()));
2345  return new LoadInst(II->getType(), Ptr, Twine(""), false, 1);
2346  }
2347  case Intrinsic::ppc_altivec_stvx:
2348  case Intrinsic::ppc_altivec_stvxl:
2349  // Turn stvx -> store if the pointer is known aligned.
2350  if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
2351  &DT) >= 16) {
2352  Type *OpPtrTy =
2353  PointerType::getUnqual(II->getArgOperand(0)->getType());
2354  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2355  return new StoreInst(II->getArgOperand(0), Ptr);
2356  }
2357  break;
2358  case Intrinsic::ppc_vsx_stxvw4x:
2359  case Intrinsic::ppc_vsx_stxvd2x: {
2360  // Turn PPC VSX stores into normal stores.
2361  Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
2362  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2363  return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
2364  }
2365  case Intrinsic::ppc_qpx_qvlfs:
2366  // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
2367  if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
2368  &DT) >= 16) {
2369  Type *VTy = VectorType::get(Builder.getFloatTy(),
2370  II->getType()->getVectorNumElements());
2371  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2372  PointerType::getUnqual(VTy));
2373  Value *Load = Builder.CreateLoad(VTy, Ptr);
2374  return new FPExtInst(Load, II->getType());
2375  }
2376  break;
2377  case Intrinsic::ppc_qpx_qvlfd:
2378  // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
2379  if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC,
2380  &DT) >= 32) {
2381  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2382  PointerType::getUnqual(II->getType()));
2383  return new LoadInst(II->getType(), Ptr);
2384  }
2385  break;
2386  case Intrinsic::ppc_qpx_qvstfs:
2387  // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
2388  if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
2389  &DT) >= 16) {
2390  Type *VTy = VectorType::get(Builder.getFloatTy(),
2391  II->getArgOperand(0)->getType()->getVectorNumElements());
2392  Value *TOp = Builder.CreateFPTrunc(II->getArgOperand(0), VTy);
2393  Type *OpPtrTy = PointerType::getUnqual(VTy);
2394  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2395  return new StoreInst(TOp, Ptr);
2396  }
2397  break;
2398  case Intrinsic::ppc_qpx_qvstfd:
2399  // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
2400  if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, &AC,
2401  &DT) >= 32) {
2402  Type *OpPtrTy =
2403  PointerType::getUnqual(II->getArgOperand(0)->getType());
2404  Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
2405  return new StoreInst(II->getArgOperand(0), Ptr);
2406  }
2407  break;
2408 
2409  case Intrinsic::x86_bmi_bextr_32:
2410  case Intrinsic::x86_bmi_bextr_64:
2411  case Intrinsic::x86_tbm_bextri_u32:
2412  case Intrinsic::x86_tbm_bextri_u64:
2413  // If the RHS is a constant we can try some simplifications.
2414  if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2415  uint64_t Shift = C->getZExtValue();
2416  uint64_t Length = (Shift >> 8) & 0xff;
2417  Shift &= 0xff;
2418  unsigned BitWidth = II->getType()->getIntegerBitWidth();
2419  // If the length is 0 or the shift is out of range, replace with zero.
2420  if (Length == 0 || Shift >= BitWidth)
2421  return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2422  // If the LHS is also a constant, we can completely constant fold this.
2423  if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2424  uint64_t Result = InC->getZExtValue() >> Shift;
2425  if (Length > BitWidth)
2426  Length = BitWidth;
2427  Result &= maskTrailingOnes<uint64_t>(Length);
2428  return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2429  }
2430  // TODO should we turn this into 'and' if shift is 0? Or 'shl' if we
2431  // are only masking bits that a shift already cleared?
2432  }
2433  break;
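 // Illustrative sketch (added for exposition): in plain C++ terms the
 // constant fold above computes approximately
 //
 //   uint64_t bextr(uint64_t Src, uint64_t Ctl, unsigned BitWidth) {
 //     uint64_t Shift = Ctl & 0xff, Length = (Ctl >> 8) & 0xff;
 //     if (Length == 0 || Shift >= BitWidth)
 //       return 0;
 //     Length = std::min<uint64_t>(Length, BitWidth);
 //     return (Src >> Shift) & maskTrailingOnes<uint64_t>(Length);
 //   }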
2434 
2435  case Intrinsic::x86_bmi_bzhi_32:
2436  case Intrinsic::x86_bmi_bzhi_64:
2437  // If the RHS is a constant we can try some simplifications.
2438  if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2439  uint64_t Index = C->getZExtValue() & 0xff;
2440  unsigned BitWidth = II->getType()->getIntegerBitWidth();
2441  if (Index >= BitWidth)
2442  return replaceInstUsesWith(CI, II->getArgOperand(0));
2443  if (Index == 0)
2444  return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2445  // If the LHS is also a constant, we can completely constant fold this.
2446  if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2447  uint64_t Result = InC->getZExtValue();
2448  Result &= maskTrailingOnes<uint64_t>(Index);
2449  return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2450  }
2451  // TODO should we convert this to an AND if the RHS is constant?
2452  }
2453  break;
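 // Illustrative sketch (added for exposition): BZHI zeroes the bits at
 // position Index and above, so the fold above computes roughly
 //
 //   uint64_t bzhi(uint64_t Src, uint64_t Ctl, unsigned BitWidth) {
 //     uint64_t Index = Ctl & 0xff;
 //     if (Index >= BitWidth)
 //       return Src;                                   // pass-through
 //     return Src & maskTrailingOnes<uint64_t>(Index); // keep low Index bits
 //   }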
2454 
2455  case Intrinsic::x86_vcvtph2ps_128:
2456  case Intrinsic::x86_vcvtph2ps_256: {
2457  auto Arg = II->getArgOperand(0);
2458  auto ArgType = cast<VectorType>(Arg->getType());
2459  auto RetType = cast<VectorType>(II->getType());
2460  unsigned ArgWidth = ArgType->getNumElements();
2461  unsigned RetWidth = RetType->getNumElements();
2462  assert(RetWidth <= ArgWidth && "Unexpected input/return vector widths");
2463  assert(ArgType->isIntOrIntVectorTy() &&
2464  ArgType->getScalarSizeInBits() == 16 &&
2465  "CVTPH2PS input type should be 16-bit integer vector");
2466  assert(RetType->getScalarType()->isFloatTy() &&
2467  "CVTPH2PS output type should be 32-bit float vector");
2468 
2469  // Constant folding: Convert to generic half to single conversion.
2470  if (isa<ConstantAggregateZero>(Arg))
2471  return replaceInstUsesWith(*II, ConstantAggregateZero::get(RetType));
2472 
2473  if (isa<ConstantDataVector>(Arg)) {
2474  auto VectorHalfAsShorts = Arg;
2475  if (RetWidth < ArgWidth) {
2476  SmallVector<uint32_t, 8> SubVecMask;
2477  for (unsigned i = 0; i != RetWidth; ++i)
2478  SubVecMask.push_back((int)i);
2479  VectorHalfAsShorts = Builder.CreateShuffleVector(
2480  Arg, UndefValue::get(ArgType), SubVecMask);
2481  }
2482 
2483  auto VectorHalfType =
2484  VectorType::get(Type::getHalfTy(II->getContext()), RetWidth);
2485  auto VectorHalfs =
2486  Builder.CreateBitCast(VectorHalfAsShorts, VectorHalfType);
2487  auto VectorFloats = Builder.CreateFPExt(VectorHalfs, RetType);
2488  return replaceInstUsesWith(*II, VectorFloats);
2489  }
2490 
2491  // We only use the lowest lanes of the argument.
2492  if (Value *V = SimplifyDemandedVectorEltsLow(Arg, ArgWidth, RetWidth)) {
2493  II->setArgOperand(0, V);
2494  return II;
2495  }
2496  break;
2497  }
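 // Illustrative sketch (added for exposition): for the 128-bit form, a
 // constant <8 x i16> argument is handled by keeping only the demanded low
 // lanes and reinterpreting them as half-precision values, roughly:
 //
 //   %sub  = shufflevector <8 x i16> %c, undef, <4 x i32> <0, 1, 2, 3>
 //   %half = bitcast <4 x i16> %sub to <4 x half>
 //   %ret  = fpext <4 x half> %half to <4 x float>
 //
 // after which ordinary constant folding finishes the job.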
2498 
2499  case Intrinsic::x86_sse_cvtss2si:
2500  case Intrinsic::x86_sse_cvtss2si64:
2501  case Intrinsic::x86_sse_cvttss2si:
2502  case Intrinsic::x86_sse_cvttss2si64:
2503  case Intrinsic::x86_sse2_cvtsd2si:
2504  case Intrinsic::x86_sse2_cvtsd2si64:
2505  case Intrinsic::x86_sse2_cvttsd2si:
2506  case Intrinsic::x86_sse2_cvttsd2si64:
2507  case Intrinsic::x86_avx512_vcvtss2si32:
2508  case Intrinsic::x86_avx512_vcvtss2si64:
2509  case Intrinsic::x86_avx512_vcvtss2usi32:
2510  case Intrinsic::x86_avx512_vcvtss2usi64:
2511  case Intrinsic::x86_avx512_vcvtsd2si32:
2512  case Intrinsic::x86_avx512_vcvtsd2si64:
2513  case Intrinsic::x86_avx512_vcvtsd2usi32:
2514  case Intrinsic::x86_avx512_vcvtsd2usi64:
2515  case Intrinsic::x86_avx512_cvttss2si:
2516  case Intrinsic::x86_avx512_cvttss2si64:
2517  case Intrinsic::x86_avx512_cvttss2usi:
2518  case Intrinsic::x86_avx512_cvttss2usi64:
2519  case Intrinsic::x86_avx512_cvttsd2si:
2520  case Intrinsic::x86_avx512_cvttsd2si64:
2521  case Intrinsic::x86_avx512_cvttsd2usi:
2522  case Intrinsic::x86_avx512_cvttsd2usi64: {
2523  // These intrinsics only demand the 0th element of their input vectors. If
2524  // we can simplify the input based on that, do so now.
2525  Value *Arg = II->getArgOperand(0);
2526  unsigned VWidth = Arg->getType()->getVectorNumElements();
2527  if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1)) {
2528  II->setArgOperand(0, V);
2529  return II;
2530  }
2531  break;
2532  }
2533 
2534  case Intrinsic::x86_mmx_pmovmskb:
2535  case Intrinsic::x86_sse_movmsk_ps:
2536  case Intrinsic::x86_sse2_movmsk_pd:
2537  case Intrinsic::x86_sse2_pmovmskb_128:
2538  case Intrinsic::x86_avx_movmsk_pd_256:
2539  case Intrinsic::x86_avx_movmsk_ps_256:
2540  case Intrinsic::x86_avx2_pmovmskb:
2541  if (Value *V = simplifyX86movmsk(*II, Builder))
2542  return replaceInstUsesWith(*II, V);
2543  break;
2544 
2545  case Intrinsic::x86_sse_comieq_ss:
2546  case Intrinsic::x86_sse_comige_ss:
2547  case Intrinsic::x86_sse_comigt_ss:
2548  case Intrinsic::x86_sse_comile_ss:
2549  case Intrinsic::x86_sse_comilt_ss:
2550  case Intrinsic::x86_sse_comineq_ss:
2551  case Intrinsic::x86_sse_ucomieq_ss:
2552  case Intrinsic::x86_sse_ucomige_ss:
2553  case Intrinsic::x86_sse_ucomigt_ss:
2554  case Intrinsic::x86_sse_ucomile_ss:
2555  case Intrinsic::x86_sse_ucomilt_ss:
2556  case Intrinsic::x86_sse_ucomineq_ss:
2557  case Intrinsic::x86_sse2_comieq_sd:
2558  case Intrinsic::x86_sse2_comige_sd:
2559  case Intrinsic::x86_sse2_comigt_sd:
2560  case Intrinsic::x86_sse2_comile_sd:
2561  case Intrinsic::x86_sse2_comilt_sd:
2562  case Intrinsic::x86_sse2_comineq_sd:
2563  case Intrinsic::x86_sse2_ucomieq_sd:
2564  case Intrinsic::x86_sse2_ucomige_sd:
2565  case Intrinsic::x86_sse2_ucomigt_sd:
2566  case Intrinsic::x86_sse2_ucomile_sd:
2567  case Intrinsic::x86_sse2_ucomilt_sd:
2568  case Intrinsic::x86_sse2_ucomineq_sd:
2569  case Intrinsic::x86_avx512_vcomi_ss:
2570  case Intrinsic::x86_avx512_vcomi_sd:
2571  case Intrinsic::x86_avx512_mask_cmp_ss:
2572  case Intrinsic::x86_avx512_mask_cmp_sd: {
2573  // These intrinsics only demand the 0th element of their input vectors. If
2574  // we can simplify the input based on that, do so now.
2575  bool MadeChange = false;
2576  Value *Arg0 = II->getArgOperand(0);
2577  Value *Arg1 = II->getArgOperand(1);
2578  unsigned VWidth = Arg0->getType()->getVectorNumElements();
2579  if (Value *V = SimplifyDemandedVectorEltsLow(Arg0, VWidth, 1)) {
2580  II->setArgOperand(0, V);
2581  MadeChange = true;
2582  }
2583  if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, 1)) {
2584  II->setArgOperand(1, V);
2585  MadeChange = true;
2586  }
2587  if (MadeChange)
2588  return II;
2589  break;
2590  }
2591  case Intrinsic::x86_avx512_cmp_pd_128:
2592  case Intrinsic::x86_avx512_cmp_pd_256:
2593  case Intrinsic::x86_avx512_cmp_pd_512:
2594  case Intrinsic::x86_avx512_cmp_ps_128:
2595  case Intrinsic::x86_avx512_cmp_ps_256:
2596  case Intrinsic::x86_avx512_cmp_ps_512: {
2597  // Folding cmp(sub(a,b),0) -> cmp(a,b) and cmp(0,sub(a,b)) -> cmp(b,a)
2598  Value *Arg0 = II->getArgOperand(0);
2599  Value *Arg1 = II->getArgOperand(1);
2600  bool Arg0IsZero = match(Arg0, m_PosZeroFP());
2601  if (Arg0IsZero)
2602  std::swap(Arg0, Arg1);
2603  Value *A, *B;
2604  // This fold requires only the NINF (no +/- infinities) flag, since
2605  // inf minus inf is NaN.
2606  // NSZ (no signed zeros) is not needed because zeros of any sign are
2607  // equal for both compares.
2608  // NNAN is not needed because NaNs compare the same for both compares.
2609  // The compare intrinsic uses the above assumptions and therefore
2610  // doesn't require additional flags.
2611  if ((match(Arg0, m_OneUse(m_FSub(m_Value(A), m_Value(B)))) &&
2612  match(Arg1, m_PosZeroFP()) && isa<Instruction>(Arg0) &&
2613  cast<Instruction>(Arg0)->getFastMathFlags().noInfs())) {
2614  if (Arg0IsZero)
2615  std::swap(A, B);
2616  II->setArgOperand(0, A);
2617  II->setArgOperand(1, B);
2618  return II;
2619  }
2620  break;
2621  }
2622 
2623  case Intrinsic::x86_avx512_add_ps_512:
2624  case Intrinsic::x86_avx512_div_ps_512:
2625  case Intrinsic::x86_avx512_mul_ps_512:
2626  case Intrinsic::x86_avx512_sub_ps_512:
2627  case Intrinsic::x86_avx512_add_pd_512:
2628  case Intrinsic::x86_avx512_div_pd_512:
2629  case Intrinsic::x86_avx512_mul_pd_512:
2630  case Intrinsic::x86_avx512_sub_pd_512:
2631  // If the rounding mode is CUR_DIRECTION(4) we can turn these into regular
2632  // IR operations.
2633  if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
2634  if (R->getValue() == 4) {
2635  Value *Arg0 = II->getArgOperand(0);
2636  Value *Arg1 = II->getArgOperand(1);
2637 
2638  Value *V;
2639  switch (IID) {
2640  default: llvm_unreachable("Case stmts out of sync!");
2641  case Intrinsic::x86_avx512_add_ps_512:
2642  case Intrinsic::x86_avx512_add_pd_512:
2643  V = Builder.CreateFAdd(Arg0, Arg1);
2644  break;
2645  case Intrinsic::x86_avx512_sub_ps_512:
2646  case Intrinsic::x86_avx512_sub_pd_512:
2647  V = Builder.CreateFSub(Arg0, Arg1);
2648  break;
2649  case Intrinsic::x86_avx512_mul_ps_512:
2650  case Intrinsic::x86_avx512_mul_pd_512:
2651  V = Builder.CreateFMul(Arg0, Arg1);
2652  break;
2653  case Intrinsic::x86_avx512_div_ps_512:
2654  case Intrinsic::x86_avx512_div_pd_512:
2655  V = Builder.CreateFDiv(Arg0, Arg1);
2656  break;
2657  }
2658 
2659  return replaceInstUsesWith(*II, V);
2660  }
2661  }
2662  break;
2663 
2664  case Intrinsic::x86_avx512_mask_add_ss_round:
2665  case Intrinsic::x86_avx512_mask_div_ss_round:
2666  case Intrinsic::x86_avx512_mask_mul_ss_round:
2667  case Intrinsic::x86_avx512_mask_sub_ss_round:
2668  case Intrinsic::x86_avx512_mask_add_sd_round:
2669  case Intrinsic::x86_avx512_mask_div_sd_round:
2670  case Intrinsic::x86_avx512_mask_mul_sd_round:
2671  case Intrinsic::x86_avx512_mask_sub_sd_round:
2672  // If the rounding mode is CUR_DIRECTION(4) we can turn these into regular
2673  // IR operations.
2674  if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(4))) {
2675  if (R->getValue() == 4) {
2676  // Extract the element as scalars.
2677  Value *Arg0 = II->getArgOperand(0);
2678  Value *Arg1 = II->getArgOperand(1);
2679  Value *LHS = Builder.CreateExtractElement(Arg0, (uint64_t)0);
2680  Value *RHS = Builder.CreateExtractElement(Arg1, (uint64_t)0);
2681 
2682  Value *V;
2683  switch (IID) {
2684  default: llvm_unreachable("Case stmts out of sync!");
2685  case Intrinsic::x86_avx512_mask_add_ss_round:
2686  case Intrinsic::x86_avx512_mask_add_sd_round:
2687  V = Builder.CreateFAdd(LHS, RHS);
2688  break;
2689  case Intrinsic::x86_avx512_mask_sub_ss_round:
2690  case Intrinsic::x86_avx512_mask_sub_sd_round:
2691  V = Builder.CreateFSub(LHS, RHS);
2692  break;
2693  case Intrinsic::x86_avx512_mask_mul_ss_round:
2694  case Intrinsic::x86_avx512_mask_mul_sd_round:
2695  V = Builder.CreateFMul(LHS, RHS);
2696  break;
2697  case Intrinsic::x86_avx512_mask_div_ss_round:
2698  case Intrinsic::x86_avx512_mask_div_sd_round:
2699  V = Builder.CreateFDiv(LHS, RHS);
2700  break;
2701  }
2702 
2703  // Handle the masking aspect of the intrinsic.
2704  Value *Mask = II->getArgOperand(3);
2705  auto *C = dyn_cast<ConstantInt>(Mask);
2706  // We don't need a select if we know the mask bit is a 1.
2707  if (!C || !C->getValue()[0]) {
2708  // Cast the mask to an i1 vector and then extract the lowest element.
2709  auto *MaskTy = VectorType::get(Builder.getInt1Ty(),
2710  cast<IntegerType>(Mask->getType())->getBitWidth());
2711  Mask = Builder.CreateBitCast(Mask, MaskTy);
2712  Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
2713  // Extract the lowest element from the passthru operand.
2714  Value *Passthru = Builder.CreateExtractElement(II->getArgOperand(2),
2715  (uint64_t)0);
2716  V = Builder.CreateSelect(Mask, V, Passthru);
2717  }
2718 
2719  // Insert the result back into the original argument 0.
2720  V = Builder.CreateInsertElement(Arg0, V, (uint64_t)0);
2721 
2722  return replaceInstUsesWith(*II, V);
2723  }
2724  }
2725  break;
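 // Illustrative sketch (added for exposition): for the masked scalar ops
 // above with rounding mode 4 (CUR_DIRECTION), lane 0 is computed as a plain
 // scalar op and blended under bit 0 of the mask, schematically:
 //
 //   %lhs = extractelement %a, i64 0
 //   %rhs = extractelement %b, i64 0
 //   %op  = fadd %lhs, %rhs                     ; or fsub/fmul/fdiv
 //   %sel = select (bit 0 of %mask), %op, (extractelement %passthru, i64 0)
 //   %ret = insertelement %a, %sel, i64 0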
2726 
2727  // Constant fold ashr( <A x Bi>, Ci ).
2728  // Constant fold lshr( <A x Bi>, Ci ).
2729  // Constant fold shl( <A x Bi>, Ci ).
2730  case Intrinsic::x86_sse2_psrai_d:
2731  case Intrinsic::x86_sse2_psrai_w:
2732  case Intrinsic::x86_avx2_psrai_d:
2733  case Intrinsic::x86_avx2_psrai_w:
2734  case Intrinsic::x86_avx512_psrai_q_128:
2735  case Intrinsic::x86_avx512_psrai_q_256:
2736  case Intrinsic::x86_avx512_psrai_d_512:
2737  case Intrinsic::x86_avx512_psrai_q_512:
2738  case Intrinsic::x86_avx512_psrai_w_512:
2739  case Intrinsic::x86_sse2_psrli_d:
2740  case Intrinsic::x86_sse2_psrli_q:
2741  case Intrinsic::x86_sse2_psrli_w:
2742  case Intrinsic::x86_avx2_psrli_d:
2743  case Intrinsic::x86_avx2_psrli_q:
2744  case Intrinsic::x86_avx2_psrli_w:
2745  case Intrinsic::x86_avx512_psrli_d_512:
2746  case Intrinsic::x86_avx512_psrli_q_512:
2747  case Intrinsic::x86_avx512_psrli_w_512:
2748  case Intrinsic::x86_sse2_pslli_d:
2749  case Intrinsic::x86_sse2_pslli_q:
2750  case Intrinsic::x86_sse2_pslli_w:
2751  case Intrinsic::x86_avx2_pslli_d:
2752  case Intrinsic::x86_avx2_pslli_q:
2753  case Intrinsic::x86_avx2_pslli_w:
2754  case Intrinsic::x86_avx512_pslli_d_512:
2755  case Intrinsic::x86_avx512_pslli_q_512:
2756  case Intrinsic::x86_avx512_pslli_w_512:
2757  if (Value *V = simplifyX86immShift(*II, Builder))
2758  return replaceInstUsesWith(*II, V);
2759  break;
2760 
2761  case Intrinsic::x86_sse2_psra_d:
2762  case Intrinsic::x86_sse2_psra_w:
2763  case Intrinsic::x86_avx2_psra_d:
2764  case Intrinsic::x86_avx2_psra_w:
2765  case Intrinsic::x86_avx512_psra_q_128:
2766  case Intrinsic::x86_avx512_psra_q_256:
2767  case Intrinsic::x86_avx512_psra_d_512:
2768  case Intrinsic::x86_avx512_psra_q_512:
2769  case Intrinsic::x86_avx512_psra_w_512:
2770  case Intrinsic::x86_sse2_psrl_d:
2771  case Intrinsic::x86_sse2_psrl_q:
2772  case Intrinsic::x86_sse2_psrl_w:
2773  case Intrinsic::x86_avx2_psrl_d:
2774  case Intrinsic::x86_avx2_psrl_q:
2775  case Intrinsic::x86_avx2_psrl_w:
2776  case Intrinsic::x86_avx512_psrl_d_512:
2777  case Intrinsic::x86_avx512_psrl_q_512:
2778  case Intrinsic::x86_avx512_psrl_w_512:
2779  case Intrinsic::x86_sse2_psll_d:
2780  case Intrinsic::x86_sse2_psll_q:
2781  case Intrinsic::x86_sse2_psll_w:
2782  case Intrinsic::x86_avx2_psll_d:
2783  case Intrinsic::x86_avx2_psll_q:
2784  case Intrinsic::x86_avx2_psll_w:
2785  case Intrinsic::x86_avx512_psll_d_512:
2786  case Intrinsic::x86_avx512_psll_q_512:
2787  case Intrinsic::x86_avx512_psll_w_512: {
2788  if (Value *V = simplifyX86immShift(*II, Builder))
2789  return replaceInstUsesWith(*II, V);
2790 
2791  // SSE2/AVX2 uses only the first 64-bits of the 128-bit vector
2792  // operand to compute the shift amount.
2793  Value *Arg1 = II->getArgOperand(1);
2794  assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 &&
2795  "Unexpected packed shift size");
2796  unsigned VWidth = Arg1->getType()->getVectorNumElements();
2797 
2798  if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2)) {
2799  II->setArgOperand(1, V);
2800  return II;
2801  }
2802  break;
2803  }
2804 
2805  case Intrinsic::x86_avx2_psllv_d:
2806  case Intrinsic::x86_avx2_psllv_d_256:
2807  case Intrinsic::x86_avx2_psllv_q:
2808  case Intrinsic::x86_avx2_psllv_q_256:
2809  case Intrinsic::x86_avx512_psllv_d_512:
2810  case Intrinsic::x86_avx512_psllv_q_512:
2811  case Intrinsic::x86_avx512_psllv_w_128:
2812  case Intrinsic::x86_avx512_psllv_w_256:
2813  case Intrinsic::x86_avx512_psllv_w_512:
2814  case Intrinsic::x86_avx2_psrav_d:
2815  case Intrinsic::x86_avx2_psrav_d_256:
2816  case Intrinsic::x86_avx512_psrav_q_128:
2817  case Intrinsic::x86_avx512_psrav_q_256:
2818  case Intrinsic::x86_avx512_psrav_d_512:
2819  case Intrinsic::x86_avx512_psrav_q_512:
2820  case Intrinsic::x86_avx512_psrav_w_128:
2821  case Intrinsic::x86_avx512_psrav_w_256:
2822  case Intrinsic::x86_avx512_psrav_w_512:
2823  case Intrinsic::x86_avx2_psrlv_d:
2824  case Intrinsic::x86_avx2_psrlv_d_256:
2825  case Intrinsic::x86_avx2_psrlv_q:
2826  case Intrinsic::x86_avx2_psrlv_q_256:
2827  case Intrinsic::x86_avx512_psrlv_d_512:
2828  case Intrinsic::x86_avx512_psrlv_q_512:
2829  case Intrinsic::x86_avx512_psrlv_w_128:
2830  case Intrinsic::x86_avx512_psrlv_w_256:
2831  case Intrinsic::x86_avx512_psrlv_w_512:
2832  if (Value *V = simplifyX86varShift(*II, Builder))
2833  return replaceInstUsesWith(*II, V);
2834  break;
2835 
2836  case Intrinsic::x86_sse2_packssdw_128:
2837  case Intrinsic::x86_sse2_packsswb_128:
2838  case Intrinsic::x86_avx2_packssdw:
2839  case Intrinsic::x86_avx2_packsswb:
2840  case Intrinsic::x86_avx512_packssdw_512:
2841  case Intrinsic::x86_avx512_packsswb_512:
2842  if (Value *V = simplifyX86pack(*II, Builder, true))
2843  return replaceInstUsesWith(*II, V);
2844  break;
2845 
2846  case Intrinsic::x86_sse2_packuswb_128:
2847  case Intrinsic::x86_sse41_packusdw:
2848  case Intrinsic::x86_avx2_packusdw:
2849  case Intrinsic::x86_avx2_packuswb:
2850  case Intrinsic::x86_avx512_packusdw_512:
2851  case Intrinsic::x86_avx512_packuswb_512:
2852  if (Value *V = simplifyX86pack(*II, Builder, false))
2853  return replaceInstUsesWith(*II, V);
2854  break;
2855 
2856  case Intrinsic::x86_pclmulqdq:
2857  case Intrinsic::x86_pclmulqdq_256:
2858  case Intrinsic::x86_pclmulqdq_512: {
2859  if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
2860  unsigned Imm = C->getZExtValue();
2861 
2862  bool MadeChange = false;
2863  Value *Arg0 = II->getArgOperand(0);
2864  Value *Arg1 = II->getArgOperand(1);
2865  unsigned VWidth = Arg0->getType()->getVectorNumElements();
2866 
2867  APInt UndefElts1(VWidth, 0);
2868  APInt DemandedElts1 = APInt::getSplat(VWidth,
2869  APInt(2, (Imm & 0x01) ? 2 : 1));
2870  if (Value *V = SimplifyDemandedVectorElts(Arg0, DemandedElts1,
2871  UndefElts1)) {
2872  II->setArgOperand(0, V);
2873  MadeChange = true;
2874  }
2875 
2876  APInt UndefElts2(VWidth, 0);
2877  APInt DemandedElts2 = APInt::getSplat(VWidth,
2878  APInt(2, (Imm & 0x10) ? 2 : 1));
2879  if (Value *V = SimplifyDemandedVectorElts(Arg1, DemandedElts2,
2880  UndefElts2)) {
2881  II->setArgOperand(1, V);
2882  MadeChange = true;
2883  }
2884 
2885  // If the demanded elements of either input are all undef, the result is zero.
2886  if (DemandedElts1.isSubsetOf(UndefElts1) ||
2887  DemandedElts2.isSubsetOf(UndefElts2))
2888  return replaceInstUsesWith(*II,
2889  ConstantAggregateZero::get(II->getType()));
2890 
2891  if (MadeChange)
2892  return II;
2893  }
2894  break;
2895  }
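 // Illustrative note (added for exposition): for the 128-bit form each
 // operand holds two i64 lanes and the immediate selects one lane per
 // operand:
 //
 //   Imm bit 0: 0 -> lane 0 of Arg0, 1 -> lane 1 of Arg0
 //   Imm bit 4: 0 -> lane 0 of Arg1, 1 -> lane 1 of Arg1
 //
 // Only the selected lanes are demanded, and if a selected lane is undef the
 // whole carry-less multiply folds to zero.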
2896 
2897  case Intrinsic::x86_sse41_insertps:
2898  if (Value *V = simplifyX86insertps(*II, Builder))
2899  return replaceInstUsesWith(*II, V);
2900  break;
2901 
2902  case Intrinsic::x86_sse4a_extrq: {
2903  Value *Op0 = II->getArgOperand(0);
2904  Value *Op1 = II->getArgOperand(1);
2905  unsigned VWidth0 = Op0->getType()->getVectorNumElements();
2906  unsigned VWidth1 = Op1->getType()->getVectorNumElements();
2907  assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
2908  Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
2909  VWidth1 == 16 && "Unexpected operand sizes");
2910 
2911  // See if we're dealing with constant values.
2912  Constant *C1 = dyn_cast<Constant>(Op1);
2913  ConstantInt *CILength =
2914  C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
2915  : nullptr;
2916  ConstantInt *CIIndex =
2917  C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
2918  : nullptr;
2919 
2920  // Attempt to simplify to a constant, shuffle vector or EXTRQI call.
2921  if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
2922  return replaceInstUsesWith(*II, V);
2923 
2924  // EXTRQ only uses the lowest 64-bits of the first 128-bit vector
2925  // operands and the lowest 16-bits of the second.
2926  bool MadeChange = false;
2927  if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
2928  II->setArgOperand(0, V);
2929  MadeChange = true;
2930  }
2931  if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 2)) {
2932  II->setArgOperand(1, V);
2933  MadeChange = true;
2934  }
2935  if (MadeChange)
2936  return II;
2937  break;
2938  }
2939 
2940  case Intrinsic::x86_sse4a_extrqi: {
2941  // EXTRQI: Extract Length bits starting from Index. Zero pad the remaining
2942  // bits of the lower 64-bits. The upper 64-bits are undefined.
2943  Value *Op0 = II->getArgOperand(0);
2944  unsigned VWidth = Op0->getType()->getVectorNumElements();
2945  assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
2946  "Unexpected operand size");
2947 
2948  // See if we're dealing with constant values.
2949  ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(1));
2950  ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(2));
2951 
2952  // Attempt to simplify to a constant or shuffle vector.
2953  if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
2954  return replaceInstUsesWith(*II, V);
2955 
2956  // EXTRQI only uses the lowest 64-bits of the first 128-bit vector
2957  // operand.
2958  if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
2959  II->setArgOperand(0, V);
2960  return II;
2961  }
2962  break;
2963  }
2964 
2965  case Intrinsic::x86_sse4a_insertq: {
2966  Value *Op0 = II->getArgOperand(0);
2967  Value *Op1 = II->getArgOperand(1);
2968  unsigned VWidth = Op0->getType()->getVectorNumElements();
2969  assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
2970  Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
2971  Op1->getType()->getVectorNumElements() == 2 &&
2972  "Unexpected operand size");
2973 
2974  // See if we're dealing with constant values.
2975  Constant *C1 = dyn_cast<Constant>(Op1);
2976  ConstantInt *CI11 =
2977  C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
2978  : nullptr;
2979 
2980  // Attempt to simplify to a constant, shuffle vector or INSERTQI call.
2981  if (CI11) {
2982  const APInt &V11 = CI11->getValue();
2983  APInt Len = V11.zextOrTrunc(6);
2984  APInt Idx = V11.lshr(8).zextOrTrunc(6);
2985  if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
2986  return replaceInstUsesWith(*II, V);
2987  }
2988 
2989  // INSERTQ only uses the lowest 64-bits of the first 128-bit vector
2990  // operand.
2991  if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
2992  II->setArgOperand(0, V);
2993  return II;
2994  }
2995  break;
2996  }
2997 
2998  case Intrinsic::x86_sse4a_insertqi: {
2999  // INSERTQI: Extract lowest Length bits from lower half of second source and
3000  // insert over first source starting at Index bit. The upper 64-bits are
3001  // undefined.
3002  Value *Op0 = II->getArgOperand(0);
3003  Value *Op1 = II->getArgOperand(1);
3004  unsigned VWidth0 = Op0->getType()->getVectorNumElements();
3005  unsigned VWidth1 = Op1->getType()->getVectorNumElements();
3006  assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
3007  Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
3008  VWidth1 == 2 && "Unexpected operand sizes");
3009 
3010  // See if we're dealing with constant values.
3011  ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(2));
3012  ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(3));
3013 
3014  // Attempt to simplify to a constant or shuffle vector.
3015  if (CILength && CIIndex) {
3016  APInt Len = CILength->getValue().zextOrTrunc(6);
3017  APInt Idx = CIIndex->getValue().zextOrTrunc(6);
3018  if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
3019  return replaceInstUsesWith(*II, V);
3020  }
3021 
3022  // INSERTQI only uses the lowest 64-bits of the first two 128-bit vector
3023  // operands.
3024  bool MadeChange = false;
3025  if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
3026  II->setArgOperand(0, V);
3027  MadeChange = true;
3028  }
3029  if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 1)) {
3030  II->setArgOperand(1, V);
3031  MadeChange = true;
3032  }
3033  if (MadeChange)
3034  return II;
3035  break;
3036  }
3037 
3038  case Intrinsic::x86_sse41_pblendvb:
3039  case Intrinsic::x86_sse41_blendvps:
3040  case Intrinsic::x86_sse41_blendvpd:
3041  case Intrinsic::x86_avx_blendv_ps_256:
3042  case Intrinsic::x86_avx_blendv_pd_256:
3043  case Intrinsic::x86_avx2_pblendvb: {
3044  // fold (blend A, A, Mask) -> A
3045  Value *Op0 = II->getArgOperand(0);
3046  Value *Op1 = II->getArgOperand(1);
3047  Value *Mask = II->getArgOperand(2);
3048  if (Op0 == Op1)
3049  return replaceInstUsesWith(CI, Op0);
3050 
3051  // Zero Mask - select 1st argument.
3052  if (isa<ConstantAggregateZero>(Mask))
3053  return replaceInstUsesWith(CI, Op0);
3054 
3055  // Constant Mask - select 1st/2nd argument lane based on top bit of mask.
3056  if (auto *ConstantMask = dyn_cast<ConstantDataVector>(Mask)) {
3057  Constant *NewSelector = getNegativeIsTrueBoolVec(ConstantMask);
3058  return SelectInst::Create(NewSelector, Op1, Op0, "blendv");
3059  }
3060 
3061  // Convert to a vector select if we can bypass casts and find a boolean
3062  // vector condition value.
3063  Value *BoolVec;
3064  Mask = peekThroughBitcast(Mask);
3065  if (match(Mask, m_SExt(m_Value(BoolVec))) &&
3066  BoolVec->getType()->isVectorTy() &&
3067  BoolVec->getType()->getScalarSizeInBits() == 1) {
3068  assert(Mask->getType()->getPrimitiveSizeInBits() ==
3069  II->getType()->getPrimitiveSizeInBits() &&
3070  "Not expecting mask and operands with different sizes");
3071 
3072  unsigned NumMaskElts = Mask->getType()->getVectorNumElements();
3073  unsigned NumOperandElts = II->getType()->getVectorNumElements();
3074  if (NumMaskElts == NumOperandElts)
3075  return SelectInst::Create(BoolVec, Op1, Op0);
3076 
3077  // If the mask has fewer elements than the operands, each mask bit maps to
3078  // multiple elements of the operands. Bitcast back and forth.
3079  if (NumMaskElts < NumOperandElts) {
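 // e.g. a <2 x i64> mask selecting between <16 x i8> operands: perform the
 // select as <2 x i64> and bitcast the result back afterwards.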
3080  Value *CastOp0 = Builder.CreateBitCast(Op0, Mask->getType());
3081  Value *CastOp1 = Builder.CreateBitCast(Op1, Mask->getType());
3082  Value *Sel = Builder.CreateSelect(BoolVec, CastOp1, CastOp0);
3083  return new BitCastInst(Sel, II->getType());
3084  }
3085  }
3086 
3087  break;
3088  }
3089 
3090  case Intrinsic::x86_ssse3_pshuf_b_128:
3091  case Intrinsic::x86_avx2_pshuf_b:
3092  case Intrinsic::x86_avx512_pshuf_b_512:
3093  if (Value *V = simplifyX86pshufb(*II, Builder))
3094  return replaceInstUsesWith(*II, V);
3095  break;
3096 
3097  case Intrinsic::x86_avx_vpermilvar_ps:
3098  case Intrinsic::x86_avx_vpermilvar_ps_256:
3099  case Intrinsic::x86_avx512_vpermilvar_ps_512:
3100  case Intrinsic::x86_avx_vpermilvar_pd:
3101  case Intrinsic::x86_avx_vpermilvar_pd_256:
3102  case Intrinsic::x86_avx512_vpermilvar_pd_512:
3103  if (Value *V = simplifyX86vpermilvar(*II, Builder))
3104  return replaceInstUsesWith(*II, V);
3105  break;
3106 
3107  case Intrinsic::x86_avx2_permd:
3108  case Intrinsic::x86_avx2_permps:
3109  case Intrinsic::x86_avx512_permvar_df_256:
3110  case Intrinsic::x86_avx512_permvar_df_512:
3111  case Intrinsic::x86_avx512_permvar_di_256:
3112  case Intrinsic::x86_avx512_permvar_di_512:
3113  case Intrinsic::x86_avx512_permvar_hi_128:
3114  case Intrinsic::x86_avx512_permvar_hi_256:
3115  case Intrinsic::x86_avx512_permvar_hi_512:
3116  case Intrinsic::x86_avx512_permvar_qi_128:
3117  case Intrinsic::x86_avx512_permvar_qi_256:
3118  case Intrinsic::x86_avx512_permvar_qi_512:
3119  case Intrinsic::x86_avx512_permvar_sf_512:
3120  case Intrinsic::x86_avx512_permvar_si_512:
3121  if (Value *V = simplifyX86vpermv(*II, Builder))
3122  return replaceInstUsesWith(*II, V);
3123  break;
3124 
3125  case Intrinsic::x86_avx_maskload_ps:
3126  case Intrinsic::x86_avx_maskload_pd:
3127  case Intrinsic::x86_avx_maskload_ps_256:
3128  case Intrinsic::x86_avx_maskload_pd_256:
3129  case Intrinsic::x86_avx2_maskload_d:
3130  case Intrinsic::x86_avx2_maskload_q:
3131  case Intrinsic::x86_avx2_maskload_d_256:
3132  case Intrinsic::x86_avx2_maskload_q_256:
3133  if (Instruction *I = simplifyX86MaskedLoad(*II, *this))
3134  return I;
3135  break;
3136 
3137  case Intrinsic::x86_sse2_maskmov_dqu:
3138  case Intrinsic::x86_avx_maskstore_ps:
3139  case Intrinsic::x86_avx_maskstore_pd:
3140  case Intrinsic::x86_avx_maskstore_ps_256:
3141  case Intrinsic::x86_avx_maskstore_pd_256:
3142  case Intrinsic::x86_avx2_maskstore_d:
3143  case Intrinsic::x86_avx2_maskstore_q:
3144  case Intrinsic::x86_avx2_maskstore_d_256:
3145  case Intrinsic::x86_avx2_maskstore_q_256:
3146  if (simplifyX86MaskedStore(*II, *this))
3147  return nullptr;
3148  break;
3149 
3150  case Intrinsic::x86_addcarry_32:
3151  case Intrinsic::x86_addcarry_64:
3152  if (Value *V = simplifyX86addcarry(*II, Builder))
3153  return replaceInstUsesWith(*II, V);
3154  break;
3155 
3156  case Intrinsic::ppc_altivec_vperm:
3157  // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
3158  // Note that ppc_altivec_vperm has a big-endian bias, so when creating
3159  // a vector shuffle for little endian, we must undo the transformation
3160  // performed on vec_perm in altivec.h. That is, we must complement
3161  // the permutation mask with respect to 31 and reverse the order of
3162  // V1 and V2.
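 // e.g. a mask byte of 0 is rewritten to 31 on little-endian targets, with
 // V1 and V2 trading places in the extracts below.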
3163  if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
3164  assert(Mask->getType()->getVectorNumElements() == 16 &&
3165  "Bad type for intrinsic!");
3166 
3167  // Check that all of the elements are integer constants or undefs.
3168  bool AllEltsOk = true;
3169  for (unsigned i = 0; i != 16; ++i) {
3170  Constant *Elt = Mask->getAggregateElement(i);
3171  if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
3172  AllEltsOk = false;
3173  break;
3174  }
3175  }
3176 
3177  if (AllEltsOk) {
3178  // Cast the input vectors to byte vectors.
3179  Value *Op0 = Builder.CreateBitCast(II->getArgOperand(0),
3180  Mask->getType());
3181  Value *Op1 = Builder.CreateBitCast(II->getArgOperand(1),
3182  Mask->getType());
3183  Value *Result = UndefValue::get(Op0->getType());
3184 
3185  // Only extract each element once.
3186  Value *ExtractedElts[32];
3187  memset(ExtractedElts, 0, sizeof(ExtractedElts));
3188 
3189  for (unsigned i = 0; i != 16; ++i) {
3190  if (isa<UndefValue>(Mask->getAggregateElement(i)))
3191  continue;
3192  unsigned Idx =
3193  cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
3194  Idx &= 31; // Match the hardware behavior.
3195  if (DL.isLittleEndian())
3196  Idx = 31 - Idx;
3197 
3198  if (!ExtractedElts[Idx]) {
3199  Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
3200  Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
3201  ExtractedElts[Idx] =
3202  Builder.CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
3203  Builder.getInt32(Idx&15));
3204  }
3205 
3206  // Insert this value into the result vector.
3207  Result = Builder.CreateInsertElement(Result, ExtractedElts[Idx],
3208  Builder.getInt32(i));
3209  }
3210  return CastInst::Create(Instruction::BitCast, Result, CI.getType());
3211  }
3212  }
3213  break;
3214 
3215  case Intrinsic::arm_neon_vld1: {
3216  unsigned MemAlign = getKnownAlignment(II->getArgOperand(0),
3217  DL, II, &AC, &DT);
3218  if (Value *V = simplifyNeonVld1(*II, MemAlign, Builder))
3219  return replaceInstUsesWith(*II, V);
3220  break;
3221  }
3222 
3223  case Intrinsic::arm_neon_vld2:
3224  case Intrinsic::arm_neon_vld3:
3225  case Intrinsic::arm_neon_vld4:
3226  case Intrinsic::arm_neon_vld2lane:
3227  case Intrinsic::arm_neon_vld3lane:
3228  case Intrinsic::arm_neon_vld4lane:
3229  case Intrinsic::arm_neon_vst1:
3230  case Intrinsic::arm_neon_vst2:
3231  case Intrinsic::arm_neon_vst3:
3232  case Intrinsic::arm_neon_vst4:
3233  case Intrinsic::arm_neon_vst2lane:
3234  case Intrinsic::arm_neon_vst3lane:
3235  case Intrinsic::arm_neon_vst4lane: {
3236  unsigned MemAlign =
3237  getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
3238  unsigned AlignArg = II->getNumArgOperands() - 1;
3239  ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
3240  if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
3241  II->setArgOperand(AlignArg,
3242  ConstantInt::get(Type::getInt32Ty(II->getContext()),
3243  MemAlign, false));
3244  return II;
3245  }
3246  break;
3247  }
3248 
3249  case Intrinsic::arm_neon_vtbl1:
3250  case Intrinsic::aarch64_neon_tbl1:
3251  if (Value *V = simplifyNeonTbl1(*II, Builder))
3252  return replaceInstUsesWith(*II, V);
3253  break;
3254 
3255  case Intrinsic::arm_neon_vmulls:
3256  case Intrinsic::arm_neon_vmullu:
3257  case Intrinsic::aarch64_neon_smull:
3258  case Intrinsic::aarch64_neon_umull: {
3259  Value *Arg0 = II->getArgOperand(0);
3260  Value *Arg1 = II->getArgOperand(1);
3261 
3262  // Handle mul by zero first:
3263  if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
3264  return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
3265  }
3266 
3267  // Check for constant LHS & RHS - in this case we just simplify.
3268  bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
3269  IID == Intrinsic::aarch64_neon_umull);
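 // Constant folding happens in the wider destination type: both constant
 // vectors are sign- or zero-extended to it before the multiply.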
3270  VectorType *NewVT = cast<VectorType>(II->getType());
3271  if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
3272  if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
3273  CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
3274  CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
3275 
3276  return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
3277  }
3278 
3279  // Couldn't simplify - canonicalize constant to the RHS.
3280  std::swap(Arg0, Arg1);
3281  }
3282 
3283  // Handle mul by one:
3284  if (Constant *CV1 = dyn_cast<Constant>(Arg1))
3285  if (ConstantInt *Splat =
3286  dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
3287  if (Splat->isOne())
3288  return CastInst::CreateIntegerCast(Arg0, II->getType(),
3289  /*isSigned=*/!Zext);
3290 
3291  break;
3292  }
3293  case Intrinsic::arm_neon_aesd:
3294  case Intrinsic::arm_neon_aese:
3295  case Intrinsic::aarch64_crypto_aesd:
3296  case Intrinsic::aarch64_crypto_aese: {
3297  Value *DataArg = II->getArgOperand(0);
3298  Value *KeyArg = II->getArgOperand(1);
3299 
3300  // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR
3301  Value *Data, *Key;
3302  if (match(KeyArg, m_ZeroInt()) &&
3303  match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
3304  II->setArgOperand(0, Data);
3305  II->setArgOperand(1, Key);
3306  return II;
3307  }
3308  break;
3309  }
3310  case Intrinsic::amdgcn_rcp: {
3311  Value *Src = II->getArgOperand(0);
3312 
3313  // TODO: Move to ConstantFolding/InstSimplify?
3314  if (isa<UndefValue>(Src))
3315  return replaceInstUsesWith(CI, Src);
3316 
3317  if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
3318  const APFloat &ArgVal = C->getValueAPF();
3319  APFloat Val(ArgVal.getSemantics(), 1.0);
3320  APFloat::opStatus Status = Val.divide(ArgVal,
3321  APFloat::rmNearestTiesToEven);
3322  // Only do this if it was exact and therefore not dependent on the
3323  // rounding mode.
3324  if (Status == APFloat::opOK)
3325  return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
3326  }
3327 
3328  break;
3329  }
3330  case Intrinsic::amdgcn_rsq: {
3331  Value *Src = II->getArgOperand(0);
3332 
3333  // TODO: Move to ConstantFolding/InstSimplify?
3334  if (isa<UndefValue>(Src))
3335  return replaceInstUsesWith(CI, Src);
3336  break;
3337  }
3338  case Intrinsic::amdgcn_frexp_mant:
3339  case Intrinsic::amdgcn_frexp_exp: {
3340  Value *Src = II->getArgOperand(0);
3341  if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
3342  int Exp;
3343  APFloat Significand = frexp(C->getValueAPF(), Exp,
3344  APFloat::rmNearestTiesToEven);
3345 
3346  if (IID == Intrinsic::amdgcn_frexp_mant) {
3347  return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(),
3348  Significand));
3349  }
3350 
3351  // Match instruction special case behavior.
3352  if (Exp == APFloat::IEK_NaN || Exp == APFloat::IEK_Inf)
3353  Exp = 0;
3354 
3355  return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Exp));
3356  }
3357 
3358  if (isa<UndefValue>(Src))
3359  return replaceInstUsesWith(CI, UndefValue::get(II->getType()));
3360 
3361  break;
3362  }
3363  case Intrinsic::amdgcn_class: {
3364  enum {
3365  S_NAN = 1 << 0, // Signaling NaN
3366  Q_NAN = 1 << 1, // Quiet NaN
3367  N_INFINITY = 1 << 2, // Negative infinity
3368  N_NORMAL = 1 << 3, // Negative normal
3369  N_SUBNORMAL = 1 << 4, // Negative subnormal
3370  N_ZERO = 1 << 5, // Negative zero
3371  P_ZERO = 1 << 6, // Positive zero
3372  P_SUBNORMAL = 1 << 7, // Positive subnormal
3373  P_NORMAL = 1 << 8, // Positive normal
3374  P_INFINITY = 1 << 9 // Positive infinity
3375  };
3376 
3377  const uint32_t FullMask = S_NAN | Q_NAN | N_INFINITY | N_NORMAL |
3378  N_SUBNORMAL | N_ZERO | P_ZERO | P_SUBNORMAL | P_NORMAL | P_INFINITY;
3379 
3380  Value *Src0 = II->getArgOperand(0);
3381  Value *Src1 = II->getArgOperand(1);
3382  const ConstantInt *CMask = dyn_cast<ConstantInt>(Src1);
3383  if (!CMask) {
3384  if (isa<UndefValue>(Src0))
3385  return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3386 
3387  if (isa<UndefValue>(Src1))
3388  return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));
3389  break;
3390  }
3391 
3392  uint32_t Mask = CMask->getZExtValue();
3393 
3394  // If all tests are made, it doesn't matter what the value is.
3395  if ((Mask & FullMask) == FullMask)
3396  return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), true));
3397 
3398  if ((Mask & FullMask) == 0)
3399  return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));
3400 
3401  if (Mask == (S_NAN | Q_NAN)) {
3402  // Equivalent of isnan. Replace with standard fcmp.
3403  Value *FCmp = Builder.CreateFCmpUNO(Src0, Src0);
3404  FCmp->takeName(II);
3405  return replaceInstUsesWith(*II, FCmp);
3406  }
3407 
3408  if (Mask == (N_ZERO | P_ZERO)) {
3409  // Equivalent of == 0.
3410  Value *FCmp = Builder.CreateFCmpOEQ(
3411  Src0, ConstantFP::get(Src0->getType(), 0.0));
3412 
3413  FCmp->takeName(II);
3414  return replaceInstUsesWith(*II, FCmp);
3415  }
3416 
3417  // fp_class (nnan x), qnan|snan|other -> fp_class (nnan x), other
3418  if (((Mask & S_NAN) || (Mask & Q_NAN)) && isKnownNeverNaN(Src0, &TLI)) {
3419  II->setArgOperand(1, ConstantInt::get(Src1->getType(),
3420  Mask & ~(S_NAN | Q_NAN)));
3421  return II;
3422  }
3423 
3424  const ConstantFP *CVal = dyn_cast<ConstantFP>(Src0);
3425  if (!CVal) {
3426  if (isa<UndefValue>(Src0))
3427  return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3428 
3429  // Clamp mask to used bits
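 // (only the low ten bits of the mask are defined, e.g. 0x7ff clamps to 0x3ff)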
3430  if ((Mask & FullMask) != Mask) {
3431  CallInst *NewCall = Builder.CreateCall(II->getCalledFunction(),
3432  { Src0, ConstantInt::get(Src1->getType(), Mask & FullMask) }
3433  );
3434 
3435  NewCall->takeName(II);
3436  return replaceInstUsesWith(*II, NewCall);
3437  }
3438 
3439  break;
3440  }
3441 
3442  const APFloat &Val = CVal->getValueAPF();
3443 
3444  bool Result =
3445  ((Mask & S_NAN) && Val.isNaN() && Val.isSignaling()) ||
3446  ((Mask & Q_NAN) && Val.isNaN() && !Val.isSignaling()) ||
3447  ((Mask & N_INFINITY) && Val.isInfinity() && Val.isNegative()) ||
3448  ((Mask & N_NORMAL) && Val.isNormal() && Val.isNegative()) ||
3449  ((Mask & N_SUBNORMAL) && Val.isDenormal() && Val.isNegative()) ||
3450  ((Mask & N_ZERO) && Val.isZero() && Val.isNegative()) ||
3451  ((Mask & P_ZERO) && Val.isZero() && !Val.isNegative()) ||
3452  ((Mask & P_SUBNORMAL) && Val.isDenormal() && !Val.isNegative()) ||
3453  ((Mask & P_NORMAL) && Val.isNormal() && !Val.isNegative()) ||
3454  ((Mask & P_INFINITY) && Val.isInfinity() && !Val.isNegative());
3455 
3456  return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), Result));
3457  }
3458  case Intrinsic::amdgcn_cvt_pkrtz: {
3459  Value *Src0 = II->getArgOperand(0);
3460  Value *Src1 = II->getArgOperand(1);
3461  if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
3462  if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
3463  const fltSemantics &HalfSem
3464  = II->getType()->getScalarType()->getFltSemantics();
3465  bool LosesInfo;
3466  APFloat Val0 = C0->getValueAPF();
3467  APFloat Val1 = C1->getValueAPF();
3468  Val0.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);
3469  Val1.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);
3470 
3471  Constant *Folded = ConstantVector::get({
3472  ConstantFP::get(II->getContext(), Val0),
3473  ConstantFP::get(II->getContext(), Val1) });
3474  return replaceInstUsesWith(*II, Folded);
3475  }
3476  }
3477 
3478  if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
3479  return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3480 
3481  break;
3482  }
3483  case Intrinsic::amdgcn_cvt_pknorm_i16:
3484  case Intrinsic::amdgcn_cvt_pknorm_u16:
3485  case Intrinsic::amdgcn_cvt_pk_i16:
3486  case Intrinsic::amdgcn_cvt_pk_u16: {
3487  Value *Src0 = II->getArgOperand(0);
3488  Value *Src1 = II->getArgOperand(1);
3489 
3490  if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
3491  return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3492 
3493  break;
3494  }
3495  case Intrinsic::amdgcn_ubfe:
3496  case Intrinsic::amdgcn_sbfe: {
3497  // Decompose simple cases into standard shifts.
3498  Value *Src = II->getArgOperand(0);
3499  if (isa<UndefValue>(Src))
3500  return replaceInstUsesWith(*II, Src);
3501 
3502  unsigned Width;
3503  Type *Ty = II->getType();
3504  unsigned IntSize = Ty->getIntegerBitWidth();
3505 
3506  ConstantInt *CWidth = dyn_cast<ConstantInt>(II->getArgOperand(2));
3507  if (CWidth) {
3508  Width = CWidth->getZExtValue();
3509  if ((Width & (IntSize - 1)) == 0)
3510  return replaceInstUsesWith(*II, ConstantInt::getNullValue(Ty));
3511 
3512  if (Width >= IntSize) {
3513  // Hardware ignores high bits, so remove those.
3514  II->setArgOperand(2, ConstantInt::get(CWidth->getType(),
3515  Width & (IntSize - 1)));
3516  return II;
3517  }
3518  }
3519 
3520  unsigned Offset;
3521  ConstantInt *COffset = dyn_cast<ConstantInt>(II->getArgOperand(1));
3522  if (COffset) {
3523  Offset = COffset->getZExtValue();
3524  if (Offset >= IntSize) {
3525  II->setArgOperand(1, ConstantInt::get(COffset->getType(),
3526  Offset & (IntSize - 1)));
3527  return II;
3528  }
3529  }
3530 
3531  bool Signed = IID == Intrinsic::amdgcn_sbfe;
3532 
3533  if (!CWidth || !COffset)
3534  break;
3535 
3536  // The case of Width == 0 is handled above, which makes this transformation
3537  // safe. If Width == 0, then the ashr and lshr instructions become poison
3538  // values since the shift amount would be equal to the bit size.
3539  assert(Width != 0);
3540 
3541  // TODO: This allows folding to undef when the hardware has specific
3542  // behavior?
3543  if (Offset + Width < IntSize) {
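 // e.g. ubfe(x, 4, 8) on i32 becomes lshr(shl(x, 20), 24).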
3544  Value *Shl = Builder.CreateShl(Src, IntSize - Offset - Width);
3545  Value *RightShift = Signed ? Builder.CreateAShr(Shl, IntSize - Width)
3546  : Builder.CreateLShr(Shl, IntSize - Width);
3547  RightShift->takeName(II);
3548  return replaceInstUsesWith(*II, RightShift);
3549  }
3550 
3551  Value *RightShift = Signed ? Builder.CreateAShr(Src, Offset)
3552  : Builder.CreateLShr(Src, Offset);
3553 
3554  RightShift->takeName(II);
3555  return replaceInstUsesWith(*II, RightShift);
3556  }
3557  case Intrinsic::amdgcn_exp:
3558  case Intrinsic::amdgcn_exp_compr: {
3559  ConstantInt *En = cast<ConstantInt>(II->getArgOperand(1));
3560  unsigned EnBits = En->getZExtValue();
3561  if (EnBits == 0xf)
3562  break; // All inputs enabled.
3563 
3564  bool IsCompr = IID == Intrinsic::amdgcn_exp_compr;
3565  bool Changed = false;
3566  for (int I = 0; I < (IsCompr ? 2 : 4); ++I) {
3567  if ((!IsCompr && (EnBits & (1 << I)) == 0) ||
3568  (IsCompr && ((EnBits & (0x3 << (2 * I))) == 0))) {
3569  Value *Src = II->getArgOperand(I + 2);
3570  if (!isa<UndefValue>(Src)) {
3571  II->setArgOperand(I + 2, UndefValue::get(Src->getType()));
3572  Changed = true;
3573  }
3574  }
3575  }
3576 
3577  if (Changed)
3578  return II;
3579 
3580  break;
3581  }
3582  case Intrinsic::amdgcn_fmed3: {
3583  // Note this does not preserve proper sNaN behavior if IEEE-mode is enabled
3584  // for the shader.
3585 
3586  Value *Src0 = II->getArgOperand(0);
3587  Value *Src1 = II->getArgOperand(1);
3588  Value *Src2 = II->getArgOperand(2);
3589 
3590  // Checking for NaN before canonicalization provides better fidelity when
3591  // mapping other operations onto fmed3 since the order of operands is
3592  // unchanged.
3593  CallInst *NewCall = nullptr;
3594  if (match(Src0, m_NaN()) || isa<UndefValue>(Src0)) {
3595  NewCall = Builder.CreateMinNum(Src1, Src2);
3596  } else if (match(Src1, m_NaN()) || isa<UndefValue>(Src1)) {
3597  NewCall = Builder.CreateMinNum(Src0, Src2);
3598  } else if (match(Src2, m_NaN()) || isa<UndefValue>(Src2)) {
3599  NewCall = Builder.CreateMaxNum(Src0, Src1);
3600  }
3601 
3602  if (NewCall) {
3603  NewCall->copyFastMathFlags(II);
3604  NewCall->takeName(II);
3605  return replaceInstUsesWith(*II, NewCall);
3606  }
3607 
3608  bool Swap = false;
3609  // Canonicalize constants to RHS operands.
3610  //
3611  // fmed3(c0, x, c1) -> fmed3(x, c0, c1)
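 // The three adjacent swaps below bubble any constant operands into the
 // trailing positions.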
3612  if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3613  std::swap(Src0, Src1);
3614  Swap = true;
3615  }
3616 
3617  if (isa<Constant>(Src1) && !isa<Constant>(Src2)) {
3618  std::swap(Src1, Src2);
3619  Swap = true;
3620  }
3621 
3622  if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3623  std::swap(Src0, Src1);
3624  Swap = true;
3625  }
3626 
3627  if (Swap) {
3628  II->setArgOperand(0, Src0);
3629  II->setArgOperand(1, Src1);
3630  II->setArgOperand(2, Src2);
3631  return II;
3632  }
3633 
3634  if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
3635  if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
3636  if (const ConstantFP *C2 = dyn_cast<ConstantFP>(Src2)) {
3637  APFloat Result = fmed3AMDGCN(C0->getValueAPF(), C1->getValueAPF(),
3638  C2->getValueAPF());
3639  return replaceInstUsesWith(*II,
3640  ConstantFP::get(Builder.getContext(), Result));
3641  }
3642  }
3643  }
3644 
3645  break;
3646  }
3647  case Intrinsic::amdgcn_icmp:
3648  case Intrinsic::amdgcn_fcmp: {
3649  const ConstantInt *CC = cast<ConstantInt>(II->getArgOperand(2));
3650  // Guard against invalid arguments.
3651  int64_t CCVal = CC->getZExtValue();
3652  bool IsInteger = IID == Intrinsic::amdgcn_icmp;
3653  if ((IsInteger && (CCVal < CmpInst::FIRST_ICMP_PREDICATE ||
3654  CCVal > CmpInst::LAST_ICMP_PREDICATE)) ||
3655  (!IsInteger && (CCVal < CmpInst::FIRST_FCMP_PREDICATE ||
3656  CCVal > CmpInst::LAST_FCMP_PREDICATE)))
3657  break;
3658 
3659  Value *Src0 = II->getArgOperand(0);
3660  Value *Src1 = II->getArgOperand(1);
3661 
3662  if (auto *CSrc0 = dyn_cast<Constant>(Src0)) {
3663  if (auto *CSrc1 = dyn_cast<Constant>(Src1)) {
3664  Constant *CCmp = ConstantExpr::getCompare(CCVal, CSrc0, CSrc1);
3665  if (CCmp->isNullValue()) {
3666  return replaceInstUsesWith(
3667  *II, ConstantExpr::getSExt(CCmp, II->getType()));
3668  }
3669 
3670  // The result of V_ICMP/V_FCMP assembly instructions (which this
3671  // intrinsic exposes) is one bit per thread, masked with the EXEC
3672  // register (which contains the bitmask of live threads). So a
3673  // comparison that always returns true is the same as a read of the
3674  // EXEC register.
3675  Function *NewF = Intrinsic::getDeclaration(
3676  II->getModule(), Intrinsic::read_register, II->getType());
3677  Metadata *MDArgs[] = {MDString::get(II->getContext(), "exec")};
3678  MDNode *MD = MDNode::get(II->getContext(), MDArgs);
3679  Value *Args[] = {MetadataAsValue::get(II->getContext(), MD)};
3680  CallInst *NewCall = Builder.CreateCall(NewF, Args);
3681  NewCall->addAttribute(AttributeList::FunctionIndex,
3682  Attribute::Convergent);
3683  NewCall->takeName(II);
3684  return replaceInstUsesWith(*II, NewCall);
3685  }
3686 
3687  // Canonicalize constants to RHS.
3688  CmpInst::Predicate SwapPred
3689  = CmpInst::getSwappedPredicate(static_cast<CmpInst::Predicate>(CCVal));
3690  II->setArgOperand(0, Src1);
3691  II->setArgOperand(1, Src0);
3692  II->setArgOperand(2, ConstantInt::get(CC->getType(),
3693  static_cast<int>(SwapPred)));
3694  return II;
3695  }
3696 
3697  if (CCVal != CmpInst::ICMP_EQ && CCVal != CmpInst::ICMP_NE)
3698  break;
3699 
3700  // Canonicalize compare eq with true value to compare != 0
3701  // llvm.amdgcn.icmp(zext (i1 x), 1, eq)
3702  // -> llvm.amdgcn.icmp(zext (i1 x), 0, ne)
3703  // llvm.amdgcn.icmp(sext (i1 x), -1, eq)
3704  // -> llvm.amdgcn.icmp(sext (i1 x), 0, ne)
3705  Value *ExtSrc;
3706  if (CCVal == CmpInst::ICMP_EQ &&
3707  ((match(Src1, m_One()) && match(Src0, m_ZExt(m_Value(ExtSrc)))) ||
3708  (match(Src1, m_AllOnes()) && match(Src0, m_SExt(m_Value(ExtSrc))))) &&
3709  ExtSrc->getType()->isIntegerTy(1)) {
3710  II->setArgOperand(1, ConstantInt::getNullValue(Src1->getType()));
3711  II->setArgOperand(2, ConstantInt::get(CC->getType(), CmpInst::ICMP_NE));
3712  return II;
3713  }
3714 
3715  CmpInst::Predicate SrcPred;
3716  Value *SrcLHS;
3717  Value *SrcRHS;
3718 
3719  // Fold compare eq/ne with 0 from a compare result as the predicate to the
3720  // intrinsic. The typical use is a wave vote function in the library, which
3721  // will be fed from a user code condition compared with 0. Fold in the
3722  // redundant compare.
3723 
3724  // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, ne)
3725  // -> llvm.amdgcn.[if]cmp(a, b, pred)
3726  //
3727  // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, eq)
3728  // -> llvm.amdgcn.[if]cmp(a, b, inv pred)
3729  if (match(Src1, m_Zero()) &&
3730  match(Src0,
3731  m_ZExtOrSExt(m_Cmp(SrcPred, m_Value(SrcLHS), m_Value(SrcRHS))))) {
3732  if (CCVal == CmpInst::ICMP_EQ)
3733  SrcPred = CmpInst::getInversePredicate(SrcPred);
3734 
3735  Intrinsic::ID NewIID = CmpInst::isFPPredicate(SrcPred) ?
3736  Intrinsic::amdgcn_fcmp : Intrinsic::amdgcn_icmp;
3737 
3738  Type *Ty = SrcLHS->getType();
3739  if (auto *CmpType = dyn_cast<IntegerType>(Ty)) {
3740  // Promote to next legal integer type.
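 // (e.g. i8 -> i16, i24 -> i32, i48 -> i64)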
3741  unsigned Width = CmpType->getBitWidth();
3742  unsigned NewWidth = Width;
3743 
3744  // Don't do anything for i1 comparisons.
3745  if (Width == 1)
3746  break;
3747 
3748  if (Width <= 16)
3749  NewWidth = 16;
3750  else if (Width <= 32)
3751  NewWidth = 32;
3752  else if (Width <= 64)
3753  NewWidth = 64;
3754  else if (Width > 64)
3755  break; // Can't handle this.
3756 
3757  if (Width != NewWidth) {
3758  IntegerType *CmpTy = Builder.getIntNTy(NewWidth);
3759  if (CmpInst::isSigned(SrcPred)) {
3760  SrcLHS = Builder.CreateSExt(SrcLHS, CmpTy);
3761  SrcRHS = Builder.CreateSExt(SrcRHS, CmpTy);
3762  } else {
3763  SrcLHS = Builder.CreateZExt(SrcLHS, CmpTy);
3764  SrcRHS = Builder.CreateZExt(SrcRHS, CmpTy);
3765  }
3766  }
3767  } else if (!Ty->isFloatTy() && !Ty->isDoubleTy() && !Ty->isHalfTy())
3768  break;
3769 
3770  Function *NewF =
3771  Intrinsic::getDeclaration(II->getModule(), NewIID,
3772  { II->getType(),
3773  SrcLHS->getType() });
3774  Value *Args[] = { SrcLHS, SrcRHS,
3775  ConstantInt::get(CC->getType(), SrcPred) };
3776  CallInst *NewCall = Builder.CreateCall(NewF, Args);
3777  NewCall->takeName(II);
3778  return replaceInstUsesWith(*II, NewCall);
3779  }
3780 
3781  break;
3782  }
3783  case Intrinsic::amdgcn_wqm_vote: {
3784  // wqm_vote is identity when the argument is constant.
3785  if (!isa<Constant>(II->getArgOperand(0)))
3786  break;
3787 
3788  return replaceInstUsesWith(*II, II->getArgOperand(0));
3789  }
3790  case Intrinsic::amdgcn_kill: {
3791  const ConstantInt *C = dyn_cast<ConstantInt>(II->getArgOperand(0));
3792  if (!C || !C->getZExtValue())
3793  break;
3794 
3795  // amdgcn.kill(i1 1) is a no-op
3796  return eraseInstFromFunction(CI);
3797  }
3798  case Intrinsic::amdgcn_update_dpp: {
3799  Value *Old = II->getArgOperand(0);
3800 
3801  auto BC = cast<ConstantInt>(II->getArgOperand(5));
3802  auto RM = cast<ConstantInt>(II->getArgOperand(3));
3803  auto BM = cast<ConstantInt>(II->getArgOperand(4));
3804  if (BC->isZeroValue() ||
3805  RM->getZExtValue() != 0xF ||
3806  BM->getZExtValue() != 0xF ||
3807  isa<UndefValue>(Old))
3808  break;
3809 
3810  // If bound_ctrl = 1 and row mask = bank mask = 0xf, we can omit the old value.
3811  II->setOperand(0, UndefValue::get(Old->getType()));
3812  return II;
3813  }
3814  case Intrinsic::amdgcn_readfirstlane:
3815  case Intrinsic::amdgcn_readlane: {
3816  // A constant value is trivially uniform.
3817  if (Constant *C = dyn_cast<Constant>(II->getArgOperand(0)))
3818  return replaceInstUsesWith(*II, C);
3819 
3820  // The rest of these may not be safe if the exec mask is not the same between
3821  // the def and use.
3822  Value *Src = II->getArgOperand(0);
3823  Instruction *SrcInst = dyn_cast<Instruction>(Src);
3824  if (SrcInst && SrcInst->getParent() != II->getParent())
3825  break;
3826 
3827  // readfirstlane (readfirstlane x) -> readfirstlane x
3828  // readlane (readfirstlane x), y -> readfirstlane x
3829  if (match(Src, m_Intrinsic<Intrinsic::amdgcn_readfirstlane>()))
3830  return replaceInstUsesWith(*II, Src);
3831 
3832  if (IID == Intrinsic::amdgcn_readfirstlane) {
3833  // readfirstlane (readlane x, y) -> readlane x, y
3834  if (match(Src, m_Intrinsic<Intrinsic::amdgcn_readlane>()))
3835  return replaceInstUsesWith(*II, Src);
3836  } else {
3837  // readlane (readlane x, y), y -> readlane x, y
3838  if (match(Src, m_Intrinsic<Intrinsic::amdgcn_readlane>(
3839  m_Value(), m_Specific(II->getArgOperand(1)))))
3840  return replaceInstUsesWith(*II, Src);
3841  }
3842 
3843  break;
3844  }
3845  case Intrinsic::stackrestore: {
3846  // If the save is right next to the restore, remove the restore. This can
3847  // happen when variable allocas are DCE'd.
3848  if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
3849  if (SS->getIntrinsicID() == Intrinsic::stacksave) {
3850  // Skip over debug info.
3851  if (SS->getNextNonDebugInstruction() == II) {
3852  return eraseInstFromFunction(CI);
3853  }
3854  }
3855  }
3856 
3857  // Scan down this block to see if there is another stack restore in the
3858  // same block without an intervening call/alloca.
3859  BasicBlock::iterator BI(II);
3860  Instruction *TI = II->getParent()->getTerminator();
3861  bool CannotRemove = false;
3862  for (++BI; &*BI != TI; ++BI) {
3863  if (isa<AllocaInst>(BI)) {
3864  CannotRemove = true;
3865  break;
3866  }
3867  if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
3868  if (auto *II2 = dyn_cast<IntrinsicInst>(BCI)) {
3869  // If there is a stackrestore below this one, remove this one.
3870  if (II2->getIntrinsicID() == Intrinsic::stackrestore)
3871  return eraseInstFromFunction(CI);
3872 
3873  // Bail if we cross over an intrinsic with side effects, such as
3874  // llvm.stacksave, llvm.read_register, or llvm.setjmp.
3875  if (II2->mayHaveSideEffects()) {
3876  CannotRemove = true;
3877  break;
3878  }
3879  } else {
3880  // If we found a non-intrinsic call, we can't remove the stack
3881  // restore.
3882  CannotRemove = true;
3883  break;
3884  }
3885  }
3886  }
3887 
3888  // If the stack restore is in a return, resume, or unwind block and if there
3889  // are no allocas or calls between the restore and the return, nuke the
3890  // restore.
3891  if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
3892  return eraseInstFromFunction(CI);
3893  break;
3894  }
3895  case Intrinsic::lifetime_start:
3896  // Asan needs to poison memory to detect invalid access which is possible
3897  // even for an empty lifetime range.
3898  if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3899  II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
3900  II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3901  break;
3902 
3903  if (removeTriviallyEmptyRange(*II, Intrinsic::lifetime_start,
3904  Intrinsic::lifetime_end, *this))
3905  return nullptr;
3906  break;
3907  case Intrinsic::assume: {
3908  Value *IIOperand = II->getArgOperand(0);
3909  // Remove an assume if it is followed by an identical assume.
3910  // TODO: Do we need this? Unless there are conflicting assumptions, the
3911  // computeKnownBits(IIOperand) below here eliminates redundant assumes.
3912  Instruction *Next = II->getNextNonDebugInstruction();
3913  if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
3914  return eraseInstFromFunction(CI);
3915 
3916  // Canonicalize assume(a && b) -> assume(a); assume(b);
3917  // Note: New assumption intrinsics created here are registered by
3918  // the InstCombineIRInserter object.
3919  FunctionType *AssumeIntrinsicTy = II->getFunctionType();
3920  Value *AssumeIntrinsic = II->getCalledValue();
3921  Value *A, *B;
3922  if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
3923  Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, II->getName());
3924  Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
3925  return eraseInstFromFunction(*II);
3926  }
3927  // assume(!(a || b)) -> assume(!a); assume(!b);
3928  if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
3929  Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
3930  Builder.CreateNot(A), II->getName());
3931  Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
3932  Builder.CreateNot(B), II->getName());
3933  return eraseInstFromFunction(*II);
3934  }
3935 
3936  // assume( (load addr) != null ) -> add 'nonnull' metadata to load
3937  // (if assume is valid at the load)
3938  CmpInst::Predicate Pred;
3939  Instruction *LHS;
3940  if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) &&
3941  Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load &&
3942  LHS->getType()->isPointerTy() &&
3943  isValidAssumeForContext(II, LHS, &DT)) {
3944  MDNode *MD = MDNode::get(II->getContext(), None);
3945  LHS->setMetadata(LLVMContext::MD_nonnull, MD);
3946  return eraseInstFromFunction(*II);
3947 
3948  // TODO: apply nonnull return attributes to calls and invokes
3949  // TODO: apply range metadata for range check patterns?
3950  }
3951 
3952  // If there is a dominating assume with the same condition as this one,
3953  // then this one is redundant, and should be removed.
3954  KnownBits Known(1);
3955  computeKnownBits(IIOperand, Known, 0, II);
3956  if (Known.isAllOnes())
3957  return eraseInstFromFunction(*II);
3958 
3959  // Update the cache of affected values for this assumption (we might be
3960  // here because we just simplified the condition).
3961  AC.updateAffectedValues(II);
3962  break;
3963  }
3964  case Intrinsic::experimental_gc_relocate: {
3965  auto &GCR = *cast<GCRelocateInst>(II);
3966 
3967  // If we have two copies of the same pointer in the statepoint argument
3968  // list, canonicalize to one. This may let us common gc.relocates.
3969  if (GCR.getBasePtr() == GCR.getDerivedPtr() &&
3970  GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) {
3971  auto *OpIntTy = GCR.getOperand(2)->getType();
3972  II->setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex()));
3973  return II;
3974  }
3975 
3976  // Translate facts known about a pointer before relocating into
3977  // facts about the relocate value, while being careful to
3978  // preserve relocation semantics.
3979  Value *DerivedPtr = GCR.getDerivedPtr();
3980 
3981  // Remove the relocation if unused, note that this check is required
3982  // to prevent the cases below from looping forever.
3983  if (II->use_empty())
3984  return eraseInstFromFunction(*II);
3985 
3986  // Undef is undef, even after relocation.
3987  // TODO: provide a hook for this in GCStrategy. This is clearly legal for
3988  // most practical collectors, but there was discussion in the review thread
3989  // about whether it was legal for all possible collectors.
3990  if (isa<UndefValue>(DerivedPtr))
3991  // Use undef of gc_relocate's type to replace it.
3992  return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3993 
3994  if (auto *PT = dyn_cast<PointerType>(II->getType())) {
3995  // The relocation of null will be null for most any collector.
3996  // TODO: provide a hook for this in GCStrategy. There might be some
3997  // weird collector this property does not hold for.
3998  if (isa<ConstantPointerNull>(DerivedPtr))
3999  // Use null-pointer of gc_relocate's type to replace it.
4000  return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));
4001 
4002  // isKnownNonNull -> nonnull attribute
4003  if (!II->hasRetAttr(Attribute::NonNull) &&
4004  isKnownNonZero(DerivedPtr, DL, 0, &AC, II, &DT)) {
4005  II->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
4006  return II;
4007  }
4008  }
4009 
4010  // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
4011  // Canonicalize on the type from the uses to the defs
4012 
4013  // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
4014  break;
4015  }
4016 
4017  case Intrinsic::experimental_guard: {
4018  // Is this guard followed by another guard? We scan forward over a small
4019  // fixed window of instructions to handle common cases with conditions
4020  // computed between guards.
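 // e.g. guard(a); t = x + y; guard(b) becomes t = x + y; guard(a & b).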
4021  Instruction *NextInst = II->getNextNode();
4022  for (unsigned i = 0; i < GuardWideningWindow; i++) {
4023  // Note: Using context-free form to avoid compile time blow up
4024  if (!isSafeToSpeculativelyExecute(NextInst))
4025  break;
4026  NextInst = NextInst->getNextNode();
4027  }
4028  Value *NextCond = nullptr;
4029  if (match(NextInst,
4030  m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
4031  Value *CurrCond = II->getArgOperand(0);
4032 
4033  // Remove a guard if it is immediately preceded by an identical guard.
4034  if (CurrCond == NextCond)
4035  return eraseInstFromFunction(*NextInst);
4036 
4037  // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
4038  Instruction* MoveI = II->getNextNode();
4039  while (MoveI != NextInst) {
4040  auto *Temp = MoveI;
4041  MoveI = MoveI->getNextNode();
4042  Temp->moveBefore(II);
4043  }
4044  II->setArgOperand(0, Builder.CreateAnd(CurrCond, NextCond));
4045  return eraseInstFromFunction(*NextInst);
4046  }
4047  break;
4048  }
4049  }
4050  return visitCallBase(*II);
4051 }
4052 
4053 // Fence instruction simplification
4054 Instruction *InstCombiner::visitFenceInst(FenceInst &FI) {
4055  // Remove identical consecutive fences.
4056  Instruction *Next = FI.getNextNonDebugInstruction();
4057  if (auto *NFI = dyn_cast<FenceInst>(Next))
4058  if (FI.isIdenticalTo(NFI))
4059  return eraseInstFromFunction(FI);
4060  return nullptr;
4061 }
4062 
4063 // InvokeInst simplification
4064 Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
4065  return visitCallBase(II);
4066 }
4067 
4068 // CallBrInst simplification
4069 Instruction *InstCombiner::visitCallBrInst(CallBrInst &CBI) {
4070  return visitCallBase(CBI);
4071 }
4072 
4073 /// If this cast does not affect the value passed through the varargs area, we
4074 /// can eliminate the use of the cast.
4075 static bool isSafeToEliminateVarargsCast(const CallBase &Call,
4076  const DataLayout &DL,
4077  const CastInst *const CI,
4078  const int ix) {
4079  if (!CI->isLosslessCast())
4080  return false;
4081 
4082  // If this is a GC intrinsic, avoid munging types. We need types for
4083  // statepoint reconstruction in SelectionDAG.
4084  // TODO: This is probably something which should be expanded to all
4085  // intrinsics since the entire point of intrinsics is that
4086  // they are understandable by the optimizer.
4087  if (isStatepoint(&Call) || isGCRelocate(&Call) || isGCResult(&Call))
4088  return false;
4089 
4090  // The size of ByVal or InAlloca arguments is derived from the type, so we
4091  // can't change to a type with a different size. If the size were
4092  // passed explicitly we could avoid this check.
4093  if (!Call.isByValOrInAllocaArgument(ix))
4094  return true;
4095 
4096  Type* SrcTy =
4097  cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
4098  Type *DstTy = Call.isByValArgument(ix)
4099  ? Call.getParamByValType(ix)
4100  : cast<PointerType>(CI->getType())->getElementType();
4101  if (!SrcTy->isSized() || !DstTy->isSized())
4102  return false;
4103  if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
4104  return false;
4105  return true;
4106 }
4107 
4108 Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
4109  if (!CI->getCalledFunction()) return nullptr;
4110 
4111  auto InstCombineRAUW = [this](Instruction *From, Value *With) {
4112  replaceInstUsesWith(*From, With);
4113  };
4114  auto InstCombineErase = [this](Instruction *I) {
4115  eraseInstFromFunction(*I);
4116  };
4117  LibCallSimplifier Simplifier(DL, &TLI, ORE, BFI, PSI, InstCombineRAUW,
4118  InstCombineErase);
4119  if (Value *With = Simplifier.optimizeCall(CI)) {
4120  ++NumSimplified;
4121  return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
4122  }
4123 
4124  return nullptr;
4125 }
4126 
4127 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
4128  // Strip off at most one level of pointer casts, looking for an alloca. This
4129  // is good enough in practice and simpler than handling any number of casts.
4130  Value *Underlying = TrampMem->stripPointerCasts();
4131  if (Underlying != TrampMem &&
4132  (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
4133  return nullptr;
4134  if (!isa<AllocaInst>(Underlying))
4135  return nullptr;
4136 
4137  IntrinsicInst *InitTrampoline = nullptr;
4138  for (User *U : TrampMem->users()) {
4139  IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4140  if (!II)
4141  return nullptr;
4142  if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
4143  if (InitTrampoline)
4144  // More than one init_trampoline writes to this value. Give up.
4145  return nullptr;
4146  InitTrampoline = II;
4147  continue;
4148  }
4149  if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
4150  // Allow any number of calls to adjust.trampoline.
4151  continue;
4152  return nullptr;
4153  }
4154 
4155  // No call to init.trampoline found.
4156  if (!InitTrampoline)
4157  return nullptr;
4158 
4159  // Check that the alloca is being used in the expected way.
4160  if (InitTrampoline->getOperand(0) != TrampMem)
4161  return nullptr;
4162 
4163  return InitTrampoline;
4164 }
4165 
4166 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
4167  Value *TrampMem) {
4168  // Visit all the previous instructions in the basic block, and try to find a
4169  // init.trampoline which has a direct path to the adjust.trampoline.
4170  for (BasicBlock::iterator I = AdjustTramp->getIterator(),
4171  E = AdjustTramp->getParent()->begin();
4172  I != E;) {
4173  Instruction *Inst = &*--I;
4174  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
4175  if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
4176  II->getOperand(0) == TrampMem)
4177  return II;
4178  if (Inst->mayWriteToMemory())
4179  return nullptr;
4180  }
4181  return nullptr;
4182 }
4183 
4184 // Given a call to llvm.adjust.trampoline, find and return the corresponding
4185 // call to llvm.init.trampoline if the call to the trampoline can be optimized
4186 // to a direct call to a function. Otherwise return NULL.
4187 static IntrinsicInst *findInitTrampoline(Value *Callee) {
4188  Callee = Callee->stripPointerCasts();
4189  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
4190  if (!AdjustTramp ||
4191  AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
4192  return nullptr;
4193 
4194  Value *TrampMem = AdjustTramp->getOperand(0);
4195 
4197  return IT;
4198  if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
4199  return IT;
4200  return nullptr;
4201 }
4202 
4203 static void annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI) {
4204  unsigned NumArgs = Call.getNumArgOperands();
4205  ConstantInt *Op0C = dyn_cast<ConstantInt>(Call.getOperand(0));
4206  ConstantInt *Op1C =
4207  (NumArgs == 1) ? nullptr : dyn_cast<ConstantInt>(Call.getOperand(1));
4208  // Bail out if the allocation size is zero.
4209  if ((Op0C && Op0C->isNullValue()) || (Op1C && Op1C->isNullValue()))
4210  return;
4211 
4212  if (isMallocLikeFn(&Call, TLI) && Op0C) {
4213  if (isOpNewLikeFn(&Call, TLI))
4214  Call.addAttribute(AttributeList::ReturnIndex,
4215  Attribute::getWithDereferenceableBytes(
4216  Call.getContext(), Op0C->getZExtValue()));
4217  else
4218  Call.addAttribute(AttributeList::ReturnIndex,
4219  Attribute::getWithDereferenceableOrNullBytes(
4220  Call.getContext(), Op0C->getZExtValue()));
4221  } else if (isReallocLikeFn(&Call, TLI) && Op1C) {
4222  Call.addAttribute(AttributeList::ReturnIndex,
4223  Attribute::getWithDereferenceableOrNullBytes(
4224  Call.getContext(), Op1C->getZExtValue()));
4225  } else if (isCallocLikeFn(&Call, TLI) && Op0C && Op1C) {
4226  bool Overflow;
4227  const APInt &N = Op0C->getValue();
4228  APInt Size = N.umul_ov(Op1C->getValue(), Overflow);
4229  if (!Overflow)
4230  Call.addAttribute(AttributeList::ReturnIndex,
4231  Attribute::getWithDereferenceableOrNullBytes(
4232  Call.getContext(), Size.getZExtValue()));
4233  } else if (isStrdupLikeFn(&Call, TLI)) {
4234  uint64_t Len = GetStringLength(Call.getOperand(0));
4235  if (Len) {
4236  // strdup
4237  if (NumArgs == 1)
4238  Call.addAttribute(AttributeList::ReturnIndex,
4239  Attribute::getWithDereferenceableOrNullBytes(
4240  Call.getContext(), Len));
4241  // strndup
4242  else if (NumArgs == 2 && Op1C)
4243  Call.addAttribute(
4244  AttributeList::ReturnIndex,
4245  Attribute::getWithDereferenceableOrNullBytes(
4246  Call.getContext(), std::min(Len, Op1C->getZExtValue() + 1)));
4247  }
4248  }
4249 }
4250 
4251 /// Improvements for call, callbr and invoke instructions.
4252 Instruction *InstCombiner::visitCallBase(CallBase &Call) {
4253  if (isAllocationFn(&Call, &TLI))
4254  annotateAnyAllocSite(Call, &TLI);
4255 
4256  bool Changed = false;
4257 
4258  // Mark any parameters that are known to be non-null with the nonnull
4259  // attribute. This is helpful for inlining calls to functions with null
4260  // checks on their arguments.
4261  SmallVector<unsigned, 4> ArgNos;
4262  unsigned ArgNo = 0;
4263 
4264  for (Value *V : Call.args()) {
4265  if (V->getType()->isPointerTy() &&
4266  !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
4267  isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
4268  ArgNos.push_back(ArgNo);
4269  ArgNo++;
4270  }
4271 
4272  assert(ArgNo == Call.arg_size() && "sanity check");
4273 
4274  if (!ArgNos.empty()) {
4275  AttributeList AS = Call.getAttributes();
4276  LLVMContext &Ctx = Call.getContext();
4277  AS = AS.addParamAttribute(Ctx, ArgNos,
4278  Attribute::get(Ctx, Attribute::NonNull));
4279  Call.setAttributes(AS);
4280  Changed = true;
4281  }
4282 
4283  // If the callee is a pointer to a function, attempt to move any casts to the
4284  // arguments of the call/callbr/invoke.
4285  Value *Callee = Call.getCalledValue();
4286  if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
4287  return nullptr;
4288 
4289  if (Function *CalleeF = dyn_cast<Function>(Callee)) {
4290  // Remove the convergent attr on calls when the callee is not convergent.
4291  if (Call.isConvergent() && !CalleeF->isConvergent() &&
4292  !CalleeF->isIntrinsic()) {
4293  LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
4294  << "\n");
4295  Call.setNotConvergent();
4296  return &Call;
4297  }
4298 
4299  // If the call and callee calling conventions don't match, this call must
4300  // be unreachable, as the call is undefined.
4301  if (CalleeF->getCallingConv() != Call.getCallingConv() &&
4302  // Only do this for calls to a function with a body. A prototype may
4303  // not actually end up matching the implementation's calling conv for a
4304  // variety of reasons (e.g. it may be written in assembly).
4305  !CalleeF->isDeclaration()) {
4306  Instruction *OldCall = &Call;
4307  CreateNonTerminatorUnreachable(OldCall);
4308  // If OldCall does not return void then replaceAllUsesWith undef.
4309  // This allows ValueHandlers and custom metadata to adjust themselves.
4310  if (!OldCall->getType()->isVoidTy())
4311  replaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
4312  if (isa<CallInst>(OldCall))
4313  return eraseInstFromFunction(*OldCall);
4314 
4315  // We cannot remove an invoke or a callbr, because it would change the
4316  // CFG, just change the callee to a null pointer.
4317  cast<CallBase>(OldCall)->setCalledFunction(
4318  CalleeF->getFunctionType(),
4319  Constant::getNullValue(CalleeF->getType()));
4320  return nullptr;
4321  }
4322  }
4323 
4324  if ((isa<ConstantPointerNull>(Callee) &&
4325  !NullPointerIsDefined(Call.getFunction())) ||
4326  isa<UndefValue>(Callee)) {
4327  // If Call does not return void then replaceAllUsesWith undef.
4328  // This allows ValueHandlers and custom metadata to adjust themselves.
4329  if (!Call.getType()->isVoidTy())
4330  replaceInstUsesWith(Call, UndefValue::get(Call.getType()));
4331 
4332  if (Call.isTerminator()) {
4333  // Can't remove an invoke or callbr because we cannot change the CFG.
4334  return nullptr;
4335  }
4336 
4337  // This instruction is not reachable, just remove it.
4338  CreateNonTerminatorUnreachable(&Call);
4339  return eraseInstFromFunction(Call);
4340  }
4341 
4342  if (IntrinsicInst *II = findInitTrampoline(Callee))
4343  return transformCallThroughTrampoline(Call, *II);
4344 
4345  PointerType *PTy = cast<PointerType>(Callee->getType());
4346  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
4347  if (FTy->isVarArg()) {
4348  int ix = FTy->getNumParams();
4349  // See if we can optimize any arguments passed through the varargs area of
4350  // the call.
4351  for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
4352  I != E; ++I, ++ix) {
4353  CastInst *CI = dyn_cast<CastInst>(*I);
4354  if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
4355  *I = CI->getOperand(0);
4356 
4357  // Update the byval type to match the argument type.
4358  if (Call.isByValArgument(ix)) {
4359  Call.removeParamAttr(ix, Attribute::ByVal);
4360  Call.addParamAttr(
4361  ix, Attribute::getWithByValType(
4362  Call.getContext(),
4363  CI->getOperand(0)->getType()->getPointerElementType()));
4364  }
4365  Changed = true;
4366  }
4367  }
4368  }
4369 
4370  if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
4371  // Inline asm calls cannot throw - mark them 'nounwind'.
4372  Call.setDoesNotThrow();
4373  Changed = true;
4374  }
4375 
4376  // Try to optimize the call if possible, we require DataLayout for most of
4377  // this. None of these calls are seen as possibly dead so go ahead and
4378  // delete the instruction now.
4379  if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
4380  Instruction *I = tryOptimizeCall(CI);
4381  // If we changed something, return the result. Otherwise fall
4382  // through to the remaining checks.
4383  if (I) return eraseInstFromFunction(*I);
4384  }
4385 
4386  if (isAllocLikeFn(&Call, &TLI))
4387  return visitAllocSite(Call);
4388 
4389  return Changed ? &Call : nullptr;
4390 }
4391 
4392 /// If the callee is a constexpr cast of a function, attempt to move the cast to
4393 /// the arguments of the call/callbr/invoke.
4394 bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
4395  auto *Callee = dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
4396  if (!Callee)
4397  return false;
4398 
4399  // If this is a call to a thunk function, don't remove the cast. Thunks are
4400  // used to transparently forward all incoming parameters and outgoing return
4401  // values, so it's important to leave the cast in place.
4402  if (Callee->hasFnAttribute("thunk"))
4403  return false;
4404 
4405  // If this is a musttail call, the callee's prototype must match the caller's
4406  // prototype with the exception of pointee types. The code below doesn't
4407  // implement that, so we can't do this transform.
4408  // TODO: Do the transform if it only requires adding pointer casts.
4409  if (Call.isMustTailCall())
4410  return false;
4411 
4412  Instruction *Caller = &Call;
4413  const AttributeList &CallerPAL = Call.getAttributes();
4414 
4415  // Okay, this is a cast from a function to a different type. Unless doing so
4416  // would cause a type conversion of one of our arguments, change this call to
4417  // be a direct call with arguments cast to the appropriate types.
4418  FunctionType *FT = Callee->getFunctionType();
4419  Type *OldRetTy = Caller->getType();
4420  Type *NewRetTy = FT->getReturnType();
4421 
4422  // Check to see if we are changing the return type...
4423  if (OldRetTy != NewRetTy) {
4424 
4425  if (NewRetTy->isStructTy())
4426  return false; // TODO: Handle multiple return values.
4427 
4428  if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
4429  if (Callee->isDeclaration())
4430  return false; // Cannot transform this return value.
4431 
4432  if (!Caller->use_empty() &&
4433  // void -> non-void is handled specially
4434  !NewRetTy->isVoidTy())
4435  return false; // Cannot transform this return value.
4436  }
4437 
4438  if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
4439  AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
4440  if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
4441  return false; // Attribute not compatible with transformed value.
4442  }
4443 
4444  // If the callbase is an invoke/callbr instruction, and the return value is
4445  // used by a PHI node in a successor, we cannot change the return type of
4446  // the call because there is no place to put the cast instruction (without
4447  // breaking the critical edge). Bail out in this case.
4448  if (!Caller->use_empty()) {
4449  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
4450  for (User *U : II->users())
4451  if (PHINode *PN = dyn_cast<PHINode>(U))
4452  if (PN->getParent() == II->getNormalDest() ||
4453  PN->getParent() == II->getUnwindDest())
4454  return false;
4455  // FIXME: Be conservative for callbr to avoid a quadratic search.
4456  if (isa<CallBrInst>(Caller))
4457  return false;
4458  }
4459  }
4460 
4461  unsigned NumActualArgs = Call.arg_size();
4462  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
4463 
4464  // Prevent us turning:
4465  // declare void @takes_i32_inalloca(i32* inalloca)
4466  // call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
4467  //
4468  // into:
4469  // call void @takes_i32_inalloca(i32* null)
4470  //
4471  // Similarly, avoid folding away bitcasts of byval calls.
4472  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
4473  Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
4474  return false;
4475 
4476  auto AI = Call.arg_begin();
4477  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
4478  Type *ParamTy = FT->getParamType(i);
4479  Type *ActTy = (*AI)->getType();
4480 
4481  if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
4482  return false; // Cannot transform this parameter value.
4483 
4484  if (AttrBuilder(CallerPAL.getParamAttributes(i))
4485  .overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
4486  return false; // Attribute not compatible with transformed value.
4487 
4488  if (Call.isInAllocaArgument(i))
4489  return false; // Cannot transform to and from inalloca.
4490 
4491  // If the parameter is passed as a byval argument, then we have to have a
4492  // sized type and the sized type has to have the same size as the old type.
4493  if (ParamTy != ActTy && CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
4494  PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
4495  if (!ParamPTy || !ParamPTy->getElementType()->isSized())
4496  return false;
4497 
4498  Type *CurElTy = Call.getParamByValType(i);
4499  if (DL.getTypeAllocSize(CurElTy) !=
4500  DL.getTypeAllocSize(ParamPTy->getElementType()))
4501  return false;
4502  }
4503  }
4504 
4505  if (Callee->isDeclaration()) {
4506  // Do not delete arguments unless we have a function body.
4507  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
4508  return false;
4509 
4510  // If the callee is just a declaration, don't change the varargsness of the
4511  // call. We don't want to introduce a varargs call where one doesn't
4512  // already exist.
4513  PointerType *APTy = cast<PointerType>(Call.getCalledValue()->getType());
4514  if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
4515  return false;
4516 
4517  // If both the callee and the cast type are varargs, we still have to make
4518  // sure the number of fixed parameters is the same, or we have the same
4519  // ABI issues as if we introduce a varargs call.
4520  if (FT->isVarArg() &&
4521  cast<FunctionType>(APTy->getElementType())->isVarArg() &&
4522  FT->getNumParams() !=
4523  cast<FunctionType>(APTy->getElementType())->getNumParams())
4524  return false;
4525  }
4526 
4527  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
4528  !CallerPAL.isEmpty()) {
4529  // Here we have more arguments than the new function type has fixed
4530  // parameters, though we will not be dropping them. Check that these
4531  // extra arguments have attributes compatible with vararg call arguments.
4532  unsigned SRetIdx;
4533  if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
4534  SRetIdx > FT->getNumParams())
4535  return false;
4536  }
4537 
4538  // Okay, we decided that this is a safe thing to do: go ahead and start
4539  // inserting cast instructions as necessary.
4540  SmallVector<Value *, 8> Args;
4541  SmallVector<AttributeSet, 8> ArgAttrs;
4542  Args.reserve(NumActualArgs);
4543  ArgAttrs.reserve(NumActualArgs);
4544 
4545  // Get any return attributes.
4546  AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
4547 
4548  // If the return value is not being used, the type may not be compatible
4549  // with the existing attributes. Wipe out any problematic attributes.
4550  RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
4551 
4552  LLVMContext &Ctx = Call.getContext();
4553  AI = Call.arg_begin();
4554  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
4555  Type *ParamTy = FT->getParamType(i);
4556 
4557  Value *NewArg = *AI;
4558  if ((*AI)->getType() != ParamTy)
4559  NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
4560  Args.push_back(NewArg);
4561 
4562  // Add any parameter attributes.
4563  if (CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
4564  AttrBuilder AB(CallerPAL.getParamAttributes(i));
4565  AB.addByValAttr(NewArg->getType()->getPointerElementType());
4566  ArgAttrs.push_back(AttributeSet::get(Ctx, AB));
4567  } else
4568  ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
4569  }
4570 
4571  // If the function takes more arguments than the call was taking, add them
4572  // now.
4573  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
4574  Args.push_back(Constant::getNullValue(FT->getParamType(i)));
4575  ArgAttrs.push_back(AttributeSet());
4576  }
4577 
4578  // If we are removing arguments to the function, emit an obnoxious warning.
4579  if (FT->getNumParams() < NumActualArgs) {
4580  // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
4581  if (FT->isVarArg()) {
4582  // Add all of the arguments in their promoted form to the arg list.
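  // (getPromotedType widens integer types narrower than 32 bits, so e.g.
  //  an i16 passed through the variadic tail is widened to i32, mirroring
  //  C's default argument promotions.)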
4583  for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
4584  Type *PTy = getPromotedType((*AI)->getType());
4585  Value *NewArg = *AI;
4586  if (PTy != (*AI)->getType()) {
4587  // Must promote to pass through va_arg area!
4588  Instruction::CastOps opcode =
4589  CastInst::getCastOpcode(*AI, false, PTy, false);
4590  NewArg = Builder.CreateCast(opcode, *AI, PTy);
4591  }
4592  Args.push_back(NewArg);
4593 
4594  // Add any parameter attributes.
4595  ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
4596  }
4597  }
4598  }
4599 
4600  AttributeSet FnAttrs = CallerPAL.getFnAttributes();
4601 
4602  if (NewRetTy->isVoidTy())
4603  Caller->setName(""); // Void type should not have a name.
4604 
4605  assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
4606  "missing argument attributes");
4607  AttributeList NewCallerPAL = AttributeList::get(
4608  Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
4609 
4610  SmallVector<OperandBundleDef, 1> OpBundles;
4611  Call.getOperandBundlesAsDefs(OpBundles);
4612 
4613  CallBase *NewCall;
4614  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4615  NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
4616  II->getUnwindDest(), Args, OpBundles);
4617  } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
4618  NewCall = Builder.CreateCallBr(Callee, CBI->getDefaultDest(),
4619  CBI->getIndirectDests(), Args, OpBundles);
4620  } else {
4621  NewCall = Builder.CreateCall(Callee, Args, OpBundles);
4622  cast<CallInst>(NewCall)->setTailCallKind(
4623  cast<CallInst>(Caller)->getTailCallKind());
4624  }
4625  NewCall->takeName(Caller);
4626  NewCall->setCallingConv(Call.getCallingConv());
4627  NewCall->setAttributes(NewCallerPAL);
4628 
4629  // Preserve the weight metadata for the new call instruction. The metadata
4630  // is used by SamplePGO to check callsite's hotness.
4631  uint64_t W;
4632  if (Caller->extractProfTotalWeight(W))
4633  NewCall->setProfWeight(W);
4634 
4635  // Insert a cast of the return type as necessary.
4636  Instruction *NC = NewCall;
4637  Value *NV = NC;
4638  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
4639  if (!NV->getType()->isVoidTy()) {
4640  NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
4641  NC->setDebugLoc(Caller->getDebugLoc());
4642 
4643  // If this is an invoke/callbr instruction, we should insert it after the
4644  // first non-phi instruction in the normal successor block.
4645  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4646  BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
4647  InsertNewInstBefore(NC, *I);
4648  } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
4649  BasicBlock::iterator I = CBI->getDefaultDest()->getFirstInsertionPt();
4650  InsertNewInstBefore(NC, *I);
4651  } else {
4652  // Otherwise, it's a call; just insert the cast right after the call.
4653  InsertNewInstBefore(NC, *Caller);
4654  }
4655  Worklist.AddUsersToWorkList(*Caller);
4656  } else {
4657  NV = UndefValue::get(Caller->getType());
4658  }
4659  }
4660 
4661  if (!Caller->use_empty())
4662  replaceInstUsesWith(*Caller, NV);
4663  else if (Caller->hasValueHandle()) {
4664  if (OldRetTy == NV->getType())
4665  ValueHandleBase::ValueIsRAUWd(Caller, NV);
4666  else
4667  // We cannot call ValueIsRAUWd with a different type, and the
4668  // actual tracked value will disappear.
4669  ValueHandleBase::ValueIsDeleted(Caller);
4670  }
4671 
4672  eraseInstFromFunction(*Caller);
4673  return true;
4674 }
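
// A hedged IR sketch of the fold above (the function and types are
// invented): a call through a bitcast of the callee, such as
//   %r = call i8* bitcast (i32* (i32*)* @g to i8* (i8*)*)(i8* %p)
// becomes a direct call wrapped in no-op casts,
//   %a = bitcast i8* %p to i32*
//   %t = call i32* @g(i32* %a)
//   %r = bitcast i32* %t to i8*
// with attributes, calling convention, and profile weight carried over.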
4675 
4676 /// Turn a call to a function created by an init_trampoline / adjust_trampoline
4677 /// intrinsic pair into a direct call to the underlying function.
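///
/// A hypothetical sketch (@f, %tramp, and %nval are invented; @f carries
/// the chain in a 'nest' parameter):
///   call void @llvm.init.trampoline(i8* %tramp,
///                                   i8* bitcast (i32 (i8*, i32)* @f to i8*),
///                                   i8* %nval)
///   %p = call i8* @llvm.adjust.trampoline(i8* %tramp)
///   %fp = bitcast i8* %p to i32 (i32)*
///   %r = call i32 %fp(i32 42)
/// becomes:
///   %r = call i32 @f(i8* nest %nval, i32 42)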
4678 Instruction *
4679 InstCombiner::transformCallThroughTrampoline(CallBase &Call,
4680  IntrinsicInst &Tramp) {
4681  Value *Callee = Call.getCalledValue();
4682  Type *CalleeTy = Callee->getType();
4683  FunctionType *FTy = Call.getFunctionType();
4684  AttributeList Attrs = Call.getAttributes();
4685 
4686  // If the call already has the 'nest' attribute somewhere then give up -
4687  // otherwise 'nest' would occur twice after splicing in the chain.
4688  if (Attrs.hasAttrSomewhere(Attribute::Nest))
4689  return nullptr;
4690 
4691  Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
4692  FunctionType *NestFTy = NestF->getFunctionType();
4693 
4694  AttributeList NestAttrs = NestF->getAttributes();
4695  if (!NestAttrs.isEmpty()) {
4696  unsigned NestArgNo = 0;
4697  Type *NestTy = nullptr;
4698  AttributeSet NestAttr;
4699 
4700  // Look for a parameter marked with the 'nest' attribute.
4701  for (FunctionType::param_iterator I = NestFTy->param_begin(),
4702  E = NestFTy->param_end();
4703  I != E; ++NestArgNo, ++I) {
4704  AttributeSet AS = NestAttrs.getParamAttributes(NestArgNo);
4705  if (AS.hasAttribute(Attribute::Nest)) {
4706  // Record the parameter type and any other attributes.
4707  NestTy = *I;
4708  NestAttr = AS;
4709  break;
4710  }
4711  }
4712 
4713  if (NestTy) {
4714  std::vector<Value*> NewArgs;
4715  std::vector<AttributeSet> NewArgAttrs;
4716  NewArgs.reserve(Call.arg_size() + 1);
4717  NewArgAttrs.reserve(Call.arg_size());
4718 
4719  // Insert the nest argument into the call argument list, which may
4720  // mean appending it. Likewise for attributes.
4721 
4722  {
4723  unsigned ArgNo = 0;
4724  auto I = Call.arg_begin(), E = Call.arg_end();
4725  do {
4726  if (ArgNo == NestArgNo) {
4727  // Add the chain argument and attributes.
4728  Value *NestVal = Tramp.getArgOperand(2);
4729  if (NestVal->getType() != NestTy)
4730  NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
4731  NewArgs.push_back(NestVal);
4732  NewArgAttrs.push_back(NestAttr);
4733  }
4734 
4735  if (I == E)
4736  break;
4737 
4738  // Add the original argument and attributes.
4739  NewArgs.push_back(*I);
4740  NewArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
4741 
4742  ++ArgNo;
4743  ++I;
4744  } while (true);
4745  }
4746 
4747  // The trampoline may have been bitcast to a bogus type (FTy).
4748  // Handle this by synthesizing a new function type, equal to FTy
4749  // with the chain parameter inserted.
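  // (For instance, if FTy is void (i32) and the nest parameter comes first,
  //  the synthesized type would be void (i8*, i32), with i8* standing in
  //  for whatever NestTy actually is.)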
4750 
4751  std::vector<Type*> NewTypes;
4752  NewTypes.reserve(FTy->getNumParams()+1);
4753 
4754  // Insert the chain's type into the list of parameter types, which may
4755  // mean appending it.
4756  {
4757  unsigned ArgNo = 0;
4758  FunctionType::param_iterator I = FTy->param_begin(),
4759  E = FTy->param_end();
4760 
4761  do {
4762  if (ArgNo == NestArgNo)
4763  // Add the chain's type.
4764  NewTypes.push_back(NestTy);
4765 
4766  if (I == E)
4767  break;
4768 
4769  // Add the original type.
4770  NewTypes.push_back(*I);
4771 
4772  ++ArgNo;
4773  ++I;
4774  } while (true);
4775  }
4776 
4777  // Replace the trampoline call with a direct call. Let the generic
4778  // code sort out any function type mismatches.
4779  FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
4780  FTy->isVarArg());
4781  Constant *NewCallee =
4782  NestF->getType() == PointerType::getUnqual(NewFTy) ?
4783  NestF : ConstantExpr::getBitCast(NestF,
4784  PointerType::getUnqual(NewFTy));
4785  AttributeList NewPAL =
4786  AttributeList::get(FTy->getContext(), Attrs.getFnAttributes(),
4787  Attrs.getRetAttributes(), NewArgAttrs);
4788 
4789  SmallVector<OperandBundleDef, 1> OpBundles;
4790  Call.getOperandBundlesAsDefs(OpBundles);
4791 
4792  Instruction *NewCaller;
4793  if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
4794  NewCaller = InvokeInst::Create(NewFTy, NewCallee,
4795  II->getNormalDest(), II->getUnwindDest(),
4796  NewArgs, OpBundles);
4797  cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
4798  cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
4799  } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
4800  NewCaller =
4801  CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(),
4802  CBI->getIndirectDests(), NewArgs, OpBundles);
4803  cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
4804  cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
4805  } else {
4806  NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles);
4807  cast<CallInst>(NewCaller)->setTailCallKind(
4808  cast<CallInst>(Call).getTailCallKind());
4809  cast<CallInst>(NewCaller)->setCallingConv(
4810  cast<CallInst>(Call).getCallingConv());
4811  cast<CallInst>(NewCaller)->setAttributes(NewPAL);
4812  }
4813  NewCaller->setDebugLoc(Call.getDebugLoc());
4814 
4815  return NewCaller;
4816  }
4817  }
4818 
4819  // Replace the trampoline call with a direct call. Since there is no 'nest'
4820  // parameter, there is no need to adjust the argument list. Let the generic
4821  // code sort out any function type mismatches.
4822  Constant *NewCallee = ConstantExpr::getBitCast(NestF, CalleeTy);
4823  Call.setCalledFunction(FTy, NewCallee);
4824  return &Call;
4825 }