//===- InferAddressSpaces.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// CUDA C/C++ includes memory space designation as variable type qualifiers
// (such as __global__ and __shared__). Knowing the space of a memory access
// allows CUDA compilers to emit faster PTX loads and stores. For example, a
// load from shared memory can be translated to `ld.shared`, which is roughly
// 10% faster than a generic `ld` on an NVIDIA Tesla K40c.
//
// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
// compilers must infer the memory space of an address expression from
// type-qualified variables.
//
// LLVM IR uses non-zero (so-called specific) address spaces to represent
// memory spaces (e.g. addrspace(3) means shared memory). The Clang frontend
// places only type-qualified variables in specific address spaces, and then
// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
// (the so-called generic address space) for other instructions to use.
//
// For example, Clang translates the following CUDA code
//   __shared__ float a[10];
//   float v = a[i];
// to
//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
//   %v = load float, float* %1 ; emits ld.f32
// @a is in addrspace(3) since it's type-qualified, but its use from %1 is
// redirected to %0 (the generic version of @a).
//
// The optimization implemented in this file propagates specific address spaces
// from type-qualified variable declarations to their users. For example, it
// optimizes the above IR to
//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
// propagating the addrspace(3) from @a to %1. As a result, the NVPTX
// codegen is able to emit ld.shared.f32 for %v.
//
// Address space inference works in two steps. First, it uses a data-flow
// analysis to infer as many generic pointers as possible to point to only one
// specific address space. In the above example, it can prove that %1 only
// points to addrspace(3). This algorithm was published in
//   CUDA: Compiling and optimizing for a GPU platform
//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
//   ICCS 2012
//
// Then, address space inference replaces all refinable generic pointers with
// equivalent specific pointers.
//
// The major challenge of implementing this optimization is handling PHINodes,
// which may create loops in the data flow graph. This brings two complications.
//
// First, the data flow analysis in Step 1 needs to be circular. For example,
//     %generic.input = addrspacecast float addrspace(3)* %input to float*
//   loop:
//     %y = phi [ %generic.input, %y2 ]
//     %y2 = getelementptr %y, 1
//     %v = load %y2
//     br ..., label %loop, ...
// proving %y specific requires proving both %generic.input and %y2 specific,
// but proving %y2 specific circles back to %y. To address this complication,
// the data flow analysis operates on a lattice:
//   uninitialized > specific address spaces > generic.
// All address expressions (our implementation only considers phi, bitcast,
// addrspacecast, and getelementptr) start with the uninitialized address space.
// The monotone transfer function moves the address space of a pointer down a
// lattice path from uninitialized to specific and then to generic. A join
// operation of two different specific address spaces pushes the expression down
// to the generic address space. The analysis completes once it reaches a fixed
// point.
//
// Second, IR rewriting in Step 2 also needs to be circular. For example,
// converting %y to addrspace(3) requires the compiler to know the converted
// %y2, but converting %y2 needs the converted %y. To address this complication,
// we break these cycles using "undef" placeholders. When converting an
// instruction `I` to a new address space, if its operand `Op` is not converted
// yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
// For instance, our algorithm first converts %y to
//   %y' = phi float addrspace(3)* [ %input, undef ]
// Then, it converts %y2 to
//   %y2' = getelementptr %y', 1
// Finally, it fixes the undef in %y' so that
//   %y' = phi float addrspace(3)* [ %input, %y2' ]
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/InferAddressSpaces.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>

#define DEBUG_TYPE "infer-address-spaces"

using namespace llvm;

static cl::opt<bool> AssumeDefaultIsFlatAddressSpace(
    "assume-default-is-flat-addrspace", cl::init(false), cl::ReallyHidden,
    cl::desc("The default address space is assumed as the flat address space. "
             "This is mainly for testing purposes."));

static const unsigned UninitializedAddressSpace =
    std::numeric_limits<unsigned>::max();

namespace {

using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;
// Different from ValueToAddrSpaceMapTy, where a new addrspace is inferred on
// the *def* of a value, PredicatedAddrSpaceMapTy is a map where a new
// addrspace is inferred on the *use* of a pointer. This map is introduced to
// infer addrspace from the addrspace predicate assumption built from an
// assume intrinsic. In that scenario, only specific uses (under a valid
// assumption context) can be inferred with a new addrspace.
using PredicatedAddrSpaceMapTy =
    DenseMap<std::pair<const Value *, const Value *>, unsigned>;
using PostorderStackTy = llvm::SmallVector<PointerIntPair<Value *, 1, bool>, 4>;

class InferAddressSpaces : public FunctionPass {
  unsigned FlatAddrSpace = 0;

public:
  static char ID;

  InferAddressSpaces()
      : FunctionPass(ID), FlatAddrSpace(UninitializedAddressSpace) {}
  InferAddressSpaces(unsigned AS) : FunctionPass(ID), FlatAddrSpace(AS) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
  }

  bool runOnFunction(Function &F) override;
};

class InferAddressSpacesImpl {
  AssumptionCache &AC;
  const DominatorTree *DT = nullptr;
  const TargetTransformInfo *TTI = nullptr;
  const DataLayout *DL = nullptr;

  /// Target-specific address space, uses of which should be replaced if
  /// possible.
  unsigned FlatAddrSpace = 0;

  // Tries to update the address space of V. Returns true if V is updated and
  // false otherwise.
  bool updateAddressSpace(const Value &V,
                          ValueToAddrSpaceMapTy &InferredAddrSpace,
                          PredicatedAddrSpaceMapTy &PredicatedAS) const;

  // Tries to infer the specific address space of each address expression in
  // Postorder.
  void inferAddressSpaces(ArrayRef<WeakTrackingVH> Postorder,
                          ValueToAddrSpaceMapTy &InferredAddrSpace,
                          PredicatedAddrSpaceMapTy &PredicatedAS) const;

  bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const;

  Value *cloneInstructionWithNewAddressSpace(
      Instruction *I, unsigned NewAddrSpace,
      const ValueToValueMapTy &ValueWithNewAddrSpace,
      const PredicatedAddrSpaceMapTy &PredicatedAS,
      SmallVectorImpl<const Use *> *UndefUsesToFix) const;

  // Changes the flat address expressions in function F to point to specific
  // address spaces if InferredAddrSpace says so. Postorder is the postorder of
  // all flat expressions in the use-def graph of function F.
  bool
  rewriteWithNewAddressSpaces(ArrayRef<WeakTrackingVH> Postorder,
                              const ValueToAddrSpaceMapTy &InferredAddrSpace,
                              const PredicatedAddrSpaceMapTy &PredicatedAS,
                              Function *F) const;

  void appendsFlatAddressExpressionToPostorderStack(
      Value *V, PostorderStackTy &PostorderStack,
      DenseSet<Value *> &Visited) const;

  bool rewriteIntrinsicOperands(IntrinsicInst *II, Value *OldV,
                                Value *NewV) const;
  void collectRewritableIntrinsicOperands(IntrinsicInst *II,
                                          PostorderStackTy &PostorderStack,
                                          DenseSet<Value *> &Visited) const;

  std::vector<WeakTrackingVH> collectFlatAddressExpressions(Function &F) const;

  Value *cloneValueWithNewAddressSpace(
      Value *V, unsigned NewAddrSpace,
      const ValueToValueMapTy &ValueWithNewAddrSpace,
      const PredicatedAddrSpaceMapTy &PredicatedAS,
      SmallVectorImpl<const Use *> *UndefUsesToFix) const;
  unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) const;

  unsigned getPredicatedAddrSpace(const Value &V, Value *Opnd) const;

public:
  InferAddressSpacesImpl(AssumptionCache &AC, const DominatorTree *DT,
                         const TargetTransformInfo *TTI, unsigned FlatAddrSpace)
      : AC(AC), DT(DT), TTI(TTI), FlatAddrSpace(FlatAddrSpace) {}
  bool run(Function &F);
};

} // end anonymous namespace

char InferAddressSpaces::ID = 0;

INITIALIZE_PASS_BEGIN(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
                    false, false)

// Check whether that's a no-op pointer bitcast using a pair of
// `ptrtoint`/`inttoptr` due to the missing no-op pointer bitcast over
// different address spaces.
static bool isNoopPtrIntCastPair(const Operator *I2P, const DataLayout &DL,
                                 const TargetTransformInfo *TTI) {
  assert(I2P->getOpcode() == Instruction::IntToPtr);
  auto *P2I = dyn_cast<Operator>(I2P->getOperand(0));
  if (!P2I || P2I->getOpcode() != Instruction::PtrToInt)
    return false;
  // Check that it's really safe to treat that pair of `ptrtoint`/`inttoptr` as
  // a no-op cast. Besides checking that both of them are no-op casts, as the
  // reinterpreted pointer may be used in other pointer arithmetic, we also
  // need to double-check that through the target-specific hook. That ensures
  // the underlying target also agrees that's a no-op address space cast and
  // pointer bits are preserved.
  // The current IR spec doesn't have clear rules on address space casts,
  // especially a clear definition for pointer bits in non-default address
  // spaces. It would be undefined if that pointer is dereferenced after an
  // invalid reinterpret cast. Also, due to the unclearness for the meaning of
  // bits in non-default address spaces in the current spec, the pointer
  // arithmetic may also be undefined after an invalid pointer reinterpret
  // cast. However, as we confirm through the target hooks that it's a no-op
  // addrspacecast, it doesn't matter since the bits should be the same.
  unsigned P2IOp0AS = P2I->getOperand(0)->getType()->getPointerAddressSpace();
  unsigned I2PAS = I2P->getType()->getPointerAddressSpace();
  return CastInst::isNoopCast(Instruction::CastOps(I2P->getOpcode()),
                              I2P->getOperand(0)->getType(), I2P->getType(),
                              DL) &&
         CastInst::isNoopCast(Instruction::CastOps(P2I->getOpcode()),
                              P2I->getOperand(0)->getType(), P2I->getType(),
                              DL) &&
         (P2IOp0AS == I2PAS || TTI->isNoopAddrSpaceCast(P2IOp0AS, I2PAS));
}

// Returns true if V is an address expression.
// TODO: Currently, we consider only phi, bitcast, addrspacecast, and
// getelementptr operators.
static bool isAddressExpression(const Value &V, const DataLayout &DL,
                                const TargetTransformInfo *TTI) {
  const Operator *Op = dyn_cast<Operator>(&V);
  if (!Op)
    return false;

  switch (Op->getOpcode()) {
  case Instruction::PHI:
    assert(Op->getType()->isPointerTy());
    return true;
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return true;
  case Instruction::Select:
    return Op->getType()->isPointerTy();
  case Instruction::Call: {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(&V);
    return II && II->getIntrinsicID() == Intrinsic::ptrmask;
  }
  case Instruction::IntToPtr:
    return isNoopPtrIntCastPair(Op, DL, TTI);
  default:
    // That value is an address expression if it has an assumed address space.
    return TTI->getAssumedAddrSpace(&V) != UninitializedAddressSpace;
  }
}

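// Illustrative note (editor's addition, not in the original source): given
//   %a = addrspacecast i8 addrspace(1)* %x to i8*
//   %g = getelementptr i8, i8* %a, i64 4
//   %s = select i1 %c, i8* %a, i8* %g
// each of %a, %g, and %s is an address expression in the sense above, while a
// load result, or an inttoptr without a matching no-op ptrtoint, is not.
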
// Returns the pointer operands of V.
//
// Precondition: V is an address expression.
static SmallVector<Value *, 2>
getPointerOperands(const Value &V, const DataLayout &DL,
                   const TargetTransformInfo *TTI) {
  const Operator &Op = cast<Operator>(V);
  switch (Op.getOpcode()) {
  case Instruction::PHI: {
    auto IncomingValues = cast<PHINode>(Op).incoming_values();
    return {IncomingValues.begin(), IncomingValues.end()};
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return {Op.getOperand(0)};
  case Instruction::Select:
    return {Op.getOperand(1), Op.getOperand(2)};
  case Instruction::Call: {
    const IntrinsicInst &II = cast<IntrinsicInst>(Op);
    assert(II.getIntrinsicID() == Intrinsic::ptrmask &&
           "unexpected intrinsic call");
    return {II.getArgOperand(0)};
  }
  case Instruction::IntToPtr: {
    assert(isNoopPtrIntCastPair(&Op, DL, TTI));
    auto *P2I = cast<Operator>(Op.getOperand(0));
    return {P2I->getOperand(0)};
  }
  default:
    llvm_unreachable("Unexpected instruction type.");
  }
}

bool InferAddressSpacesImpl::rewriteIntrinsicOperands(IntrinsicInst *II,
                                                      Value *OldV,
                                                      Value *NewV) const {
  Module *M = II->getParent()->getParent()->getParent();

  switch (II->getIntrinsicID()) {
  case Intrinsic::objectsize: {
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    Function *NewDecl =
        Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  case Intrinsic::ptrmask:
    // This is handled as an address expression, not as a use memory operation.
    return false;
  default: {
    Value *Rewrite = TTI->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
    if (!Rewrite)
      return false;
    if (Rewrite != II)
      II->replaceAllUsesWith(Rewrite);
    return true;
  }
  }
}

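// Illustrative example (editor's addition, not in the original source): for
//   %n = call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 false, i1 false, i1 false)
// rewriting %p to an addrspace(3) pointer re-declares the intrinsic as
// @llvm.objectsize.i64.p3i8, so the call's pointer parameter type stays
// consistent with the new operand.
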
void InferAddressSpacesImpl::collectRewritableIntrinsicOperands(
    IntrinsicInst *II, PostorderStackTy &PostorderStack,
    DenseSet<Value *> &Visited) const {
  auto IID = II->getIntrinsicID();
  switch (IID) {
  case Intrinsic::ptrmask:
  case Intrinsic::objectsize:
    appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0),
                                                 PostorderStack, Visited);
    break;
  default:
    SmallVector<int, 2> OpIndexes;
    if (TTI->collectFlatAddressOperands(OpIndexes, IID)) {
      for (int Idx : OpIndexes) {
        appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(Idx),
                                                     PostorderStack, Visited);
      }
    }
    break;
  }
}

// If V is an unvisited flat address expression, appends V to PostorderStack
// and marks it as visited.
void InferAddressSpacesImpl::appendsFlatAddressExpressionToPostorderStack(
    Value *V, PostorderStackTy &PostorderStack,
    DenseSet<Value *> &Visited) const {
  assert(V->getType()->isPointerTy());

  // Generic addressing expressions may be hidden in nested constant
  // expressions.
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // TODO: Look in non-address parts, like icmp operands.
    if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second)
      PostorderStack.emplace_back(CE, false);

    return;
  }

  if (V->getType()->getPointerAddressSpace() == FlatAddrSpace &&
      isAddressExpression(*V, *DL, TTI)) {
    if (Visited.insert(V).second) {
      PostorderStack.emplace_back(V, false);

      Operator *Op = cast<Operator>(V);
      for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I) {
        if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op->getOperand(I))) {
          if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second)
            PostorderStack.emplace_back(CE, false);
        }
      }
    }
  }
}

// Returns all flat address expressions in function F. The elements are
// ordered in postorder.
std::vector<WeakTrackingVH>
InferAddressSpacesImpl::collectFlatAddressExpressions(Function &F) const {
  // This function implements a non-recursive postorder traversal of a partial
  // use-def graph of function F.
  PostorderStackTy PostorderStack;
  // The set of visited expressions.
  DenseSet<Value *> Visited;

  auto PushPtrOperand = [&](Value *Ptr) {
    appendsFlatAddressExpressionToPostorderStack(Ptr, PostorderStack, Visited);
  };

  // Look at operations that may be interesting to accelerate by moving to a
  // known address space. We aim at loads and stores, but pure addressing
  // calculations may also be faster.
  for (Instruction &I : instructions(F)) {
    if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
      if (!GEP->getType()->isVectorTy())
        PushPtrOperand(GEP->getPointerOperand());
    } else if (auto *LI = dyn_cast<LoadInst>(&I))
      PushPtrOperand(LI->getPointerOperand());
    else if (auto *SI = dyn_cast<StoreInst>(&I))
      PushPtrOperand(SI->getPointerOperand());
    else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))
      PushPtrOperand(RMW->getPointerOperand());
    else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I))
      PushPtrOperand(CmpX->getPointerOperand());
    else if (auto *MI = dyn_cast<MemIntrinsic>(&I)) {
      // For memset/memcpy/memmove, any pointer operand can be replaced.
      PushPtrOperand(MI->getRawDest());

      // Handle 2nd operand for memcpy/memmove.
      if (auto *MTI = dyn_cast<MemTransferInst>(MI))
        PushPtrOperand(MTI->getRawSource());
    } else if (auto *II = dyn_cast<IntrinsicInst>(&I))
      collectRewritableIntrinsicOperands(II, PostorderStack, Visited);
    else if (ICmpInst *Cmp = dyn_cast<ICmpInst>(&I)) {
      // FIXME: Handle vectors of pointers
      if (Cmp->getOperand(0)->getType()->isPointerTy()) {
        PushPtrOperand(Cmp->getOperand(0));
        PushPtrOperand(Cmp->getOperand(1));
      }
    } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
      if (!ASC->getType()->isVectorTy())
        PushPtrOperand(ASC->getPointerOperand());
    } else if (auto *I2P = dyn_cast<IntToPtrInst>(&I)) {
      if (isNoopPtrIntCastPair(cast<Operator>(I2P), *DL, TTI))
        PushPtrOperand(cast<Operator>(I2P->getOperand(0))->getOperand(0));
    }
  }

  std::vector<WeakTrackingVH> Postorder; // The resultant postorder.
  while (!PostorderStack.empty()) {
    Value *TopVal = PostorderStack.back().getPointer();
    // If the operands of the expression on the top are already explored,
    // adds that expression to the resultant postorder.
    if (PostorderStack.back().getInt()) {
      if (TopVal->getType()->getPointerAddressSpace() == FlatAddrSpace)
        Postorder.push_back(TopVal);
      PostorderStack.pop_back();
      continue;
    }
    // Otherwise, adds its operands to the stack and explores them.
    PostorderStack.back().setInt(true);
    // Skip values with an assumed address space.
    if (TTI->getAssumedAddrSpace(TopVal) == UninitializedAddressSpace) {
      for (Value *PtrOperand : getPointerOperands(*TopVal, *DL, TTI)) {
        appendsFlatAddressExpressionToPostorderStack(PtrOperand,
                                                     PostorderStack, Visited);
      }
    }
  }
  return Postorder;
}

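// Illustrative sketch (editor's addition, not in the original source): for
//   %p = addrspacecast float addrspace(3)* @x to float*
//   %q = getelementptr float, float* %p, i64 1
//   %v = load float, float* %q
// the traversal pushes %q (the load's pointer operand), then explores its
// operand %p, and yields the postorder [%p, %q], so every expression is
// visited only after all of its flat pointer operands.
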
// A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
// of OperandUse.get() in the new address space. If the clone is not ready yet,
// returns an undef in the new address space as a placeholder.
static Value *operandWithNewAddressSpaceOrCreateUndef(
    const Use &OperandUse, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    const PredicatedAddrSpaceMapTy &PredicatedAS,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Value *Operand = OperandUse.get();

  Type *NewPtrTy = PointerType::getWithSamePointeeType(
      cast<PointerType>(Operand->getType()), NewAddrSpace);

  if (Constant *C = dyn_cast<Constant>(Operand))
    return ConstantExpr::getAddrSpaceCast(C, NewPtrTy);

  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
    return NewOperand;

  Instruction *Inst = cast<Instruction>(OperandUse.getUser());
  auto I = PredicatedAS.find(std::make_pair(Inst, Operand));
  if (I != PredicatedAS.end()) {
    // Insert an addrspacecast on that operand before the user.
    unsigned NewAS = I->second;
    Type *NewPtrTy = PointerType::getWithSamePointeeType(
        cast<PointerType>(Operand->getType()), NewAS);
    auto *NewI = new AddrSpaceCastInst(Operand, NewPtrTy);
    NewI->insertBefore(Inst);
    return NewI;
  }

  UndefUsesToFix->push_back(&OperandUse);
  return UndefValue::get(NewPtrTy);
}

// Returns a clone of `I` with its operands converted to those specified in
// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
// operand whose address space needs to be modified might not exist in
// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
// adds that operand use to UndefUsesToFix so that the caller can fix them
// later.
//
// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
// from a pointer whose type already matches. Therefore, this function returns a
// Value* instead of an Instruction*.
//
// This may also return nullptr in the case the instruction could not be
// rewritten.
Value *InferAddressSpacesImpl::cloneInstructionWithNewAddressSpace(
    Instruction *I, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    const PredicatedAddrSpaceMapTy &PredicatedAS,
    SmallVectorImpl<const Use *> *UndefUsesToFix) const {
  Type *NewPtrType = PointerType::getWithSamePointeeType(
      cast<PointerType>(I->getType()), NewAddrSpace);

  if (I->getOpcode() == Instruction::AddrSpaceCast) {
    Value *Src = I->getOperand(0);
    // Because `I` is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space, according
    // to our algorithm.
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Technically the intrinsic ID is a pointer typed argument, so specially
    // handle calls early.
    assert(II->getIntrinsicID() == Intrinsic::ptrmask);
    Value *NewPtr = operandWithNewAddressSpaceOrCreateUndef(
        II->getArgOperandUse(0), NewAddrSpace, ValueWithNewAddrSpace,
        PredicatedAS, UndefUsesToFix);
    Value *Rewrite =
        TTI->rewriteIntrinsicWithAddressSpace(II, II->getArgOperand(0), NewPtr);
    if (Rewrite) {
      assert(Rewrite != II && "cannot modify this pointer operation in place");
      return Rewrite;
    }

    return nullptr;
  }

  unsigned AS = TTI->getAssumedAddrSpace(I);
  if (AS != UninitializedAddressSpace) {
    // For the assumed address space, insert an `addrspacecast` to make that
    // explicit.
    Type *NewPtrTy = PointerType::getWithSamePointeeType(
        cast<PointerType>(I->getType()), AS);
    auto *NewI = new AddrSpaceCastInst(I, NewPtrTy);
    NewI->insertAfter(I);
    return NewI;
  }

  // Computes the converted pointer operands.
  SmallVector<Value *, 4> NewPointerOperands;
  for (const Use &OperandUse : I->operands()) {
    if (!OperandUse.get()->getType()->isPointerTy())
      NewPointerOperands.push_back(nullptr);
    else
      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, PredicatedAS,
          UndefUsesToFix));
  }

  switch (I->getOpcode()) {
  case Instruction::BitCast:
    return new BitCastInst(NewPointerOperands[0], NewPtrType);
  case Instruction::PHI: {
    assert(I->getType()->isPointerTy());
    PHINode *PHI = cast<PHINode>(I);
    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
      NewPHI->addIncoming(NewPointerOperands[OperandNo],
                          PHI->getIncomingBlock(Index));
    }
    return NewPHI;
  }
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
        GEP->getSourceElementType(), NewPointerOperands[0],
        SmallVector<Value *, 4>(GEP->indices()));
    NewGEP->setIsInBounds(GEP->isInBounds());
    return NewGEP;
  }
  case Instruction::Select:
    assert(I->getType()->isPointerTy());
    return SelectInst::Create(I->getOperand(0), NewPointerOperands[1],
                              NewPointerOperands[2], "", nullptr, I);
  case Instruction::IntToPtr: {
    assert(isNoopPtrIntCastPair(cast<Operator>(I), *DL, TTI));
    Value *Src = cast<Operator>(I->getOperand(0))->getOperand(0);
    if (Src->getType() == NewPtrType)
      return Src;

    // If we had a no-op inttoptr/ptrtoint pair, we may still have inferred a
    // source address space from a generic pointer source, and need to insert
    // a cast back.
    return CastInst::CreatePointerBitCastOrAddrSpaceCast(Src, NewPtrType);
  }
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

// Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
// constant expression `CE` with its operands replaced as specified in
// ValueWithNewAddrSpace.
static Value *cloneConstantExprWithNewAddressSpace(
    ConstantExpr *CE, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace, const DataLayout *DL,
    const TargetTransformInfo *TTI) {
  Type *TargetType = CE->getType()->isPointerTy()
                         ? PointerType::getWithSamePointeeType(
                               cast<PointerType>(CE->getType()), NewAddrSpace)
                         : CE->getType();

  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
    // Because CE is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space according
    // to our algorithm.
    assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
           NewAddrSpace);
    return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
  }

  if (CE->getOpcode() == Instruction::BitCast) {
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(CE->getOperand(0)))
      return ConstantExpr::getBitCast(cast<Constant>(NewOperand), TargetType);
    return ConstantExpr::getAddrSpaceCast(CE, TargetType);
  }

  if (CE->getOpcode() == Instruction::Select) {
    Constant *Src0 = CE->getOperand(1);
    Constant *Src1 = CE->getOperand(2);
    if (Src0->getType()->getPointerAddressSpace() ==
        Src1->getType()->getPointerAddressSpace()) {

      return ConstantExpr::getSelect(
          CE->getOperand(0), ConstantExpr::getAddrSpaceCast(Src0, TargetType),
          ConstantExpr::getAddrSpaceCast(Src1, TargetType));
    }
  }

  if (CE->getOpcode() == Instruction::IntToPtr) {
    assert(isNoopPtrIntCastPair(cast<Operator>(CE), *DL, TTI));
    Constant *Src = cast<ConstantExpr>(CE->getOperand(0))->getOperand(0);
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    return ConstantExpr::getBitCast(Src, TargetType);
  }

  // Computes the operands of the new constant expression.
  bool IsNew = false;
  SmallVector<Constant *, 4> NewOperands;
  for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
    Constant *Operand = CE->getOperand(Index);
    // If the address space of `Operand` needs to be modified, the new operand
    // with the new address space should already be in ValueWithNewAddrSpace
    // because (1) the constant expressions we consider (i.e. addrspacecast,
    // bitcast, and getelementptr) do not incur cycles in the data flow graph
    // and (2) this function is called on constant expressions in postorder.
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
      IsNew = true;
      NewOperands.push_back(cast<Constant>(NewOperand));
      continue;
    }
    if (auto *CExpr = dyn_cast<ConstantExpr>(Operand))
      if (Value *NewOperand = cloneConstantExprWithNewAddressSpace(
              CExpr, NewAddrSpace, ValueWithNewAddrSpace, DL, TTI)) {
        IsNew = true;
        NewOperands.push_back(cast<Constant>(NewOperand));
        continue;
      }
    // Otherwise, reuses the old operand.
    NewOperands.push_back(Operand);
  }

  // If !IsNew, we will replace the Value with itself. However, replaced values
  // are assumed to be wrapped in an addrspacecast later, so drop it now.
  if (!IsNew)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    // Needs to specify the source type while constructing a getelementptr
    // constant expression.
    return CE->getWithOperands(NewOperands, TargetType, /*OnlyIfReduced=*/false,
                               cast<GEPOperator>(CE)->getSourceElementType());
  }

  return CE->getWithOperands(NewOperands, TargetType);
}

// Returns a clone of the value `V`, with its operands replaced as specified in
// ValueWithNewAddrSpace. This function is called on every flat address
// expression whose address space needs to be modified, in postorder.
//
// See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
Value *InferAddressSpacesImpl::cloneValueWithNewAddressSpace(
    Value *V, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    const PredicatedAddrSpaceMapTy &PredicatedAS,
    SmallVectorImpl<const Use *> *UndefUsesToFix) const {
  // All values in Postorder are flat address expressions.
  assert(V->getType()->getPointerAddressSpace() == FlatAddrSpace &&
         isAddressExpression(*V, *DL, TTI));

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Value *NewV = cloneInstructionWithNewAddressSpace(
        I, NewAddrSpace, ValueWithNewAddrSpace, PredicatedAS, UndefUsesToFix);
    if (Instruction *NewI = dyn_cast_or_null<Instruction>(NewV)) {
      if (NewI->getParent() == nullptr) {
        NewI->insertBefore(I);
        NewI->takeName(I);
      }
    }
    return NewV;
  }

  return cloneConstantExprWithNewAddressSpace(
      cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace, DL, TTI);
}

// Defines the join operation on the address space lattice (see the file header
// comments).
unsigned InferAddressSpacesImpl::joinAddressSpaces(unsigned AS1,
                                                   unsigned AS2) const {
  if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace)
    return FlatAddrSpace;

  if (AS1 == UninitializedAddressSpace)
    return AS2;
  if (AS2 == UninitializedAddressSpace)
    return AS1;

  // The join of two different specific address spaces is flat.
  return (AS1 == AS2) ? AS1 : FlatAddrSpace;
}

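// Illustrative join table (editor's addition, not in the original source),
// assuming the flat address space is 0 and that 1 and 3 are specific spaces:
//   join(uninitialized, 3) == 3
//   join(3, 3)             == 3
//   join(3, 1)             == 0   (two different specific spaces -> flat)
//   join(0, AS)            == 0   for any AS
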
bool InferAddressSpacesImpl::run(Function &F) {
  DL = &F.getParent()->getDataLayout();

  if (AssumeDefaultIsFlatAddressSpace)
    FlatAddrSpace = 0;

  if (FlatAddrSpace == UninitializedAddressSpace) {
    FlatAddrSpace = TTI->getFlatAddressSpace();
    if (FlatAddrSpace == UninitializedAddressSpace)
      return false;
  }

  // Collects all flat address expressions in postorder.
  std::vector<WeakTrackingVH> Postorder = collectFlatAddressExpressions(F);

  // Runs a data-flow analysis to refine the address spaces of every expression
  // in Postorder.
  ValueToAddrSpaceMapTy InferredAddrSpace;
  PredicatedAddrSpaceMapTy PredicatedAS;
  inferAddressSpaces(Postorder, InferredAddrSpace, PredicatedAS);

  // Changes the address spaces of the flat address expressions that are
  // inferred to point to a specific address space.
  return rewriteWithNewAddressSpaces(Postorder, InferredAddrSpace, PredicatedAS,
                                     &F);
}

// Constants need to be tracked through RAUW to handle cases with nested
// constant expressions, so wrap values in WeakTrackingVH.
void InferAddressSpacesImpl::inferAddressSpaces(
    ArrayRef<WeakTrackingVH> Postorder,
    ValueToAddrSpaceMapTy &InferredAddrSpace,
    PredicatedAddrSpaceMapTy &PredicatedAS) const {
  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
  // Initially, all expressions are in the uninitialized address space.
  for (Value *V : Postorder)
    InferredAddrSpace[V] = UninitializedAddressSpace;

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();

    // Try to update the address space of the stack top according to the
    // address spaces of its operands.
    if (!updateAddressSpace(*V, InferredAddrSpace, PredicatedAS))
      continue;

    for (Value *User : V->users()) {
      // Skip if User is already in the worklist.
      if (Worklist.count(User))
        continue;

      auto Pos = InferredAddrSpace.find(User);
      // Our algorithm only updates the address spaces of flat address
      // expressions, which are those in InferredAddrSpace.
      if (Pos == InferredAddrSpace.end())
        continue;

      // Function updateAddressSpace moves the address space down a lattice
      // path. Therefore, nothing to do if User is already inferred as flat (the
      // bottom element in the lattice).
      if (Pos->second == FlatAddrSpace)
        continue;

      Worklist.insert(User);
    }
  }
}

unsigned InferAddressSpacesImpl::getPredicatedAddrSpace(const Value &V,
                                                        Value *Opnd) const {
  const Instruction *I = dyn_cast<Instruction>(&V);
  if (!I)
    return UninitializedAddressSpace;

  Opnd = Opnd->stripInBoundsOffsets();
  for (auto &AssumeVH : AC.assumptionsFor(Opnd)) {
    if (!AssumeVH)
      continue;
    CallInst *CI = cast<CallInst>(AssumeVH);
    if (!isValidAssumeForContext(CI, I, DT))
      continue;

    const Value *Ptr;
    unsigned AS;
    std::tie(Ptr, AS) = TTI->getPredicatedAddrSpace(CI->getArgOperand(0));
    if (Ptr)
      return AS;
  }

  return UninitializedAddressSpace;
}

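// Illustrative example (editor's addition, not in the original source): on
// AMDGPU, an assume built from an address space predicate such as
//   %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %p)
//   call void @llvm.assume(i1 %is.shared)
// lets TTI->getPredicatedAddrSpace() report the target's shared (local)
// address space for uses of %p dominated by the assume.
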
bool InferAddressSpacesImpl::updateAddressSpace(
    const Value &V, ValueToAddrSpaceMapTy &InferredAddrSpace,
    PredicatedAddrSpaceMapTy &PredicatedAS) const {
  assert(InferredAddrSpace.count(&V));

  LLVM_DEBUG(dbgs() << "Updating the address space of\n  " << V << '\n');

  // The new inferred address space equals the join of the address spaces
  // of all its pointer operands.
  unsigned NewAS = UninitializedAddressSpace;

  const Operator &Op = cast<Operator>(V);
  if (Op.getOpcode() == Instruction::Select) {
    Value *Src0 = Op.getOperand(1);
    Value *Src1 = Op.getOperand(2);

    auto I = InferredAddrSpace.find(Src0);
    unsigned Src0AS = (I != InferredAddrSpace.end())
                          ? I->second
                          : Src0->getType()->getPointerAddressSpace();

    auto J = InferredAddrSpace.find(Src1);
    unsigned Src1AS = (J != InferredAddrSpace.end())
                          ? J->second
                          : Src1->getType()->getPointerAddressSpace();

    auto *C0 = dyn_cast<Constant>(Src0);
    auto *C1 = dyn_cast<Constant>(Src1);

    // If one of the inputs is a constant, we may be able to do a constant
    // addrspacecast of it. Defer inferring the address space until the input
    // address space is known.
    if ((C1 && Src0AS == UninitializedAddressSpace) ||
        (C0 && Src1AS == UninitializedAddressSpace))
      return false;

    if (C0 && isSafeToCastConstAddrSpace(C0, Src1AS))
      NewAS = Src1AS;
    else if (C1 && isSafeToCastConstAddrSpace(C1, Src0AS))
      NewAS = Src0AS;
    else
      NewAS = joinAddressSpaces(Src0AS, Src1AS);
  } else {
    unsigned AS = TTI->getAssumedAddrSpace(&V);
    if (AS != UninitializedAddressSpace) {
      // Use the assumed address space directly.
      NewAS = AS;
    } else {
      // Otherwise, infer the address space from its pointer operands.
      for (Value *PtrOperand : getPointerOperands(V, *DL, TTI)) {
        auto I = InferredAddrSpace.find(PtrOperand);
        unsigned OperandAS;
        if (I == InferredAddrSpace.end()) {
          OperandAS = PtrOperand->getType()->getPointerAddressSpace();
          if (OperandAS == FlatAddrSpace) {
            // Check AC for assumption dominating V.
            unsigned AS = getPredicatedAddrSpace(V, PtrOperand);
            if (AS != UninitializedAddressSpace) {
              LLVM_DEBUG(dbgs()
                         << "  deduce operand AS from the predicate addrspace "
                         << AS << '\n');
              OperandAS = AS;
              // Record this use with the predicated AS.
              PredicatedAS[std::make_pair(&V, PtrOperand)] = OperandAS;
            }
          }
        } else
          OperandAS = I->second;

        // join(flat, *) = flat. So we can break if NewAS is already flat.
        NewAS = joinAddressSpaces(NewAS, OperandAS);
        if (NewAS == FlatAddrSpace)
          break;
      }
    }
  }

  unsigned OldAS = InferredAddrSpace.lookup(&V);
  assert(OldAS != FlatAddrSpace);
  if (OldAS == NewAS)
    return false;

  // If any updates are made, add the value's users to the worklist because
  // their address spaces may also need to be updated.
  LLVM_DEBUG(dbgs() << "  to " << NewAS << '\n');
  InferredAddrSpace[&V] = NewAS;
  return true;
}

/// Returns true if \p U is the pointer operand of a memory instruction with
/// a single pointer operand that can have its address space changed by simply
/// mutating the use to a new value. If the memory instruction is volatile,
/// return true only if the target allows the memory instruction to be volatile
/// in the new address space.
static bool isSimplePointerUseValidToReplace(const TargetTransformInfo &TTI,
                                             Use &U, unsigned AddrSpace) {
  User *Inst = U.getUser();
  unsigned OpNo = U.getOperandNo();
  bool VolatileIsAllowed = false;
  if (auto *I = dyn_cast<Instruction>(Inst))
    VolatileIsAllowed = TTI.hasVolatileVariant(I, AddrSpace);

  if (auto *LI = dyn_cast<LoadInst>(Inst))
    return OpNo == LoadInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !LI->isVolatile());

  if (auto *SI = dyn_cast<StoreInst>(Inst))
    return OpNo == StoreInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !SI->isVolatile());

  if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst))
    return OpNo == AtomicRMWInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !RMW->isVolatile());

  if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst))
    return OpNo == AtomicCmpXchgInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !CmpX->isVolatile());

  return false;
}

/// Update memory intrinsic uses that require more complex processing than
/// simple memory instructions. These require re-mangling and may have multiple
/// pointer operands.
static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV,
                                     Value *NewV) {
  IRBuilder<> B(MI);
  MDNode *TBAA = MI->getMetadata(LLVMContext::MD_tbaa);
  MDNode *ScopeMD = MI->getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAliasMD = MI->getMetadata(LLVMContext::MD_noalias);

  if (auto *MSI = dyn_cast<MemSetInst>(MI)) {
    B.CreateMemSet(NewV, MSI->getValue(), MSI->getLength(),
                   MaybeAlign(MSI->getDestAlignment()),
                   false, // isVolatile
                   TBAA, ScopeMD, NoAliasMD);
  } else if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {
    Value *Src = MTI->getRawSource();
    Value *Dest = MTI->getRawDest();

    // Be careful in case this is a self-to-self copy.
    if (Src == OldV)
      Src = NewV;

    if (Dest == OldV)
      Dest = NewV;

    if (isa<MemCpyInlineInst>(MTI)) {
      MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
      B.CreateMemCpyInline(Dest, MTI->getDestAlign(), Src,
                           MTI->getSourceAlign(), MTI->getLength(),
                           false, // isVolatile
                           TBAA, TBAAStruct, ScopeMD, NoAliasMD);
    } else if (isa<MemCpyInst>(MTI)) {
      MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
      B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                     MTI->getLength(),
                     false, // isVolatile
                     TBAA, TBAAStruct, ScopeMD, NoAliasMD);
    } else {
      assert(isa<MemMoveInst>(MTI));
      B.CreateMemMove(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                      MTI->getLength(),
                      false, // isVolatile
                      TBAA, ScopeMD, NoAliasMD);
    }
  } else
    llvm_unreachable("unhandled MemIntrinsic");

  MI->eraseFromParent();
  return true;
}

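// Illustrative example (editor's addition, not in the original source): if the
// source of
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n, i1 false)
// is rewritten to addrspace(3), the call is rebuilt with a re-mangled
// declaration,
//   call void @llvm.memcpy.p0i8.p3i8.i64(i8* %d, i8 addrspace(3)* %s3, i64 %n, i1 false)
// carrying over the TBAA and alias-scope metadata collected above.
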
// Returns true if it is OK to change the address space of constant \p C with
// a ConstantExpr addrspacecast.
bool InferAddressSpacesImpl::isSafeToCastConstAddrSpace(Constant *C,
                                                        unsigned NewAS) const {
  assert(NewAS != UninitializedAddressSpace);

  unsigned SrcAS = C->getType()->getPointerAddressSpace();
  if (SrcAS == NewAS || isa<UndefValue>(C))
    return true;

  // Prevent illegal casts between different non-flat address spaces.
  if (SrcAS != FlatAddrSpace && NewAS != FlatAddrSpace)
    return false;

  if (isa<ConstantPointerNull>(C))
    return true;

  if (auto *Op = dyn_cast<Operator>(C)) {
    // If we already have a constant addrspacecast, it should be safe to cast it
    // off.
    if (Op->getOpcode() == Instruction::AddrSpaceCast)
      return isSafeToCastConstAddrSpace(cast<Constant>(Op->getOperand(0)),
                                        NewAS);

    if (Op->getOpcode() == Instruction::IntToPtr &&
        Op->getType()->getPointerAddressSpace() == FlatAddrSpace)
      return true;
  }

  return false;
}

static Value::use_iterator skipToNextUser(Value::use_iterator I,
                                          Value::use_iterator End) {
  User *CurUser = I->getUser();
  ++I;

  while (I != End && I->getUser() == CurUser)
    ++I;

  return I;
}

bool InferAddressSpacesImpl::rewriteWithNewAddressSpaces(
    ArrayRef<WeakTrackingVH> Postorder,
    const ValueToAddrSpaceMapTy &InferredAddrSpace,
    const PredicatedAddrSpaceMapTy &PredicatedAS, Function *F) const {
  // For each address expression to be modified, creates a clone of it with its
  // pointer operands converted to the new address space. Since the pointer
  // operands are converted, the clone is naturally in the new address space by
  // construction.
  ValueToValueMapTy ValueWithNewAddrSpace;
  SmallVector<const Use *, 32> UndefUsesToFix;
  for (Value *V : Postorder) {
    unsigned NewAddrSpace = InferredAddrSpace.lookup(V);

    // In some degenerate cases (e.g. invalid IR in unreachable code), we may
    // not even infer the value to have its original address space.
    if (NewAddrSpace == UninitializedAddressSpace)
      continue;

    if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
      Value *New =
          cloneValueWithNewAddressSpace(V, NewAddrSpace, ValueWithNewAddrSpace,
                                        PredicatedAS, &UndefUsesToFix);
      if (New)
        ValueWithNewAddrSpace[V] = New;
    }
  }

  if (ValueWithNewAddrSpace.empty())
    return false;

  // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
  for (const Use *UndefUse : UndefUsesToFix) {
    User *V = UndefUse->getUser();
    User *NewV = cast_or_null<User>(ValueWithNewAddrSpace.lookup(V));
    if (!NewV)
      continue;

    unsigned OperandNo = UndefUse->getOperandNo();
    assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
    NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
  }

  SmallVector<Instruction *, 16> DeadInstructions;

  // Replaces the uses of the old address expressions with the new ones.
  for (const WeakTrackingVH &WVH : Postorder) {
    assert(WVH && "value was unexpectedly deleted");
    Value *V = WVH;
    Value *NewV = ValueWithNewAddrSpace.lookup(V);
    if (NewV == nullptr)
      continue;

    LLVM_DEBUG(dbgs() << "Replacing the uses of " << *V << "\n  with\n  "
                      << *NewV << '\n');

    if (Constant *C = dyn_cast<Constant>(V)) {
      Constant *Replace =
          ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV), C->getType());
      if (C != Replace) {
        LLVM_DEBUG(dbgs() << "Inserting replacement const cast: " << Replace
                          << ": " << *Replace << '\n');
        C->replaceAllUsesWith(Replace);
        V = Replace;
      }
    }

    Value::use_iterator I, E, Next;
    for (I = V->use_begin(), E = V->use_end(); I != E;) {
      Use &U = *I;

      // Some users may see the same pointer operand in multiple operands. Skip
      // to the next instruction.
      I = skipToNextUser(I, E);

      if (isSimplePointerUseValidToReplace(
              *TTI, U, V->getType()->getPointerAddressSpace())) {
        // If V is used as the pointer operand of a compatible memory operation,
        // sets the pointer operand to NewV. This replacement does not change
        // the element type, so the resultant load/store is still valid.
        U.set(NewV);
        continue;
      }

      User *CurUser = U.getUser();
      // Skip if the current user is the new value itself.
      if (CurUser == NewV)
        continue;
      // Handle more complex cases like intrinsics that need to be remangled.
      if (auto *MI = dyn_cast<MemIntrinsic>(CurUser)) {
        if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV))
          continue;
      }

      if (auto *II = dyn_cast<IntrinsicInst>(CurUser)) {
        if (rewriteIntrinsicOperands(II, V, NewV))
          continue;
      }

      if (isa<Instruction>(CurUser)) {
        if (ICmpInst *Cmp = dyn_cast<ICmpInst>(CurUser)) {
          // If we can infer that both pointers are in the same addrspace,
          // transform e.g.
          //   %cmp = icmp eq float* %p, %q
          // into
          //   %cmp = icmp eq float addrspace(3)* %new_p, %new_q

          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          int SrcIdx = U.getOperandNo();
          int OtherIdx = (SrcIdx == 0) ? 1 : 0;
          Value *OtherSrc = Cmp->getOperand(OtherIdx);

          if (Value *OtherNewV = ValueWithNewAddrSpace.lookup(OtherSrc)) {
            if (OtherNewV->getType()->getPointerAddressSpace() == NewAS) {
              Cmp->setOperand(OtherIdx, OtherNewV);
              Cmp->setOperand(SrcIdx, NewV);
              continue;
            }
          }

          // Even if the type mismatches, we can cast the constant.
          if (auto *KOtherSrc = dyn_cast<Constant>(OtherSrc)) {
            if (isSafeToCastConstAddrSpace(KOtherSrc, NewAS)) {
              Cmp->setOperand(SrcIdx, NewV);
              Cmp->setOperand(OtherIdx, ConstantExpr::getAddrSpaceCast(
                                            KOtherSrc, NewV->getType()));
              continue;
            }
          }
        }

        if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(CurUser)) {
          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          if (ASC->getDestAddressSpace() == NewAS) {
            if (!cast<PointerType>(ASC->getType())
                     ->hasSameElementTypeAs(
                         cast<PointerType>(NewV->getType()))) {
              BasicBlock::iterator InsertPos;
              if (Instruction *NewVInst = dyn_cast<Instruction>(NewV))
                InsertPos = std::next(NewVInst->getIterator());
              else if (Instruction *VInst = dyn_cast<Instruction>(V))
                InsertPos = std::next(VInst->getIterator());
              else
                InsertPos = ASC->getIterator();

              NewV = CastInst::Create(Instruction::BitCast, NewV,
                                      ASC->getType(), "", &*InsertPos);
            }
            ASC->replaceAllUsesWith(NewV);
            DeadInstructions.push_back(ASC);
            continue;
          }
        }

        // Otherwise, replaces the use with flat(NewV).
        if (Instruction *VInst = dyn_cast<Instruction>(V)) {
          // Don't create a copy of the original addrspacecast.
          if (U == V && isa<AddrSpaceCastInst>(V))
            continue;

          // Insert the addrspacecast after NewV.
          BasicBlock::iterator InsertPos;
          if (Instruction *NewVInst = dyn_cast<Instruction>(NewV))
            InsertPos = std::next(NewVInst->getIterator());
          else
            InsertPos = std::next(VInst->getIterator());

          while (isa<PHINode>(InsertPos))
            ++InsertPos;
          U.set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
        } else {
          U.set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
                                               V->getType()));
        }
      }
    }

    if (V->use_empty()) {
      if (Instruction *I = dyn_cast<Instruction>(V))
        DeadInstructions.push_back(I);
    }
  }

  for (Instruction *I : DeadInstructions)
    RecursivelyDeleteTriviallyDeadInstructions(I);

  return true;
}

bool InferAddressSpaces::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  return InferAddressSpacesImpl(
             getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), DT,
             &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
             FlatAddrSpace)
      .run(F);
}

FunctionPass *llvm::createInferAddressSpacesPass(unsigned AddressSpace) {
  return new InferAddressSpaces(AddressSpace);
}

InferAddressSpacesPass::InferAddressSpacesPass()
    : FlatAddrSpace(UninitializedAddressSpace) {}
InferAddressSpacesPass::InferAddressSpacesPass(unsigned AddressSpace)
    : FlatAddrSpace(AddressSpace) {}

PreservedAnalyses InferAddressSpacesPass::run(Function &F,
                                              FunctionAnalysisManager &AM) {
  bool Changed =
      InferAddressSpacesImpl(AM.getResult<AssumptionAnalysis>(F),
                             AM.getCachedResult<DominatorTreeAnalysis>(F),
                             &AM.getResult<TargetIRAnalysis>(F), FlatAddrSpace)
          .run(F);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    PA.preserve<DominatorTreeAnalysis>();
    return PA;
  }
  return PreservedAnalyses::all();
}
llvm::PreservedAnalyses
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:152
llvm::RecursivelyDeleteTriviallyDeadInstructions
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition: Local.cpp:517
AssumptionCache.h
llvm::TargetIRAnalysis
Analysis pass providing the TargetTransformInfo.
Definition: TargetTransformInfo.h:2461
llvm::GetElementPtrInst::setIsInBounds
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
Definition: Instructions.cpp:1828
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:104
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::Operator
This is a utility class that provides an abstraction for the common functionality between Instruction...
Definition: Operator.h:32
llvm::CastInst::CreatePointerBitCastOrAddrSpaceCast
static CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd)
Create a BitCast or an AddrSpaceCast cast instruction.
Definition: Instructions.cpp:3309
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::WeakTrackingVH
Value handle that is nullable, but tries to track the Value.
Definition: ValueHandle.h:204
ValueMapper.h
llvm::CastInst::isNoopCast
static bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
Definition: Instructions.cpp:2915
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::Intrinsic::getDeclaration
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1410
llvm::BasicBlock::iterator
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:87
llvm::BasicBlock::getParent
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:104
IntrinsicInst.h
llvm::Type::isPointerTy
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:218
llvm::AnalysisManager::getResult
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:780
Scalar.h
InstIterator.h
llvm::Function
Definition: Function.h:60
Pass.h
llvm::IntrinsicInst::getIntrinsicID
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:53
llvm::CallBase::setCalledFunction
void setCalledFunction(Function *Fn)
Sets the function called, including updating the function type.
Definition: InstrTypes.h:1435
InferAddressSpaces.h
llvm::BitCastInst
This class represents a no-op cast from one type to another.
Definition: Instructions.h:5225
C1
instcombine should handle this C2 when C1
Definition: README.txt:263
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1185
llvm::PHINode::getOperandNumForIncomingValue
static unsigned getOperandNumForIncomingValue(unsigned i)
Definition: Instructions.h:2770
ErrorHandling.h
llvm::TargetTransformInfo
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Definition: TargetTransformInfo.h:167
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:729
llvm::IRBuilder<>
llvm::Use::get
Value * get() const
Definition: Use.h:66
llvm::CastInst::Create
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Definition: Instructions.cpp:3180
llvm::ConstantExpr::getBitCast
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2258
llvm::TargetTransformInfo::getAssumedAddrSpace
unsigned getAssumedAddrSpace(const Value *V) const
Definition: TargetTransformInfo.cpp:265
ValueTracking.h
Local.h
llvm::ValueMap::empty
bool empty() const
Definition: ValueMap.h:140
llvm::ConstantExpr::getSelect
static Constant * getSelect(Constant *C, Constant *V1, Constant *V2, Type *OnlyIfReducedTy=nullptr)
Select constant expr.
Definition: Constants.cpp:2445
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
llvm::CallBase::getArgOperandUse
const Use & getArgOperandUse(unsigned i) const
Wrappers for getting the Use of a call argument.
Definition: InstrTypes.h:1352
INITIALIZE_PASS_BEGIN
INITIALIZE_PASS_BEGIN(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces", false, false) INITIALIZE_PASS_END(InferAddressSpaces
isNoopPtrIntCastPair
Infer address static false bool isNoopPtrIntCastPair(const Operator *I2P, const DataLayout &DL, const TargetTransformInfo *TTI)
Definition: InferAddressSpaces.cpp:262
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
DenseMap.h
llvm::AtomicRMWInst::getPointerOperandIndex
static unsigned getPointerOperandIndex()
Definition: Instructions.h:868
llvm::LoadInst::getPointerOperandIndex
static unsigned getPointerOperandIndex()
Definition: Instructions.h:270
llvm::Use::getOperandNo
unsigned getOperandNo() const
Return the operand # of this use in its User.
Definition: Use.cpp:31
llvm::MemIntrinsic
This is the common base class for memset/memcpy/memmove.
Definition: IntrinsicInst.h:962
Operator.h
llvm::detail::DenseSetImpl::insert
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
llvm::cl::ReallyHidden
@ ReallyHidden
Definition: CommandLine.h:140
Use.h
operandWithNewAddressSpaceOrCreateUndef
static Value * operandWithNewAddressSpaceOrCreateUndef(const Use &OperandUse, unsigned NewAddrSpace, const ValueToValueMapTy &ValueWithNewAddrSpace, const PredicatedAddrSpaceMapTy &PredicatedAS, SmallVectorImpl< const Use * > *UndefUsesToFix)
Definition: InferAddressSpaces.cpp:525
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
Instruction.h
CommandLine.h
llvm::createInferAddressSpacesPass
FunctionPass * createInferAddressSpacesPass(unsigned AddressSpace=~0u)
Definition: InferAddressSpaces.cpp:1308
llvm::isValidAssumeForContext
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr)
Return true if it is valid to use the assumptions provided by an assume intrinsic, I, at the point in the control-flow identified by the context instruction, CxtI.
Definition: ValueTracking.cpp:566
llvm::SelectInst::Create
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", Instruction *InsertBefore=nullptr, Instruction *MDFrom=nullptr)
Definition: Instructions.h:1768
llvm::AddrSpaceCastInst
This class represents a conversion between pointers from one address space to another.
Definition: Instructions.h:5265
Constants.h
llvm::Value::use_iterator
use_iterator_impl< Use > use_iterator
Definition: Value.h:353
llvm::User
Definition: User.h:44
Intrinsics.h
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition: PassAnalysisSupport.h:47
cloneConstantExprWithNewAddressSpace
static Value * cloneConstantExprWithNewAddressSpace(ConstantExpr *CE, unsigned NewAddrSpace, const ValueToValueMapTy &ValueWithNewAddrSpace, const DataLayout *DL, const TargetTransformInfo *TTI)
Definition: InferAddressSpaces.cpp:672
DenseSet.h
llvm::MaybeAlign
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:109
llvm::Instruction::CastOps
CastOps
Definition: Instruction.h:800
llvm::Instruction
Definition: Instruction.h:42
llvm::DominatorTreeWrapperPass
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:302
llvm::UndefValue::get
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1769
llvm::Use::getUser
User * getUser() const
Returns the User that contains this Use.
Definition: Use.h:72
llvm::AddressSpace
AddressSpace
Definition: NVPTXBaseInfo.h:21
llvm::PHINode::getNumIncomingValues
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition: Instructions.h:2756
llvm::Value::use_empty
bool use_empty() const
Definition: Value.h:344
llvm::CallingConv::ID
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
Type.h
INITIALIZE_PASS_END
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:58
llvm::TargetTransformInfo::hasVolatileVariant
bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const
Return true if the given instruction (assumed to be a memory access instruction) has a volatile variant.
Definition: TargetTransformInfo.cpp:440
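For example, a sketch under the assumption that LI is the memory access being retargeted (helper name is hypothetical):

  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // A volatile load may only move into AS if the target has a volatile
  // form of the load there; non-volatile accesses are always fine.
  static bool volatileVariantOK(const TargetTransformInfo &TTI,
                                LoadInst *LI, unsigned AS) {
    return !LI->isVolatile() || TTI.hasVolatileVariant(LI, AS);
  }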
DEBUG_TYPE
#define DEBUG_TYPE
Definition: InferAddressSpaces.cpp:136
llvm::TargetTransformInfo::getFlatAddressSpace
unsigned getFlatAddressSpace() const
Returns the address space ID for a target's 'flat' address space.
Definition: TargetTransformInfo.cpp:246
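A sketch of the early exit an address-space-inference pass can take when the target reports no flat address space (the ~0u "none" sentinel is an assumption of this sketch):

  #include "llvm/Analysis/TargetTransformInfo.h"
  using namespace llvm;

  static bool targetHasFlatAddrSpace(const TargetTransformInfo &TTI) {
    // Targets without a generic/flat address space give inference nothing
    // to refine, so a pass can return early.
    return TTI.getFlatAddressSpace() != ~0u;
  }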
llvm::DenseSet< Value * >
llvm::Use::set
void set(Value *Val)
Definition: Value.h:868
BasicBlock.h
llvm::cl::opt< bool >
llvm::instructions
inst_range instructions(Function *F)
Definition: InstIterator.h:133
llvm::Constant
This is an important base class in LLVM.
Definition: Constant.h:41
llvm::ICmpInst
This instruction compares its operands according to the predicate given to the constructor.
Definition: Instructions.h:1186
llvm::TargetTransformInfo::collectFlatAddressOperands
bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
Return any intrinsic address operand indexes which may be rewritten if they use a flat address space pointer.
Definition: TargetTransformInfo.cpp:250
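Sketch of this hook's calling convention (II is assumed to be the intrinsic call under examination; the wrapper name is illustrative):

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/IntrinsicInst.h"
  using namespace llvm;

  // Collects the operand indexes of II that hold rewritable flat pointers;
  // returns false when the intrinsic cannot be rewritten at all.
  static bool flatOperandsOf(const TargetTransformInfo &TTI,
                             IntrinsicInst *II,
                             SmallVectorImpl<int> &OpIndexes) {
    return TTI.collectFlatAddressOperands(OpIndexes, II->getIntrinsicID());
  }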
llvm::TargetTransformInfoWrapperPass
Wrapper pass for TargetTransformInfo.
Definition: TargetTransformInfo.h:2517
llvm::GlobalValue::getParent
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:577
llvm::AssumptionAnalysis
A function analysis which provides an AssumptionCache.
Definition: AssumptionCache.h:173
llvm::PreservedAnalyses::preserve
void preserve()
Mark an analysis as preserved.
Definition: PassManager.h:173
INITIALIZE_PASS_DEPENDENCY
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
llvm::ConstantExpr::getAddrSpaceCast
static Constant * getAddrSpaceCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2270
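A small sketch of producing the constant form of such a cast, retyping only the address space (the helper name is illustrative):

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/DerivedTypes.h"
  using namespace llvm;

  static Constant *castConstantToAS(Constant *C, unsigned NewAS) {
    auto *PtrTy = cast<PointerType>(C->getType());
    return ConstantExpr::getAddrSpaceCast(
        C, PointerType::getWithSamePointeeType(PtrTy, NewAS));
  }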
llvm::PHINode::addIncoming
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Definition: Instructions.h:2814
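A rough sketch of rebuilding a PHI over a new pointer type while reusing the incoming blocks; NewTy and NewVals are assumed to be the already-rewritten type and incoming values, and the ".as" suffix is illustrative:

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  static PHINode *clonePHIWithType(PHINode *Old, Type *NewTy,
                                   ArrayRef<Value *> NewVals) {
    PHINode *New = PHINode::Create(NewTy, Old->getNumIncomingValues(),
                                   Old->getName() + ".as", Old);
    for (unsigned i = 0, e = Old->getNumIncomingValues(); i != e; ++i)
      New->addIncoming(NewVals[i], Old->getIncomingBlock(i));
    return New;
  }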
llvm::DenseMap< const Value *, unsigned >
isAddressExpression
static bool isAddressExpression(const Value &V, const DataLayout &DL, const TargetTransformInfo *TTI)
Definition: InferAddressSpaces.cpp:296
llvm::GetElementPtrInst
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:929
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
llvm::AtomicCmpXchgInst::getPointerOperandIndex
static unsigned getPointerOperandIndex()
Definition: Instructions.h:654
ArrayRef.h
llvm::TargetTransformInfo::rewriteIntrinsicWithAddressSpace
Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const
Rewrite intrinsic call II such that OldV will be replaced with NewV, which has a different address space.
Definition: TargetTransformInfo.cpp:274
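The usage pattern, sketched (II, OldV, NewV as in the signature above): a non-null result replaces the call, while nullptr means the call must keep the old pointer.

  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/IntrinsicInst.h"
  using namespace llvm;

  static bool tryRewriteIntrinsic(const TargetTransformInfo &TTI,
                                  IntrinsicInst *II, Value *OldV,
                                  Value *NewV) {
    if (Value *Repl = TTI.rewriteIntrinsicWithAddressSpace(II, OldV, NewV)) {
      if (Repl != II) { // a distinct replacement was built
        II->replaceAllUsesWith(Repl);
        II->eraseFromParent();
      }
      return true; // the target may also have updated II in place
    }
    return false; // target declined; keep the generic pointer here
  }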
IRBuilder.h
llvm::Value::use_begin
use_iterator use_begin()
Definition: Value.h:360
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
llvm::MDNode
Metadata node.
Definition: Metadata.h:937
llvm::User::setOperand
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
llvm::GetElementPtrInst::Create
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:955
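A sketch of cloning a GEP so that its result lands in the inferred address space; NewPtr is assumed to be the already-rewritten source pointer, and insertion into a basic block is left out:

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  static GetElementPtrInst *cloneGEPWithNewPtr(GetElementPtrInst *GEP,
                                               Value *NewPtr) {
    SmallVector<Value *, 4> Indices(GEP->indices());
    auto *NewGEP = GetElementPtrInst::Create(GEP->getSourceElementType(),
                                             NewPtr, Indices, GEP->getName());
    NewGEP->setIsInBounds(GEP->isInBounds()); // preserve inbounds
    return NewGEP;
  }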
llvm::AssumptionCacheTracker
An immutable pass that tracks lazily created AssumptionCache objects.
Definition: AssumptionCache.h:202
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::AnalysisUsage::setPreservesCFG
void setPreservesCFG()
This function should be called by a pass iff it does not: 1) add or remove basic blocks from the function, or 2) modify terminator instructions in any way.
Definition: Pass.cpp:263
llvm::AssumptionCache
A cache of @llvm.assume calls within a function.
Definition: AssumptionCache.h:42
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
llvm::TargetTransformInfo::getPredicatedAddrSpace
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
Definition: TargetTransformInfo.cpp:270
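A sketch of combining this hook with isValidAssumeForContext (listed earlier in this index): the predicated space may only be trusted where the underlying assumption holds. The helper name and the ~0u "unknown" result are assumptions of the sketch.

  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/Dominators.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  static unsigned predicatedAddrSpaceAt(const TargetTransformInfo &TTI,
                                        const Value *Ptr, Instruction *UserI,
                                        const DominatorTree &DT) {
    std::pair<const Value *, unsigned> P = TTI.getPredicatedAddrSpace(Ptr);
    if (const auto *Assume = dyn_cast_or_null<Instruction>(P.first))
      if (isValidAssumeForContext(Assume, UserI, &DT))
        return P.second;
    return ~0u; // no applicable predicate
  }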
llvm::CFGAnalyses
Represents analyses that only rely on functions' control flow.
Definition: PassManager.h:113
llvm::AnalysisUsage::addPreserved
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
Definition: PassAnalysisSupport.h:98
llvm::Value::replaceAllUsesWith
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:529
Compiler.h
llvm::Value::use_end
use_iterator use_end()
Definition: Value.h:368
llvm::ValueMap< const Value *, WeakTrackingVH >
ValueHandle.h
skipToNextUser
static Value::use_iterator skipToNextUser(Value::use_iterator I, Value::use_iterator End)
Definition: InferAddressSpaces.cpp:1096
llvm::CallBase::setArgOperand
void setArgOperand(unsigned i, Value *v)
Definition: InstrTypes.h:1346
llvm::Value::stripInBoundsOffsets
const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition: Value.cpp:774
Constant.h
llvm::ConstantExpr
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:971
getPointerOperands
static SmallVector< Value *, 2 > getPointerOperands(const Value &V, const DataLayout &DL, const TargetTransformInfo *TTI)
Definition: InferAddressSpaces.cpp:328
llvm::PreservedAnalyses::all
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:158
llvm::PHINode::Create
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have (use 0 if you really have no idea).
Definition: Instructions.h:2706
llvm::ArrayRef::begin
iterator begin() const
Definition: ArrayRef.h:152
llvm::TargetTransformInfo::isNoopAddrSpaceCast
bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Definition: TargetTransformInfo.cpp:255
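For instance, a sketch with FromAS/ToAS as in the signature (helper name is hypothetical):

  #include "llvm/Analysis/TargetTransformInfo.h"
  using namespace llvm;

  // When the target reports the cast as a no-op, the two address spaces
  // share a representation and no addrspacecast needs to be emitted.
  static bool canFoldASCast(const TargetTransformInfo &TTI, unsigned FromAS,
                            unsigned ToAS) {
    return FromAS == ToAS || TTI.isNoopAddrSpaceCast(FromAS, ToAS);
  }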
llvm::AnalysisManager::getCachedResult
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:799
Casting.h
Function.h
PassManager.h
isSimplePointerUseValidToReplace
static bool isSimplePointerUseValidToReplace(const TargetTransformInfo &TTI, Use &U, unsigned AddrSpace)
Returns true if U is the pointer operand of a memory instruction with a single pointer operand that can have its address space changed by simply mutating the use to a new value.
Definition: InferAddressSpaces.cpp:986
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:46
llvm::DominatorTreeAnalysis
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:267
Instructions.h
llvm::PreservedAnalyses::preserveSet
void preserveSet()
Mark an analysis set as preserved.
Definition: PassManager.h:188
SmallVector.h
User.h
handleMemIntrinsicPtrUse
static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV, Value *NewV)
Update memory intrinsic uses that require more complex processing than simple memory instructions.
Definition: InferAddressSpaces.cpp:1016
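As a sketch of the kind of processing meant here, a memset over a rewritten destination can be rebuilt rather than operand-patched (the real code's volatile and metadata handling is more careful; the helper name is illustrative):

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/IntrinsicInst.h"
  using namespace llvm;

  static void rebuildMemSet(MemSetInst *MSI, Value *NewDest) {
    IRBuilder<> B(MSI);
    B.CreateMemSet(NewDest, MSI->getValue(), MSI->getLength(),
                   MSI->getDestAlign(), MSI->isVolatile());
    MSI->eraseFromParent();
  }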
Dominators.h
llvm::PointerType::getWithSamePointeeType
static PointerType * getWithSamePointeeType(PointerType *PT, unsigned AddressSpace)
This constructs a pointer type with the same pointee type as the input PointerType (or an opaque pointer if the input PointerType is opaque).
Definition: DerivedTypes.h:666
llvm::CallBase::getArgOperand
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1341
llvm::Instruction::getParent
const BasicBlock * getParent() const
Definition: Instruction.h:91
llvm::PHINode::getIncomingBlock
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Definition: Instructions.h:2780
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
TargetTransformInfo.h
llvm::PHINode
Definition: Instructions.h:2664
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: APFloat.h:42
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition: InstructionSimplify.h:42
AssumeDefaultIsFlatAddressSpace
static cl::opt< bool > AssumeDefaultIsFlatAddressSpace("assume-default-is-flat-addrspace", cl::init(false), cl::ReallyHidden, cl::desc("The default address space is assumed as the flat address space. " "This is mainly for test purpose."))
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:308
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition: Instructions.h:1474
UninitializedAddressSpace
static const unsigned UninitializedAddressSpace
Definition: InferAddressSpaces.cpp:145
llvm::ValueMap::lookup
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition: ValueMap.h:165
llvm::AnalysisUsage::addRequired
AnalysisUsage & addRequired()
Definition: PassAnalysisSupport.h:75
LLVMContext.h
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::cl::desc
Definition: CommandLine.h:405
raw_ostream.h
llvm::StoreInst::getPointerOperandIndex
static unsigned getPointerOperandIndex()
Definition: Instructions.h:404
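A sketch, mirroring checks of this kind in the file, of testing whether a use is exactly the single pointer operand of a load or store (the helper name is hypothetical):

  #include "llvm/IR/Instructions.h"
  #include "llvm/IR/Use.h"
  using namespace llvm;

  static bool isPointerOperandUse(const Use &U) {
    const User *Inst = U.getUser();
    if (isa<LoadInst>(Inst))
      return U.getOperandNo() == LoadInst::getPointerOperandIndex();
    if (isa<StoreInst>(Inst))
      return U.getOperandNo() == StoreInst::getPointerOperandIndex();
    return false; // other users need the more general rewriting path
  }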
llvm::SetVector< Value * >
Value.h
InitializePasses.h
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::InferAddressSpacesPass::run
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Definition: InferAddressSpaces.cpp:1317
llvm::InferAddressSpacesPass::InferAddressSpacesPass
InferAddressSpacesPass()
Definition: InferAddressSpaces.cpp:1312
Debug.h
llvm::Value::users
iterator_range< user_iterator > users()
Definition: Value.h:421
llvm::ArrayRef::end
iterator end() const
Definition: ArrayRef.h:153
SetVector.h
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:37