//===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Rewrite call/invoke instructions so as to make potential relocations
// performed by the garbage collector explicit in the IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GCStrategy.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "rewrite-statepoints-for-gc"

using namespace llvm;

// Print the liveset found at the insert location
static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden,
                                  cl::init(false));
static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden,
                                      cl::init(false));

// Print out the base pointers for debugging
static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden,
                                       cl::init(false));

// Cost threshold measuring when it is profitable to rematerialize a value
// instead of relocating it
static cl::opt<unsigned>
    RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden,
                               cl::init(6));

#ifdef EXPENSIVE_CHECKS
static bool ClobberNonLive = true;
#else
static bool ClobberNonLive = false;
#endif

static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live",
                                                  cl::location(ClobberNonLive),
                                                  cl::Hidden);

static cl::opt<bool>
    AllowStatepointWithNoDeoptInfo("rs4gc-allow-statepoint-with-no-deopt-info",
                                   cl::Hidden, cl::init(true));

static cl::opt<bool> RematDerivedAtUses("rs4gc-remat-derived-at-uses",
                                        cl::Hidden, cl::init(true));

/// The IR fed into RewriteStatepointsForGC may have had attributes and
/// metadata implying dereferenceability that are no longer valid/correct after
/// RewriteStatepointsForGC has run.  This is because semantically, after
/// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire
/// heap.  stripNonValidData (conservatively) restores
/// correctness by erasing all attributes in the module that externally imply
/// dereferenceability.  Similar reasoning also applies to the noalias
/// attributes and metadata.  gc.statepoint can touch the entire heap including
/// noalias objects.
/// Apart from attributes and metadata, we also remove instructions that imply
/// constant physical memory: llvm.invariant.start.
static void stripNonValidData(Module &M);
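
// For example (illustrative IR, not from this file): a parameter attribute
// such as
//   define void @f(ptr addrspace(1) dereferenceable(8) %p)
// is no longer sound after rewriting, because a statepoint between function
// entry and a use of %p may move the object %p refers to; attributes like
// this are what stripNonValidData erases.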

// Find the GC strategy for a function, or null if it doesn't have one.
static std::unique_ptr<GCStrategy> findGCStrategy(Function &F);

static bool shouldRewriteStatepointsIn(Function &F);

PreservedAnalyses RewriteStatepointsForGC::run(Module &M,
                                               ModuleAnalysisManager &AM) {
  bool Changed = false;
  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  for (Function &F : M) {
    // Nothing to do for declarations.
    if (F.isDeclaration() || F.empty())
      continue;

    // Policy choice says not to rewrite - the most common reason is that we're
    // compiling code without a GCStrategy.
    if (!shouldRewriteStatepointsIn(F))
      continue;

    auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
    auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
    auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    Changed |= runOnFunction(F, DT, TTI, TLI);
  }
  if (!Changed)
    return PreservedAnalyses::all();

  // stripNonValidData asserts that shouldRewriteStatepointsIn
  // returns true for at least one function in the module.  Since at least
  // one function changed, we know that the precondition is satisfied.
  stripNonValidData(M);

  PreservedAnalyses PA;
  PA.preserve<TargetIRAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  return PA;
}

namespace {

struct GCPtrLivenessData {
  /// Values defined in this block.
  MapVector<BasicBlock *, SetVector<Value *>> KillSet;

  /// Values used in this block (and thus live); does not include values
  /// killed within this block.
  MapVector<BasicBlock *, SetVector<Value *>> LiveSet;

  /// Values live into this basic block (i.e. used by any
  /// instruction in this basic block or ones reachable from here)
  MapVector<BasicBlock *, SetVector<Value *>> LiveIn;

  /// Values live out of this basic block (i.e. live into
  /// any successor block)
  MapVector<BasicBlock *, SetVector<Value *>> LiveOut;
};

// The type of the internal cache used inside the findBasePointers family
// of functions.  From the caller's perspective, this is an opaque type and
// should not be inspected.
//
// In the actual implementation this caches two relations:
// - The base relation itself (i.e. this pointer is based on that one)
// - The base defining value relation (i.e. before base_phi insertion)
// Generally, after the execution of a full findBasePointer call, only the
// base relation will remain.  Internally, we add a mixture of the two
// types, then update all the second type to the first type
using DefiningValueMapTy = MapVector<Value *, Value *>;
using IsKnownBaseMapTy = MapVector<Value *, bool>;
using PointerToBaseTy = MapVector<Value *, Value *>;
using StatepointLiveSetTy = SetVector<Value *>;
using RematerializedValueMapTy =
    MapVector<AssertingVH<Instruction>, AssertingVH<Value>>;

struct PartiallyConstructedSafepointRecord {
  /// The set of values known to be live across this safepoint
  StatepointLiveSetTy LiveSet;

  /// The *new* gc.statepoint instruction itself.  This produces the token
  /// that normal path gc.relocates and the gc.result are tied to.
  GCStatepointInst *StatepointToken;

  /// Instruction to which exceptional gc relocates are attached
  /// Makes it easier to iterate through them during relocationViaAlloca.
  Instruction *UnwindToken;

  /// Record live values we rematerialized instead of relocating.
  /// They are not included in the 'LiveSet' field.
  /// Maps each rematerialized copy to its original value.
  RematerializedValueMapTy RematerializedValues;
};

struct RematerizlizationCandidateRecord {
  // Chain from derived pointer to base.
  SmallVector<Instruction *, 3> ChainToBase;
  // Original base.
  Value *RootOfChain;
  // Cost of chain.
  InstructionCost Cost;
};
using RematCandTy = MapVector<Value *, RematerizlizationCandidateRecord>;

} // end anonymous namespace

static std::optional<ArrayRef<Use>>
GetDeoptBundleOperands(const CallBase *Call) {
  std::optional<OperandBundleUse> DeoptBundle =
      Call->getOperandBundle(LLVMContext::OB_deopt);

  if (!DeoptBundle) {
    assert(AllowStatepointWithNoDeoptInfo &&
           "Found non-leaf call without deopt info!");
    return std::nullopt;
  }

  return DeoptBundle->Inputs;
}
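
// Example (illustrative): a call such as
//   call void @foo() [ "deopt"(i32 7, ptr addrspace(1) %obj) ]
// yields the two bundle operands, while a call with no "deopt" bundle yields
// std::nullopt (tolerated only under
// -rs4gc-allow-statepoint-with-no-deopt-info).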

/// Compute the live-in set for every basic block in the function
static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data, GCStrategy *GC);

/// Given results from the dataflow liveness computation, find the set of live
/// Values at a particular instruction.
static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &out, GCStrategy *GC);

static bool isGCPointerType(Type *T, GCStrategy *GC) {
  assert(GC && "GC Strategy for isGCPointerType cannot be null");

  if (!isa<PointerType>(T))
    return false;

  // conservative - same as StatepointLowering
  return GC->isGCManagedPointer(T).value_or(true);
}

// Return true if this type is one which a) is a gc pointer or contains a GC
// pointer and b) is of a type this code expects to encounter as a live value.
// (The insertion code will assert that a type which matches (a) and not (b)
// is not encountered.)
static bool isHandledGCPointerType(Type *T, GCStrategy *GC) {
  // We fully support gc pointers
  if (isGCPointerType(T, GC))
    return true;
  // We partially support vectors of gc pointers. The code will assert if it
  // can't handle something.
  if (auto VT = dyn_cast<VectorType>(T))
    if (isGCPointerType(VT->getElementType(), GC))
      return true;
  return false;
}
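
// Illustrative examples (assuming the GC strategy manages addrspace(1)
// pointers, as the statepoint tests do):
//   ptr addrspace(1)         - handled (gc pointer)
//   <2 x ptr addrspace(1)>   - handled (vector of gc pointers)
//   {ptr addrspace(1), i64}  - not handled (first class aggregate; the
//                              insertion code asserts on such live values)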

#ifndef NDEBUG
/// Returns true if this type contains a gc pointer whether we know how to
/// handle that type or not.
static bool containsGCPtrType(Type *Ty, GCStrategy *GC) {
  if (isGCPointerType(Ty, GC))
    return true;
  if (VectorType *VT = dyn_cast<VectorType>(Ty))
    return isGCPointerType(VT->getScalarType(), GC);
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
    return containsGCPtrType(AT->getElementType(), GC);
  if (StructType *ST = dyn_cast<StructType>(Ty))
    return llvm::any_of(ST->elements(),
                        [GC](Type *Ty) { return containsGCPtrType(Ty, GC); });
  return false;
}

// Returns true if this is a type which a) is a gc pointer or contains a GC
// pointer and b) is of a type which the code doesn't expect (i.e. first class
// aggregates).  Used to trip assertions.
static bool isUnhandledGCPointerType(Type *Ty, GCStrategy *GC) {
  return containsGCPtrType(Ty, GC) && !isHandledGCPointerType(Ty, GC);
}
#endif

// Return the name of the value suffixed with the provided suffix, or if the
// value didn't have a name, the specified default name.
static std::string suffixed_name_or(Value *V, StringRef Suffix,
                                    StringRef DefaultName) {
  return V->hasName() ? (V->getName() + Suffix).str() : DefaultName.str();
}

// Conservatively identifies any definitions which might be live at the
// given instruction.  The analysis is performed immediately before the
// given instruction.  Values defined by that instruction are not considered
// live.  Values used by that instruction are considered live.
static void analyzeParsePointLiveness(
    DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, CallBase *Call,
    PartiallyConstructedSafepointRecord &Result, GCStrategy *GC) {
  StatepointLiveSetTy LiveSet;
  findLiveSetAtInst(Call, OriginalLivenessData, LiveSet, GC);

  if (PrintLiveSet) {
    dbgs() << "Live Variables:\n";
    for (Value *V : LiveSet)
      dbgs() << " " << V->getName() << " " << *V << "\n";
  }
  if (PrintLiveSetSize) {
    dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n";
    dbgs() << "Number live values: " << LiveSet.size() << "\n";
  }
  Result.LiveSet = LiveSet;
}

/// Returns true if V is a known base.
static bool isKnownBase(Value *V, const IsKnownBaseMapTy &KnownBases);

/// Caches the IsKnownBase flag for a value and asserts that it wasn't present
/// in the cache before.
static void setKnownBase(Value *V, bool IsKnownBase,
                         IsKnownBaseMapTy &KnownBases);

static Value *findBaseDefiningValue(Value *I, DefiningValueMapTy &Cache,
                                    IsKnownBaseMapTy &KnownBases);

/// Return a base defining value for the 'Index' element of the given vector
/// instruction 'I'.  If Index is null, returns a BDV for the entire vector
/// 'I'.  As an optimization, this method will try to determine when the
/// element is known to already be a base pointer.  If this can be established,
/// the second value in the returned pair will be true.  Note that either a
/// vector or a pointer typed value can be returned.  For the former, the
/// vector returned is a BDV (and possibly a base) of the entire vector 'I'.
/// If the latter, the return pointer is a BDV (or possibly a base) for the
/// particular element in 'I'.
static Value *findBaseDefiningValueOfVector(Value *I, DefiningValueMapTy &Cache,
                                            IsKnownBaseMapTy &KnownBases) {
  // Each case parallels findBaseDefiningValue below, see that code for
  // detailed motivation.

  auto Cached = Cache.find(I);
  if (Cached != Cache.end())
    return Cached->second;

  if (isa<Argument>(I)) {
    // An incoming argument to the function is a base pointer
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (isa<Constant>(I)) {
    // Base of constant vector consists only of constant null pointers.
    // For reasoning see similar case inside 'findBaseDefiningValue' function.
    auto *CAZ = ConstantAggregateZero::get(I->getType());
    Cache[I] = CAZ;
    setKnownBase(CAZ, /* IsKnownBase */true, KnownBases);
    return CAZ;
  }

  if (isa<LoadInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (isa<InsertElementInst>(I)) {
    // We don't know whether this vector contains entirely base pointers or
    // not.  To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */false, KnownBases);
    return I;
  }

  if (isa<ShuffleVectorInst>(I)) {
    // We don't know whether this vector contains entirely base pointers or
    // not.  To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    // TODO: There are a number of local optimizations which could be applied
    // here for particular shufflevector patterns.
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */false, KnownBases);
    return I;
  }

  // The behavior of getelementptr instructions is the same for vector and
  // non-vector data types.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *BDV =
        findBaseDefiningValue(GEP->getPointerOperand(), Cache, KnownBases);
    Cache[GEP] = BDV;
    return BDV;
  }

  // The behavior of freeze instructions is the same for vector and
  // non-vector data types.
  if (auto *Freeze = dyn_cast<FreezeInst>(I)) {
    auto *BDV = findBaseDefiningValue(Freeze->getOperand(0), Cache, KnownBases);
    Cache[Freeze] = BDV;
    return BDV;
  }

  // If the pointer comes through a bitcast of a vector of pointers to
  // a vector of another type of pointer, then look through the bitcast
  if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *BDV = findBaseDefiningValue(BC->getOperand(0), Cache, KnownBases);
    Cache[BC] = BDV;
    return BDV;
  }

  // We assume that functions in the source language only return base
  // pointers.  This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  // A PHI or Select is a base defining value.  The outer findBasePointer
  // algorithm is responsible for constructing a base value for this BDV.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "unknown vector instruction - no base found for vector element");
  Cache[I] = I;
  setKnownBase(I, /* IsKnownBase */false, KnownBases);
  return I;
}
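
// Example (illustrative): for
//   %vec = insertelement <2 x ptr addrspace(1)> poison, ptr addrspace(1) %p,
//                        i32 0
// the BDV is %vec itself with IsKnownBase = false; findBasePointer is then
// responsible for materializing a parallel vector of base pointers if needed.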

/// Helper function for findBasePointer - Will return a value which either a)
/// defines the base pointer for the input, b) blocks the simple search
/// (i.e. a PHI or Select of two derived pointers), or c) involves a change
/// from pointer to vector type or back.
static Value *findBaseDefiningValue(Value *I, DefiningValueMapTy &Cache,
                                    IsKnownBaseMapTy &KnownBases) {
  assert(I->getType()->isPtrOrPtrVectorTy() &&
         "Illegal to ask for the base pointer of a non-pointer type");
  auto Cached = Cache.find(I);
  if (Cached != Cache.end())
    return Cached->second;

  if (I->getType()->isVectorTy())
    return findBaseDefiningValueOfVector(I, Cache, KnownBases);

  if (isa<Argument>(I)) {
    // An incoming argument to the function is a base pointer
    // We should have never reached here if this argument isn't a gc value
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (isa<Constant>(I)) {
    // We assume that objects with a constant base (e.g. a global) can't move
    // and don't need to be reported to the collector because they are always
    // live.  Besides global references, all kinds of constants (e.g. undef,
    // constant expressions, null pointers) can be introduced by the inliner or
    // the optimizer, especially on dynamically dead paths.
    // Here we treat all of them as having a single null base.  By doing this
    // we try to avoid problems reporting various conflicts in a form of
    // "phi (const1, const2)" or "phi (const, regular gc ptr)".
    // See constant.ll file for relevant test cases.

    auto *CPN = ConstantPointerNull::get(cast<PointerType>(I->getType()));
    Cache[I] = CPN;
    setKnownBase(CPN, /* IsKnownBase */true, KnownBases);
    return CPN;
  }

  // inttoptrs in an integral address space are currently ill-defined.  We
  // treat them as defining base pointers here for consistency with the
  // constant rule above and because we don't really have a better semantic
  // to give them.  Note that the optimizer is always free to insert undefined
  // behavior on dynamically dead paths as well.
  if (isa<IntToPtrInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Def = CI->stripPointerCasts();
    // If stripping pointer casts changes the address space there is an
    // addrspacecast in between.
    assert(cast<PointerType>(Def->getType())->getAddressSpace() ==
               cast<PointerType>(CI->getType())->getAddressSpace() &&
           "unsupported addrspacecast");
    // If we find a cast instruction here, it means we've found a cast which is
    // not simply a pointer cast (i.e. an inttoptr).  We don't know how to
    // handle int->ptr conversion.
    assert(!isa<CastInst>(Def) && "shouldn't find another cast here");
    auto *BDV = findBaseDefiningValue(Def, Cache, KnownBases);
    Cache[CI] = BDV;
    return BDV;
  }

  if (isa<LoadInst>(I)) {
    // The value loaded is a gc base itself
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
    // The base of this GEP is the base
    auto *BDV =
        findBaseDefiningValue(GEP->getPointerOperand(), Cache, KnownBases);
    Cache[GEP] = BDV;
    return BDV;
  }

  if (auto *Freeze = dyn_cast<FreezeInst>(I)) {
    auto *BDV = findBaseDefiningValue(Freeze->getOperand(0), Cache, KnownBases);
    Cache[Freeze] = BDV;
    return BDV;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      // fall through to general call handling
      break;
    case Intrinsic::experimental_gc_statepoint:
      llvm_unreachable("statepoints don't produce pointers");
    case Intrinsic::experimental_gc_relocate:
      // Rerunning safepoint insertion after safepoints are already
      // inserted is not supported.  It could probably be made to work,
      // but why are you doing this?  There's no good reason.
      llvm_unreachable("repeat safepoint insertion is not supported");
    case Intrinsic::gcroot:
      // Currently, this mechanism hasn't been extended to work with gcroot.
      // There's no reason it couldn't be, but I haven't thought about the
      // implications much.
      llvm_unreachable(
          "interaction with the gcroot mechanism is not supported");
    case Intrinsic::experimental_gc_get_pointer_base:
      auto *BDV = findBaseDefiningValue(II->getOperand(0), Cache, KnownBases);
      Cache[II] = BDV;
      return BDV;
    }
  }
  // We assume that functions in the source language only return base
  // pointers.  This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  // TODO: I have absolutely no idea how to implement this part yet.  It's not
  // necessarily hard, I just haven't really looked at it yet.
  assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented");

  if (isa<AtomicCmpXchgInst>(I)) {
    // A CAS is effectively an atomic store and load combined under a
    // predicate.  From the perspective of base pointers, we just treat it
    // like a load.
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are "
                                   "binary ops which don't apply to pointers");

  // The aggregate ops.  Aggregates can either be in the heap or on the
  // stack, but in either case, this is simply a field load.  As a result,
  // this defines the base just like a load does.
  if (isa<ExtractValueInst>(I)) {
    Cache[I] = I;
    setKnownBase(I, /* IsKnownBase */true, KnownBases);
    return I;
  }

  // We should never see an insert vector since that would require we be
  // tracing back a struct value not a pointer value.
  assert(!isa<InsertValueInst>(I) &&
         "Base pointer for a struct is meaningless");

  // This value might have been generated by findBasePointer() called when
  // substituting gc.get.pointer.base() intrinsic.
  bool IsKnownBase =
      isa<Instruction>(I) && cast<Instruction>(I)->getMetadata("is_base_value");
  setKnownBase(I, /* IsKnownBase */IsKnownBase, KnownBases);
  Cache[I] = I;

  // An extractelement produces a base result exactly when its input does.
  // We may need to insert a parallel instruction to extract the appropriate
  // element out of the base vector corresponding to the input.  Given this,
  // it's analogous to the phi and select case even though it's not a merge.
  if (isa<ExtractElementInst>(I))
    // Note: There are a lot of obvious peephole cases here.  These are
    // deliberately handled after the main base pointer inference algorithm to
    // make writing test cases to exercise that code easier.
    return I;

  // The last two cases here don't return a base pointer.  Instead, they
  // return a value which dynamically selects from among several base
  // derived pointers (each potentially with its own base).  It's the job of
  // the caller to resolve these.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "missing instruction case in findBaseDefiningValue");
  return I;
}
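
// Example (illustrative): for
//   %gep = getelementptr i8, ptr addrspace(1) %obj, i64 16
// the search continues through the GEP to %obj, while for
//   %sel = select i1 %c, ptr addrspace(1) %a, ptr addrspace(1) %b
// the BDV is %sel itself and findBasePointer must resolve the merge.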

/// Returns the base defining value for this value.
static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache,
                                          IsKnownBaseMapTy &KnownBases) {
  if (!Cache.contains(I)) {
    auto *BDV = findBaseDefiningValue(I, Cache, KnownBases);
    Cache[I] = BDV;
    LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> "
                      << Cache[I]->getName() << ", is known base = "
                      << KnownBases[I] << "\n");
  }
  assert(Cache[I] != nullptr);
  assert(KnownBases.contains(Cache[I]) &&
         "Cached value must be present in known bases map");
  return Cache[I];
}

/// Return a base pointer for this value if known.  Otherwise, return its
/// base defining value.
static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache,
                            IsKnownBaseMapTy &KnownBases) {
  Value *Def = findBaseDefiningValueCached(I, Cache, KnownBases);
  auto Found = Cache.find(Def);
  if (Found != Cache.end()) {
    // Either a base-of relation, or a self reference.  Caller must check.
    return Found->second;
  }
  // Only a BDV available
  return Def;
}

#ifndef NDEBUG
/// This value is a base pointer that is not generated by RS4GC, i.e. it
/// already exists in the code.
static bool isOriginalBaseResult(Value *V) {
  // no recursion possible
  return !isa<PHINode>(V) && !isa<SelectInst>(V) &&
         !isa<ExtractElementInst>(V) && !isa<InsertElementInst>(V) &&
         !isa<ShuffleVectorInst>(V);
}
#endif

static bool isKnownBase(Value *V, const IsKnownBaseMapTy &KnownBases) {
  auto It = KnownBases.find(V);
  assert(It != KnownBases.end() && "Value not present in the map");
  return It->second;
}

static void setKnownBase(Value *V, bool IsKnownBase,
                         IsKnownBaseMapTy &KnownBases) {
#ifndef NDEBUG
  auto It = KnownBases.find(V);
  if (It != KnownBases.end())
    assert(It->second == IsKnownBase && "Changing already present value");
#endif
  KnownBases[V] = IsKnownBase;
}

// Returns true if First and Second values are both scalar or both vector.
static bool areBothVectorOrScalar(Value *First, Value *Second) {
  return isa<VectorType>(First->getType()) ==
         isa<VectorType>(Second->getType());
}

namespace {

/// Models the state of a single base defining value in the findBasePointer
/// algorithm for determining where a new instruction is needed to propagate
/// the base of this BDV.
class BDVState {
public:
  enum StatusTy {
    // Starting state of lattice
    Unknown,
    // Some specific base value -- does *not* mean that instruction
    // propagates the base of the object
    // ex: gep %arg, 16 -> %arg is the base value
    Base,
    // Need to insert a node to represent a merge.
    Conflict
  };

  BDVState() {
    llvm_unreachable("missing state in map");
  }

  explicit BDVState(Value *OriginalValue)
      : OriginalValue(OriginalValue) {}
  explicit BDVState(Value *OriginalValue, StatusTy Status,
                    Value *BaseValue = nullptr)
      : OriginalValue(OriginalValue), Status(Status), BaseValue(BaseValue) {
    assert(Status != Base || BaseValue);
  }

  StatusTy getStatus() const { return Status; }
  Value *getOriginalValue() const { return OriginalValue; }
  Value *getBaseValue() const { return BaseValue; }

  bool isBase() const { return getStatus() == Base; }
  bool isUnknown() const { return getStatus() == Unknown; }
  bool isConflict() const { return getStatus() == Conflict; }

  // Values of type BDVState form a lattice, and this function implements the
  // meet operation.
  void meet(const BDVState &Other) {
    auto markConflict = [&]() {
      Status = BDVState::Conflict;
      BaseValue = nullptr;
    };
    // Conflict is a final state.
    if (isConflict())
      return;
    // If we are not known - just take the other state.
    if (isUnknown()) {
      Status = Other.getStatus();
      BaseValue = Other.getBaseValue();
      return;
    }
    // We are base.
    assert(isBase() && "Unknown state");
    // If other is unknown - just keep our state.
    if (Other.isUnknown())
      return;
    // If other is conflict - it is a final state.
    if (Other.isConflict())
      return markConflict();
    // Other is base as well.
    assert(Other.isBase() && "Unknown state");
    // If bases are different - Conflict.
    if (getBaseValue() != Other.getBaseValue())
      return markConflict();
    // We are identical, do nothing.
  }
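
  // Meet examples (illustrative):
  //   meet(Unknown,  Base b)   == Base b
  //   meet(Base b1,  Base b1)  == Base b1
  //   meet(Base b1,  Base b2)  == Conflict   (different bases)
  //   meet(Conflict, anything) == Conflict   (Conflict is final)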

  bool operator==(const BDVState &Other) const {
    return OriginalValue == Other.OriginalValue &&
           BaseValue == Other.BaseValue && Status == Other.Status;
  }

  bool operator!=(const BDVState &other) const { return !(*this == other); }

  LLVM_DUMP_METHOD
  void dump() const {
    print(dbgs());
    dbgs() << '\n';
  }

  void print(raw_ostream &OS) const {
    switch (getStatus()) {
    case Unknown:
      OS << "U";
      break;
    case Base:
      OS << "B";
      break;
    case Conflict:
      OS << "C";
      break;
    }
    OS << " (base " << getBaseValue() << " - "
       << (getBaseValue() ? getBaseValue()->getName() : "nullptr") << ")"
       << " for " << OriginalValue->getName() << ":";
  }

private:
  AssertingVH<Value> OriginalValue; // instruction this state corresponds to
  StatusTy Status = Unknown;
  AssertingVH<Value> BaseValue = nullptr; // Non-null only if Status == Base.
};

} // end anonymous namespace

#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) {
  State.print(OS);
  return OS;
}
#endif

/// For a given value or instruction, figure out what base ptr it's derived
/// from.  For gc objects, this is simply itself.  On success, returns a value
/// which is the base pointer.  (This is reliable and can be used for
/// relocation.)  On failure, returns nullptr.
static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache,
                              IsKnownBaseMapTy &KnownBases) {
  Value *Def = findBaseOrBDV(I, Cache, KnownBases);

  if (isKnownBase(Def, KnownBases) && areBothVectorOrScalar(Def, I))
    return Def;

  // Here's the rough algorithm:
  // - For every SSA value, construct a mapping to either an actual base
  //   pointer or a PHI which obscures the base pointer.
  // - Construct a mapping from PHI to unknown TOP state.  Use an
  //   optimistic algorithm to propagate base pointer information.  Lattice
  //   looks like:
  //   UNKNOWN
  //   b1 b2 b3 b4
  //   CONFLICT
  //   When algorithm terminates, all PHIs will either have a single concrete
  //   base or be in a conflict state.
  // - For every conflict, insert a dummy PHI node without arguments.  Add
  //   these to the base[Instruction] = BasePtr mapping.  For every
  //   non-conflict, add the actual base.
  // - For every conflict, add arguments for the base[a] of each input
  //   arguments.
  //
  // Note: A simpler form of this would be to add the conflict form of all
  // PHIs without running the optimistic algorithm.  This would be
  // analogous to pessimistic data flow and would likely lead to an
  // overall worse solution.
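  //
  // Worked example (illustrative, simplified):
  //   loop:
  //     %phi = phi ptr addrspace(1) [ %arg, %entry ], [ %gep, %loop ]
  //     %gep = getelementptr i8, ptr addrspace(1) %phi, i64 8
  // %phi merges two derived pointers, so it reaches the Conflict state and a
  // parallel base phi is materialized:
  //     %phi.base = phi ptr addrspace(1) [ %arg, %entry ], [ %phi.base, %loop ]
  // (%arg is its own base; the base of %gep is the base of %phi).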

#ifndef NDEBUG
  auto isExpectedBDVType = [](Value *BDV) {
    return isa<PHINode>(BDV) || isa<SelectInst>(BDV) ||
           isa<ExtractElementInst>(BDV) || isa<InsertElementInst>(BDV) ||
           isa<ShuffleVectorInst>(BDV);
  };
#endif

  // Once populated, will contain a mapping from each potentially non-base BDV
  // to a lattice value (described above) which corresponds to that BDV.
  // We use the order of insertion (DFS over the def/use graph) to provide a
  // stable deterministic ordering for visiting DenseMaps (which are unordered)
  // below.  This is important for deterministic compilation.
  MapVector<Value *, BDVState> States;

#ifndef NDEBUG
  auto VerifyStates = [&]() {
    for (auto &Entry : States) {
      assert(Entry.first == Entry.second.getOriginalValue());
    }
  };
#endif

  auto visitBDVOperands = [](Value *BDV, std::function<void (Value*)> F) {
    if (PHINode *PN = dyn_cast<PHINode>(BDV)) {
      for (Value *InVal : PN->incoming_values())
        F(InVal);
    } else if (SelectInst *SI = dyn_cast<SelectInst>(BDV)) {
      F(SI->getTrueValue());
      F(SI->getFalseValue());
    } else if (auto *EE = dyn_cast<ExtractElementInst>(BDV)) {
      F(EE->getVectorOperand());
    } else if (auto *IE = dyn_cast<InsertElementInst>(BDV)) {
      F(IE->getOperand(0));
      F(IE->getOperand(1));
    } else if (auto *SV = dyn_cast<ShuffleVectorInst>(BDV)) {
      // For a canonical broadcast, ignore the undef argument
      // (without this, we insert a parallel base shuffle for every broadcast)
      F(SV->getOperand(0));
      if (!SV->isZeroEltSplat())
        F(SV->getOperand(1));
    } else {
      llvm_unreachable("unexpected BDV type");
    }
  };

  // Recursively fill in all base defining values reachable from the initial
  // one for which we don't already know a definite base value for
  /* scope */ {
    SmallVector<Value *, 16> Worklist;
    Worklist.push_back(Def);
    States.insert({Def, BDVState(Def)});
    while (!Worklist.empty()) {
      Value *Current = Worklist.pop_back_val();
      assert(!isOriginalBaseResult(Current) && "why did it get added?");

      auto visitIncomingValue = [&](Value *InVal) {
        Value *Base = findBaseOrBDV(InVal, Cache, KnownBases);
        if (isKnownBase(Base, KnownBases) &&
            areBothVectorOrScalar(Base, InVal))
          // Known bases won't need new instructions introduced and can be
          // ignored safely.  However, this can only be done when InVal and
          // Base are both scalar or both vector.  Otherwise, we need to find
          // a correct BDV for InVal, by creating an entry in the lattice
          // (States).
          return;
        assert(isExpectedBDVType(Base) && "the only non-base values "
               "we see should be base defining values");
        if (States.insert(std::make_pair(Base, BDVState(Base))).second)
          Worklist.push_back(Base);
      };

      visitBDVOperands(Current, visitIncomingValue);
    }
  }

#ifndef NDEBUG
  VerifyStates();
  LLVM_DEBUG(dbgs() << "States after initialization:\n");
  for (const auto &Pair : States) {
    LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
  }
#endif

  // Iterate forward through the value graph pruning any node from the state
  // list where all of the inputs are base pointers.  The purpose of this is to
  // reuse existing values when the derived pointer we were asked to
  // materialize a base pointer for happens to be a base pointer itself.  (Or
  // a sub-graph feeding it does.)
  SmallVector<Value *> ToRemove;
  do {
    ToRemove.clear();
    for (auto Pair : States) {
      Value *BDV = Pair.first;
      auto canPruneInput = [&](Value *V) {
        // If the input of the BDV is the BDV itself we can prune it.  This is
        // only possible if the BDV is a PHI node.
        if (V->stripPointerCasts() == BDV)
          return true;
        Value *VBDV = findBaseOrBDV(V, Cache, KnownBases);
        if (V->stripPointerCasts() != VBDV)
          return false;
        // The assumption is that anything not in the state list
        // propagates a base pointer.
        return States.count(VBDV) == 0;
      };

      bool CanPrune = true;
      visitBDVOperands(BDV, [&](Value *Op) {
        CanPrune = CanPrune && canPruneInput(Op);
      });
      if (CanPrune)
        ToRemove.push_back(BDV);
    }
    for (Value *V : ToRemove) {
      States.erase(V);
      // Cache the fact V is its own base for later usage.
      Cache[V] = V;
    }
  } while (!ToRemove.empty());

  // Did we manage to prove that Def itself must be a base pointer?
  if (!States.count(Def))
    return Def;

  // Return a phi state for a base defining value.  We'll generate a new
  // base state for known bases and expect to find a cached state otherwise.
  auto GetStateForBDV = [&](Value *BaseValue, Value *Input) {
    auto I = States.find(BaseValue);
    if (I != States.end())
      return I->second;
    assert(areBothVectorOrScalar(BaseValue, Input));
    return BDVState(BaseValue, BDVState::Base, BaseValue);
  };

  // Even though we have identified a concrete base (or a conflict) for all
  // live pointers at this point, there are cases where the base is of an
  // incompatible type compared to the original instruction.  We conservatively
  // mark those as conflicts to ensure that corresponding BDVs will be
  // generated in the next steps.

  // This is a rather explicit check for all cases where we should mark the
  // state as a conflict to force the latter stages of the algorithm to emit
  // the BDVs.
  // TODO: in many cases the instructions emitted for the conflicting states
  // will be identical to I itself (if the instructions operate on their BDVs
  // themselves).  We should exploit this, but can't do it here since it would
  // break the invariant about the BDVs not being known to be a base.
  // TODO: the code also does not handle constants at all - the algorithm
  // relies on all constants having the same BDV and therefore constant-only
  // insns will never be in conflict, but this check is ignored here.  If the
  // conflicting constants are the BDVs themselves, they will be identical
  // instructions and will get optimized away (as in the above TODO).
  auto MarkConflict = [&](Instruction *I, Value *BaseValue) {
    // IE and EE mix vector & scalar so are always a conflict.
    if (isa<InsertElementInst>(I) || isa<ExtractElementInst>(I))
      return true;
    // Shuffle vector is always a conflict as it creates a new vector from
    // existing ones.
    if (isa<ShuffleVectorInst>(I))
      return true;
    // Any instructions where the computed base type differs from the
    // instruction type.  An example is where an extract instruction is used
    // by a select.  Here the select's BDV is a vector (because of extract's
    // BDV), while the select itself is a scalar type.  Note that the IE and
    // EE instruction check is not fully subsumed by the vector<->scalar check
    // at the end, this is due to the BDV algorithm being ignorant of BDV
    // types at this junction.
    if (!areBothVectorOrScalar(BaseValue, I))
      return true;
    return false;
  };

  bool Progress = true;
  while (Progress) {
#ifndef NDEBUG
    const size_t OldSize = States.size();
#endif
    Progress = false;
    // We're only changing values in this loop, thus safe to keep iterators.
    // Since this is computing a fixed point, the order of visit does not
    // affect the result.  TODO: We could use a worklist here and make this run
    // much faster.
    for (auto Pair : States) {
      Value *BDV = Pair.first;
      // Only values that do not have known bases or those that have differing
      // type (scalar versus vector) from a possible known base should be in
      // the lattice.
      assert((!isKnownBase(BDV, KnownBases) ||
              !areBothVectorOrScalar(BDV, Pair.second.getBaseValue())) &&
             "why did it get added?");

      BDVState NewState(BDV);
      visitBDVOperands(BDV, [&](Value *Op) {
        Value *BDV = findBaseOrBDV(Op, Cache, KnownBases);
        auto OpState = GetStateForBDV(BDV, Op);
        NewState.meet(OpState);
      });

      // If the instruction has a known base, but should in fact be marked as
      // conflict because of incompatible in/out types, we mark it as such,
      // ensuring that it will propagate through the fixpoint iteration.
      auto I = cast<Instruction>(BDV);
      auto BV = NewState.getBaseValue();
      if (BV && MarkConflict(I, BV))
        NewState = BDVState(I, BDVState::Conflict);

      BDVState OldState = Pair.second;
      if (OldState != NewState) {
        Progress = true;
        States[BDV] = NewState;
      }
    }

    assert(OldSize == States.size() &&
           "fixed point shouldn't be adding any new nodes to state");
  }

#ifndef NDEBUG
  VerifyStates();
  LLVM_DEBUG(dbgs() << "States after meet iteration:\n");
  for (const auto &Pair : States) {
    LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
  }

  // Since we do the conflict marking as part of the fixpoint iteration this
  // loop only asserts that invariants are met.
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    auto *BaseValue = State.getBaseValue();
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert(
        (!isKnownBase(I, KnownBases) || !areBothVectorOrScalar(I, BaseValue)) &&
        "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
  }
#endif

  // Insert Phis for all conflicts
  // TODO: adjust naming patterns to avoid this order of iteration dependency
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBase(I, KnownBases) ||
            !areBothVectorOrScalar(I, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    // Since we're joining a vector and scalar base, they can never be the
    // same.  As a result, we should always see insert element having reached
    // the conflict state.
    assert(!isa<InsertElementInst>(I) || State.isConflict());

    if (!State.isConflict())
      continue;

    auto getMangledName = [](Instruction *I) -> std::string {
      if (isa<PHINode>(I)) {
        return suffixed_name_or(I, ".base", "base_phi");
      } else if (isa<SelectInst>(I)) {
        return suffixed_name_or(I, ".base", "base_select");
      } else if (isa<ExtractElementInst>(I)) {
        return suffixed_name_or(I, ".base", "base_ee");
      } else if (isa<InsertElementInst>(I)) {
        return suffixed_name_or(I, ".base", "base_ie");
      } else {
        return suffixed_name_or(I, ".base", "base_sv");
      }
    };

    Instruction *BaseInst = I->clone();
    BaseInst->insertBefore(I);
    BaseInst->setName(getMangledName(I));
    // Add metadata marking this as a base value
    BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
    States[I] = BDVState(I, BDVState::Conflict, BaseInst);
    setKnownBase(BaseInst, /* IsKnownBase */true, KnownBases);
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Returns an instruction which produces the base pointer for a given
  // instruction.  The instruction is assumed to be an input to one of the BDVs
  // seen in the inference algorithm above.  As such, we must either already
  // know its base defining value is a base, or have inserted a new
  // instruction to propagate the base of its BDV and have entered that newly
  // introduced instruction into the state table.  In either case, we are
  // assured to be able to determine an instruction which produces its base
  // pointer.
  auto getBaseForInput = [&](Value *Input, Instruction *InsertPt) {
    Value *BDV = findBaseOrBDV(Input, Cache, KnownBases);
    Value *Base = nullptr;
    if (!States.count(BDV)) {
      assert(areBothVectorOrScalar(BDV, Input));
      Base = BDV;
    } else {
      // Either conflict or base.
      assert(States.count(BDV));
      Base = States[BDV].getBaseValue();
    }
    assert(Base && "Can't be null");
    // The cast is needed since base traversal may strip away bitcasts
    if (Base->getType() != Input->getType() && InsertPt)
      Base = new BitCastInst(Base, Input->getType(), "cast",
                             InsertPt->getIterator());
    return Base;
  };

  // Fixup all the inputs of the new PHIs.  Visit order needs to be
  // deterministic and predictable because we're naming newly created
  // instructions.
  for (auto Pair : States) {
    Instruction *BDV = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;

    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBase(BDV, KnownBases) ||
            !areBothVectorOrScalar(BDV, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
    if (!State.isConflict())
      continue;

    if (PHINode *BasePHI = dyn_cast<PHINode>(State.getBaseValue())) {
      PHINode *PN = cast<PHINode>(BDV);
      const unsigned NumPHIValues = PN->getNumIncomingValues();

      // The IR verifier requires phi nodes with multiple entries from the
      // same basic block to have the same incoming value for each of those
      // entries.  Since we're inserting bitcasts in the loop, make sure we
      // do so at least once per incoming block.
      DenseMap<BasicBlock *, Value *> BlockToValue;
      for (unsigned i = 0; i < NumPHIValues; i++) {
        Value *InVal = PN->getIncomingValue(i);
        BasicBlock *InBB = PN->getIncomingBlock(i);
        if (!BlockToValue.count(InBB))
          BlockToValue[InBB] = getBaseForInput(InVal, InBB->getTerminator());
        else {
#ifndef NDEBUG
          Value *OldBase = BlockToValue[InBB];
          Value *Base = getBaseForInput(InVal, nullptr);

          // We can't use `stripPointerCasts` instead of this function because
          // `stripPointerCasts` doesn't handle vectors of pointers.
          auto StripBitCasts = [](Value *V) -> Value * {
            while (auto *BC = dyn_cast<BitCastInst>(V))
              V = BC->getOperand(0);
            return V;
          };
          // In essence this assert states: the only way two values
          // incoming from the same basic block may be different is by
          // being different bitcasts of the same value.  A cleanup
          // that remains TODO is changing findBaseOrBDV to return an
          // llvm::Value of the correct type (and still remain pure).
          // This will remove the need to add bitcasts.
          assert(StripBitCasts(Base) == StripBitCasts(OldBase) &&
                 "findBaseOrBDV should be pure!");
#endif
        }
        Value *Base = BlockToValue[InBB];
        BasePHI->setIncomingValue(i, Base);
      }
    } else if (SelectInst *BaseSI =
                   dyn_cast<SelectInst>(State.getBaseValue())) {
      SelectInst *SI = cast<SelectInst>(BDV);

      // Find the instruction which produces the base for each input.
      // We may need to insert a bitcast.
      BaseSI->setTrueValue(getBaseForInput(SI->getTrueValue(), BaseSI));
      BaseSI->setFalseValue(getBaseForInput(SI->getFalseValue(), BaseSI));
    } else if (auto *BaseEE =
                   dyn_cast<ExtractElementInst>(State.getBaseValue())) {
      Value *InVal = cast<ExtractElementInst>(BDV)->getVectorOperand();
      // Find the instruction which produces the base for each input.  We may
      // need to insert a bitcast.
      BaseEE->setOperand(0, getBaseForInput(InVal, BaseEE));
    } else if (auto *BaseIE =
                   dyn_cast<InsertElementInst>(State.getBaseValue())) {
      auto *BdvIE = cast<InsertElementInst>(BDV);
      auto UpdateOperand = [&](int OperandIdx) {
        Value *InVal = BdvIE->getOperand(OperandIdx);
        Value *Base = getBaseForInput(InVal, BaseIE);
        BaseIE->setOperand(OperandIdx, Base);
      };
      UpdateOperand(0); // vector operand
      UpdateOperand(1); // scalar operand
    } else {
      auto *BaseSV = cast<ShuffleVectorInst>(State.getBaseValue());
      auto *BdvSV = cast<ShuffleVectorInst>(BDV);
      auto UpdateOperand = [&](int OperandIdx) {
        Value *InVal = BdvSV->getOperand(OperandIdx);
        Value *Base = getBaseForInput(InVal, BaseSV);
        BaseSV->setOperand(OperandIdx, Base);
      };
      UpdateOperand(0); // vector operand
      if (!BdvSV->isZeroEltSplat())
        UpdateOperand(1); // vector operand
      else {
        // Never read, so just use poison
        Value *InVal = BdvSV->getOperand(1);
        BaseSV->setOperand(1, PoisonValue::get(InVal->getType()));
      }
    }
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // get the data layout to compare the sizes of base/derived pointer values
  [[maybe_unused]] auto &DL =
      cast<llvm::Instruction>(Def)->getModule()->getDataLayout();
  // Cache all of our results so we can cheaply reuse them
  // NOTE: This is actually two caches: one of the base defining value
  // relation and one of the base pointer relation!  FIXME
  for (auto Pair : States) {
    auto *BDV = Pair.first;
    Value *Base = Pair.second.getBaseValue();
    assert(BDV && Base);
    // Whenever we have a derived ptr(s), their base
    // ptr(s) must be of the same size, not necessarily the same type
    assert(DL.getTypeAllocSize(BDV->getType()) ==
               DL.getTypeAllocSize(Base->getType()) &&
           "Derived and base values should have same size");
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert(
        (!isKnownBase(BDV, KnownBases) || !areBothVectorOrScalar(BDV, Base)) &&
        "why did it get added?");

    LLVM_DEBUG(
        dbgs() << "Updating base value cache"
               << " for: " << BDV->getName() << " from: "
               << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none")
               << " to: " << Base->getName() << "\n");

    Cache[BDV] = Base;
  }
  assert(Cache.count(Def));
  return Cache[Def];
}

// For a set of live pointers (base and/or derived), identify the base
// pointer of the object which they are derived from.  This routine will
// mutate the IR graph as needed to make the 'base' pointer live at the
// definition site of 'derived'.  This ensures that any use of 'derived' can
// also use 'base'.  This may involve the insertion of a number of
// additional PHI nodes.
//
// preconditions: live is a set of pointer type Values
//
// side effects: may insert PHI nodes into the existing CFG, will preserve
// CFG, will not remove or mutate any existing nodes
//
// post condition: PointerToBase contains one (derived, base) pair for every
// pointer in live.  Note that derived can be equal to base if the original
// pointer was a base pointer.
static void findBasePointers(const StatepointLiveSetTy &live,
                             PointerToBaseTy &PointerToBase, DominatorTree *DT,
                             DefiningValueMapTy &DVCache,
                             IsKnownBaseMapTy &KnownBases) {
  for (Value *ptr : live) {
    Value *base = findBasePointer(ptr, DVCache, KnownBases);
    assert(base && "failed to find base pointer");
    PointerToBase[ptr] = base;
    assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) ||
            DT->dominates(cast<Instruction>(base)->getParent(),
                          cast<Instruction>(ptr)->getParent())) &&
           "The base we found better dominate the derived pointer");
  }
}
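
// Example (illustrative): for live = { %obj, %gep } where
//   %gep = getelementptr i8, ptr addrspace(1) %obj, i64 8
// this fills in PointerToBase = { %obj -> %obj, %gep -> %obj }.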

/// Find the required base pointers (and adjust the live set) for the given
/// parse point.
static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache,
                             CallBase *Call,
                             PartiallyConstructedSafepointRecord &result,
                             PointerToBaseTy &PointerToBase,
                             IsKnownBaseMapTy &KnownBases) {
  StatepointLiveSetTy PotentiallyDerivedPointers = result.LiveSet;
  // We assume that all pointers passed to deopt are base pointers; as an
  // optimization, we can use this to avoid separately materializing the base
  // pointer graph.  This is only relevant since we're very conservative about
  // generating new conflict nodes during base pointer insertion.  If we were
  // smarter there, this would be irrelevant.
  if (auto Opt = Call->getOperandBundle(LLVMContext::OB_deopt))
    for (Value *V : Opt->Inputs) {
      if (!PotentiallyDerivedPointers.count(V))
        continue;
      PotentiallyDerivedPointers.remove(V);
      PointerToBase[V] = V;
    }
  findBasePointers(PotentiallyDerivedPointers, PointerToBase, &DT, DVCache,
                   KnownBases);
}

/// Given an updated version of the dataflow liveness results, update the
/// liveset and base pointer maps for the call site CS.
static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &result,
                                  PointerToBaseTy &PointerToBase,
                                  GCStrategy *GC);

static void recomputeLiveInValues(
    Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
    MutableArrayRef<struct PartiallyConstructedSafepointRecord> records,
    PointerToBaseTy &PointerToBase, GCStrategy *GC) {
  // TODO-PERF: reuse the original liveness, then simply run the dataflow
  // again.  The old values are still live and will help it stabilize quickly.
  GCPtrLivenessData RevisedLivenessData;
  computeLiveInValues(DT, F, RevisedLivenessData, GC);
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    recomputeLiveInValues(RevisedLivenessData, toUpdate[i], info,
                          PointerToBase, GC);
  }
}

// Utility function which clones all instructions from "ChainToBase"
// and inserts them before "InsertBefore".  Returns the rematerialized value
// which should be used after the statepoint.
static Instruction *rematerializeChain(ArrayRef<Instruction *> ChainToBase,
                                       Instruction *InsertBefore,
                                       Value *RootOfChain,
                                       Value *AlternateLiveBase) {
  Instruction *LastClonedValue = nullptr;
  Instruction *LastValue = nullptr;
  // Walk backwards to visit top-most instructions first.
  for (Instruction *Instr :
       make_range(ChainToBase.rbegin(), ChainToBase.rend())) {
    // Only GEPs and casts are supported as we need to be careful to not
    // introduce any new uses of pointers not in the liveset.
    // Note that it's fine to introduce new uses of pointers which were
    // otherwise not used after this statepoint.
    assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr));

    Instruction *ClonedValue = Instr->clone();
    ClonedValue->insertBefore(InsertBefore);
    ClonedValue->setName(Instr->getName() + ".remat");

    // If it is not the first instruction in the chain then it uses the
    // previously cloned value.  We should update it to use the cloned value.
    if (LastClonedValue) {
      assert(LastValue);
      ClonedValue->replaceUsesOfWith(LastValue, LastClonedValue);
#ifndef NDEBUG
      for (auto *OpValue : ClonedValue->operand_values()) {
        // Assert that cloned instruction does not use any instructions from
        // this chain other than LastClonedValue
        assert(!is_contained(ChainToBase, OpValue) &&
               "incorrect use in rematerialization chain");
        // Assert that the cloned instruction does not use the RootOfChain
        // or the AlternateLiveBase.
        assert(OpValue != RootOfChain && OpValue != AlternateLiveBase);
      }
#endif
    } else {
      // For the first instruction, replace the use of the unrelocated base
      // i.e. RootOfChain/OrigRootPhi, with the corresponding PHI present in
      // the live set.  They have been proved to be the same PHI nodes.  Note
      // that the *only* use of the RootOfChain in the ChainToBase list is
      // the first Value in the list.
      if (RootOfChain != AlternateLiveBase)
        ClonedValue->replaceUsesOfWith(RootOfChain, AlternateLiveBase);
    }

    LastClonedValue = ClonedValue;
    LastValue = Instr;
  }
  assert(LastClonedValue);
  return LastClonedValue;
}
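
// Example (illustrative): if the live derived pointer was computed as
//   %gep = getelementptr i8, ptr addrspace(1) %base, i64 16
// the chain { %gep } is cloned after the statepoint as
//   %gep.remat = getelementptr i8, ptr addrspace(1) %base.relocated, i64 16
// recomputing the derived pointer from the relocated base instead of
// relocating it (profitable for short chains; see
// -spp-rematerialization-threshold).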

// When inserting gc.relocate and gc.result calls, we need to ensure there are
// no uses of the original value / return value between the gc.statepoint and
// the gc.relocate / gc.result call.  One case which can arise is a phi node
// at the start of one of the successor blocks.  We also need to be able to
// insert the gc.relocates only on the path which goes through the statepoint.
// We might need to split an edge to make this possible.
static BasicBlock *
normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent,
                            DominatorTree &DT) {
  BasicBlock *Ret = BB;
  if (!BB->getUniquePredecessor())
    Ret = SplitBlockPredecessors(BB, InvokeParent, "", &DT);

  // Now that 'Ret' has unique predecessor we can safely remove all phi nodes
  // from it
  FoldSingleEntryPHINodes(Ret);
  assert(!isa<PHINode>(Ret->begin()) &&
         "All PHI nodes should have been removed!");

  // At this point, we can safely insert a gc.relocate or gc.result as the
  // first instruction in Ret if needed.
  return Ret;
}

// List of all function attributes which must be stripped when lowering from
// abstract machine model to physical machine model.  Essentially, these are
// all the effects a safepoint might have which we ignored in the abstract
// machine model for purposes of optimization.  We have to strip these on
// both function declarations and call sites.
static constexpr Attribute::AttrKind FnAttrsToStrip[] =
    {Attribute::Memory, Attribute::NoSync, Attribute::NoFree};

1451// Create new attribute set containing only attributes which can be transferred
1452// from the original call to the safepoint.
1453static AttributeList legalizeCallAttributes(CallBase *Call, bool IsMemIntrinsic,
1454 AttributeList StatepointAL) {
1455 AttributeList OrigAL = Call->getAttributes();
1456 if (OrigAL.isEmpty())
1457 return StatepointAL;
1458
1459 // Remove the readonly, readnone, and statepoint function attributes.
1460 LLVMContext &Ctx = Call->getContext();
1461 AttrBuilder FnAttrs(Ctx, OrigAL.getFnAttrs());
1462 for (auto Attr : FnAttrsToStrip)
1463 FnAttrs.removeAttribute(Attr);
1464
1465 for (Attribute A : OrigAL.getFnAttrs()) {
1467 FnAttrs.removeAttribute(A);
1468 }
1469
1470 StatepointAL = StatepointAL.addFnAttributes(Ctx, FnAttrs);
1471
1472 // The memory intrinsics do not have a 1:1 correspondence of the original
1473 // call arguments to the produced statepoint. Do not transfer the argument
1474 // attributes to avoid putting them on incorrect arguments.
1475 if (IsMemIntrinsic)
1476 return StatepointAL;
1477
1478 // Attach the argument attributes from the original call at the corresponding
1479 // arguments in the statepoint. Note that any argument attributes that are
1480 // invalid after lowering are stripped in stripNonValidDataFromBody.
1481 for (unsigned I : llvm::seq(Call->arg_size()))
1482 StatepointAL = StatepointAL.addParamAttributes(
1483       Ctx, GCStatepointInst::CallArgsBeginPos + I,
1484       AttrBuilder(Ctx, OrigAL.getParamAttrs(I)));
1485
1486 // Return attributes are later attached to the gc.result intrinsic.
1487 return StatepointAL;
1488}
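// A hedged example of the intent (illustrative IR): given an original call
//
//   %r = call ptr addrspace(1) @f(ptr addrspace(1) readonly %p) nofree nosync
//
// the readonly parameter attribute is re-attached at the statepoint argument
// position holding %p, while nofree/nosync (and any memory(...) attribute)
// are dropped from the wrapping gc.statepoint, which itself "touches" the
// entire heap.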
1489
1490/// Helper function to place all gc relocates necessary for the given
1491/// statepoint.
1492/// Inputs:
1493/// liveVariables - list of variables to be relocated.
1494/// basePtrs - base pointers.
1495/// statepointToken - statepoint instruction to which relocates should be
1496/// bound.
1497/// Builder - LLVM IR builder to be used to construct new calls.
1498 static void CreateGCRelocates(ArrayRef<Value *> LiveVariables,
1499                               ArrayRef<Value *> BasePtrs,
1500 Instruction *StatepointToken,
1501 IRBuilder<> &Builder, GCStrategy *GC) {
1502 if (LiveVariables.empty())
1503 return;
1504
1505 auto FindIndex = [](ArrayRef<Value *> LiveVec, Value *Val) {
1506 auto ValIt = llvm::find(LiveVec, Val);
1507 assert(ValIt != LiveVec.end() && "Val not found in LiveVec!");
1508 size_t Index = std::distance(LiveVec.begin(), ValIt);
1509 assert(Index < LiveVec.size() && "Bug in std::find?");
1510 return Index;
1511 };
1512 Module *M = StatepointToken->getModule();
1513
1514 // All gc_relocate are generated as i8 addrspace(1)* (or a vector type whose
1515 // element type is i8 addrspace(1)*). We originally generated unique
1516 // declarations for each pointer type, but this proved problematic because
1517 // the intrinsic mangling code is incomplete and fragile. Since we're moving
1518 // towards a single unified pointer type anyways, we can just cast everything
1519 // to an i8* of the right address space. A bitcast is added later to convert
1520 // gc_relocate to the actual value's type.
1521 auto getGCRelocateDecl = [&](Type *Ty) {
1522     assert(isHandledGCPointerType(Ty, GC));
1523     auto AS = Ty->getScalarType()->getPointerAddressSpace();
1524 Type *NewTy = PointerType::get(M->getContext(), AS);
1525 if (auto *VT = dyn_cast<VectorType>(Ty))
1526 NewTy = FixedVectorType::get(NewTy,
1527 cast<FixedVectorType>(VT)->getNumElements());
1528 return Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate,
1529 {NewTy});
1530 };
1531
1532 // Lazily populated map from input types to the canonicalized form mentioned
1533 // in the comment above. This should probably be cached somewhere more
1534 // broadly.
1535 DenseMap<Type *, Function *> TypeToDeclMap;
1536
1537 for (unsigned i = 0; i < LiveVariables.size(); i++) {
1538 // Generate the gc.relocate call and save the result
1539 Value *BaseIdx = Builder.getInt32(FindIndex(LiveVariables, BasePtrs[i]));
1540 Value *LiveIdx = Builder.getInt32(i);
1541
1542 Type *Ty = LiveVariables[i]->getType();
1543 if (!TypeToDeclMap.count(Ty))
1544 TypeToDeclMap[Ty] = getGCRelocateDecl(Ty);
1545 Function *GCRelocateDecl = TypeToDeclMap[Ty];
1546
1547 // only specify a debug name if we can give a useful one
1548 CallInst *Reloc = Builder.CreateCall(
1549 GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx},
1550 suffixed_name_or(LiveVariables[i], ".relocated", ""));
1551 // Trick CodeGen into thinking there are lots of free registers at this
1552 // fake call.
1553     Reloc->setCallingConv(CallingConv::Cold);
1554   }
1555}
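// Illustrative output (hypothetical live set {%base, %derived} with %base at
// index 0 and %derived at index 1 of the statepoint's gc arguments):
//
//   %base.relocated = call ptr addrspace(1)
//       @llvm.experimental.gc.relocate.p1(token %tok, i32 0, i32 0)
//   %derived.relocated = call ptr addrspace(1)
//       @llvm.experimental.gc.relocate.p1(token %tok, i32 0, i32 1)
//
// The first index names the base, the second the derived pointer; this is how
// the base/derived pairing survives into the generated stack maps.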
1556
1557namespace {
1558
1559/// This struct is used to defer RAUWs and `eraseFromParent` calls. Using this
1560/// avoids having to worry about keeping around dangling pointers to Values.
1561class DeferredReplacement {
1562   AssertingVH<Instruction> Old;
1563   AssertingVH<Instruction> New;
1564   bool IsDeoptimize = false;
1565
1566 DeferredReplacement() = default;
1567
1568public:
1569 static DeferredReplacement createRAUW(Instruction *Old, Instruction *New) {
1570 assert(Old != New && Old && New &&
1571 "Cannot RAUW equal values or to / from null!");
1572
1573 DeferredReplacement D;
1574 D.Old = Old;
1575 D.New = New;
1576 return D;
1577 }
1578
1579 static DeferredReplacement createDelete(Instruction *ToErase) {
1580 DeferredReplacement D;
1581 D.Old = ToErase;
1582 return D;
1583 }
1584
1585 static DeferredReplacement createDeoptimizeReplacement(Instruction *Old) {
1586#ifndef NDEBUG
1587 auto *F = cast<CallInst>(Old)->getCalledFunction();
1588 assert(F && F->getIntrinsicID() == Intrinsic::experimental_deoptimize &&
1589 "Only way to construct a deoptimize deferred replacement");
1590#endif
1591 DeferredReplacement D;
1592 D.Old = Old;
1593 D.IsDeoptimize = true;
1594 return D;
1595 }
1596
1597 /// Does the task represented by this instance.
1598 void doReplacement() {
1599 Instruction *OldI = Old;
1600 Instruction *NewI = New;
1601
1602 assert(OldI != NewI && "Disallowed at construction?!");
1603 assert((!IsDeoptimize || !New) &&
1604 "Deoptimize intrinsics are not replaced!");
1605
1606 Old = nullptr;
1607 New = nullptr;
1608
1609 if (NewI)
1610 OldI->replaceAllUsesWith(NewI);
1611
1612 if (IsDeoptimize) {
1613 // Note: we've inserted instructions, so the call to llvm.deoptimize may
1614 // not necessarily be followed by the matching return.
1615 auto *RI = cast<ReturnInst>(OldI->getParent()->getTerminator());
1616 new UnreachableInst(RI->getContext(), RI->getIterator());
1617 RI->eraseFromParent();
1618 }
1619
1620 OldI->eraseFromParent();
1621 }
1622};
1623
1624} // end anonymous namespace
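// A minimal usage sketch (mirroring how this file drives it below): queue the
// replacements while statepoints are being created, then flush once no
// safepoint record still holds raw Instruction pointers:
//
//   std::vector<DeferredReplacement> Replacements;
//   Replacements.push_back(DeferredReplacement::createRAUW(OldCall, GCResult));
//   ...   // finish rewriting every statepoint first
//   for (auto &R : Replacements)
//     R.doReplacement();   // RAUW and eraseFromParent happen only here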
1625
1626 static StringRef getDeoptLowering(CallBase *Call) {
1627   const char *DeoptLowering = "deopt-lowering";
1628 if (Call->hasFnAttr(DeoptLowering)) {
1629 // FIXME: Calls have a *really* confusing interface around attributes
1630 // with values.
1631 const AttributeList &CSAS = Call->getAttributes();
1632 if (CSAS.hasFnAttr(DeoptLowering))
1633 return CSAS.getFnAttr(DeoptLowering).getValueAsString();
1634 Function *F = Call->getCalledFunction();
1635 assert(F && F->hasFnAttribute(DeoptLowering));
1636 return F->getFnAttribute(DeoptLowering).getValueAsString();
1637 }
1638 return "live-through";
1639}
1640
1641static void
1642 makeStatepointExplicitImpl(CallBase *Call, /* to replace */
1643                            const SmallVectorImpl<Value *> &BasePtrs,
1644                            const SmallVectorImpl<Value *> &LiveVariables,
1645 PartiallyConstructedSafepointRecord &Result,
1646 std::vector<DeferredReplacement> &Replacements,
1647 const PointerToBaseTy &PointerToBase,
1648 GCStrategy *GC) {
1649 assert(BasePtrs.size() == LiveVariables.size());
1650
1651   // Then go ahead and use the builder to actually do the inserts. We insert
1652 // immediately before the previous instruction under the assumption that all
1653 // arguments will be available here. We can't insert afterwards since we may
1654 // be replacing a terminator.
1655 IRBuilder<> Builder(Call);
1656
1657   ArrayRef<Value *> GCArgs(LiveVariables);
1658   uint64_t StatepointID = StatepointDirectives::DefaultStatepointID;
1659   uint32_t NumPatchBytes = 0;
1660   uint32_t Flags = uint32_t(StatepointFlags::None);
1661
1662 SmallVector<Value *, 8> CallArgs(Call->args());
1663 std::optional<ArrayRef<Use>> DeoptArgs;
1664 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_deopt))
1665 DeoptArgs = Bundle->Inputs;
1666 std::optional<ArrayRef<Use>> TransitionArgs;
1667 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_gc_transition)) {
1668 TransitionArgs = Bundle->Inputs;
1669 // TODO: This flag no longer serves a purpose and can be removed later
1670     Flags |= uint32_t(StatepointFlags::GCTransition);
1671   }
1672
1673   // Instead of lowering calls to @llvm.experimental.deoptimize as normal calls
1674   // with a return value, we lower them as never-returning calls to
1675   // __llvm_deoptimize that are followed by unreachable to get better codegen.
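  // As a hedged sketch (names and operands illustrative only), the final shape
  // after this function and the deferred replacement have run is roughly:
  //
  //   call token (...) @llvm.experimental.gc.statepoint.p0(
  //       ..., ptr @__llvm_deoptimize, ...)
  //   unreachable   ; inserted in place of the ret that followed the deoptimize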
1676 bool IsDeoptimize = false;
1677 bool IsMemIntrinsic = false;
1678
1679   StatepointDirectives SD =
1680       parseStatepointDirectivesFromAttrs(Call->getAttributes());
1681 if (SD.NumPatchBytes)
1682 NumPatchBytes = *SD.NumPatchBytes;
1683 if (SD.StatepointID)
1684 StatepointID = *SD.StatepointID;
1685
1686 // Pass through the requested lowering if any. The default is live-through.
1687 StringRef DeoptLowering = getDeoptLowering(Call);
1688 if (DeoptLowering.equals("live-in"))
1689     Flags |= uint32_t(StatepointFlags::DeoptLiveIn);
1690   else {
1691 assert(DeoptLowering.equals("live-through") && "Unsupported value!");
1692 }
1693
1694 FunctionCallee CallTarget(Call->getFunctionType(), Call->getCalledOperand());
1695 if (Function *F = dyn_cast<Function>(CallTarget.getCallee())) {
1696 auto IID = F->getIntrinsicID();
1697 if (IID == Intrinsic::experimental_deoptimize) {
1698 // Calls to llvm.experimental.deoptimize are lowered to calls to the
1699 // __llvm_deoptimize symbol. We want to resolve this now, since the
1700 // verifier does not allow taking the address of an intrinsic function.
1701
1702 SmallVector<Type *, 8> DomainTy;
1703 for (Value *Arg : CallArgs)
1704 DomainTy.push_back(Arg->getType());
1705 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy,
1706 /* isVarArg = */ false);
1707
1708 // Note: CallTarget can be a bitcast instruction of a symbol if there are
1709 // calls to @llvm.experimental.deoptimize with different argument types in
1710 // the same module. This is fine -- we assume the frontend knew what it
1711 // was doing when generating this kind of IR.
1712 CallTarget = F->getParent()
1713 ->getOrInsertFunction("__llvm_deoptimize", FTy);
1714
1715 IsDeoptimize = true;
1716 } else if (IID == Intrinsic::memcpy_element_unordered_atomic ||
1717 IID == Intrinsic::memmove_element_unordered_atomic) {
1718 IsMemIntrinsic = true;
1719
1720 // Unordered atomic memcpy and memmove intrinsics which are not explicitly
1721 // marked as "gc-leaf-function" should be lowered in a GC parseable way.
1722 // Specifically, these calls should be lowered to the
1723 // __llvm_{memcpy|memmove}_element_unordered_atomic_safepoint symbols.
1724 // Similarly to __llvm_deoptimize we want to resolve this now, since the
1725 // verifier does not allow taking the address of an intrinsic function.
1726 //
1727 // Moreover we need to shuffle the arguments for the call in order to
1728 // accommodate GC. The underlying source and destination objects might be
1729 // relocated during copy operation should the GC occur. To relocate the
1730 // derived source and destination pointers the implementation of the
1731 // intrinsic should know the corresponding base pointers.
1732 //
1733 // To make the base pointers available pass them explicitly as arguments:
1734 // memcpy(dest_derived, source_derived, ...) =>
1735 // memcpy(dest_base, dest_offset, source_base, source_offset, ...)
1736 auto &Context = Call->getContext();
1737 auto &DL = Call->getModule()->getDataLayout();
1738 auto GetBaseAndOffset = [&](Value *Derived) {
1739 Value *Base = nullptr;
1740 // Optimizations in unreachable code might substitute the real pointer
1741 // with undef, poison or null-derived constant. Return null base for
1742 // them to be consistent with the handling in the main algorithm in
1743 // findBaseDefiningValue.
1744 if (isa<Constant>(Derived))
1745 Base =
1746 ConstantPointerNull::get(cast<PointerType>(Derived->getType()));
1747 else {
1748 assert(PointerToBase.count(Derived));
1749 Base = PointerToBase.find(Derived)->second;
1750 }
1751 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace();
1752 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace);
1753 Value *Base_int = Builder.CreatePtrToInt(
1754 Base, Type::getIntNTy(Context, IntPtrSize));
1755 Value *Derived_int = Builder.CreatePtrToInt(
1756 Derived, Type::getIntNTy(Context, IntPtrSize));
1757 return std::make_pair(Base, Builder.CreateSub(Derived_int, Base_int));
1758 };
1759
1760 auto *Dest = CallArgs[0];
1761 Value *DestBase, *DestOffset;
1762 std::tie(DestBase, DestOffset) = GetBaseAndOffset(Dest);
1763
1764 auto *Source = CallArgs[1];
1765 Value *SourceBase, *SourceOffset;
1766 std::tie(SourceBase, SourceOffset) = GetBaseAndOffset(Source);
1767
1768 auto *LengthInBytes = CallArgs[2];
1769 auto *ElementSizeCI = cast<ConstantInt>(CallArgs[3]);
1770
1771 CallArgs.clear();
1772 CallArgs.push_back(DestBase);
1773 CallArgs.push_back(DestOffset);
1774 CallArgs.push_back(SourceBase);
1775 CallArgs.push_back(SourceOffset);
1776 CallArgs.push_back(LengthInBytes);
1777
1778 SmallVector<Type *, 8> DomainTy;
1779 for (Value *Arg : CallArgs)
1780 DomainTy.push_back(Arg->getType());
1781 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy,
1782 /* isVarArg = */ false);
1783
1784 auto GetFunctionName = [](Intrinsic::ID IID, ConstantInt *ElementSizeCI) {
1785 uint64_t ElementSize = ElementSizeCI->getZExtValue();
1786 if (IID == Intrinsic::memcpy_element_unordered_atomic) {
1787 switch (ElementSize) {
1788 case 1:
1789 return "__llvm_memcpy_element_unordered_atomic_safepoint_1";
1790 case 2:
1791 return "__llvm_memcpy_element_unordered_atomic_safepoint_2";
1792 case 4:
1793 return "__llvm_memcpy_element_unordered_atomic_safepoint_4";
1794 case 8:
1795 return "__llvm_memcpy_element_unordered_atomic_safepoint_8";
1796 case 16:
1797 return "__llvm_memcpy_element_unordered_atomic_safepoint_16";
1798 default:
1799 llvm_unreachable("unexpected element size!");
1800 }
1801 }
1802 assert(IID == Intrinsic::memmove_element_unordered_atomic);
1803 switch (ElementSize) {
1804 case 1:
1805 return "__llvm_memmove_element_unordered_atomic_safepoint_1";
1806 case 2:
1807 return "__llvm_memmove_element_unordered_atomic_safepoint_2";
1808 case 4:
1809 return "__llvm_memmove_element_unordered_atomic_safepoint_4";
1810 case 8:
1811 return "__llvm_memmove_element_unordered_atomic_safepoint_8";
1812 case 16:
1813 return "__llvm_memmove_element_unordered_atomic_safepoint_16";
1814 default:
1815 llvm_unreachable("unexpected element size!");
1816 }
1817 };
1818
1819 CallTarget =
1820 F->getParent()
1821 ->getOrInsertFunction(GetFunctionName(IID, ElementSizeCI), FTy);
1822 }
1823 }
1824
1825 // Create the statepoint given all the arguments
1826 GCStatepointInst *Token = nullptr;
1827 if (auto *CI = dyn_cast<CallInst>(Call)) {
1828 CallInst *SPCall = Builder.CreateGCStatepointCall(
1829 StatepointID, NumPatchBytes, CallTarget, Flags, CallArgs,
1830 TransitionArgs, DeoptArgs, GCArgs, "safepoint_token");
1831
1832 SPCall->setTailCallKind(CI->getTailCallKind());
1833 SPCall->setCallingConv(CI->getCallingConv());
1834
1835 // Set up function attrs directly on statepoint and return attrs later for
1836 // gc_result intrinsic.
1837 SPCall->setAttributes(
1838 legalizeCallAttributes(CI, IsMemIntrinsic, SPCall->getAttributes()));
1839
1840 Token = cast<GCStatepointInst>(SPCall);
1841
1842 // Put the following gc_result and gc_relocate calls immediately after the
1843     // old call (which we're about to delete)
1844 assert(CI->getNextNode() && "Not a terminator, must have next!");
1845 Builder.SetInsertPoint(CI->getNextNode());
1846 Builder.SetCurrentDebugLocation(CI->getNextNode()->getDebugLoc());
1847 } else {
1848 auto *II = cast<InvokeInst>(Call);
1849
1850 // Insert the new invoke into the old block. We'll remove the old one in a
1851 // moment at which point this will become the new terminator for the
1852 // original block.
1853 InvokeInst *SPInvoke = Builder.CreateGCStatepointInvoke(
1854 StatepointID, NumPatchBytes, CallTarget, II->getNormalDest(),
1855 II->getUnwindDest(), Flags, CallArgs, TransitionArgs, DeoptArgs, GCArgs,
1856 "statepoint_token");
1857
1858 SPInvoke->setCallingConv(II->getCallingConv());
1859
1860 // Set up function attrs directly on statepoint and return attrs later for
1861 // gc_result intrinsic.
1862 SPInvoke->setAttributes(
1863 legalizeCallAttributes(II, IsMemIntrinsic, SPInvoke->getAttributes()));
1864
1865 Token = cast<GCStatepointInst>(SPInvoke);
1866
1867 // Generate gc relocates in exceptional path
1868 BasicBlock *UnwindBlock = II->getUnwindDest();
1869 assert(!isa<PHINode>(UnwindBlock->begin()) &&
1870 UnwindBlock->getUniquePredecessor() &&
1871 "can't safely insert in this block!");
1872
1873 Builder.SetInsertPoint(UnwindBlock, UnwindBlock->getFirstInsertionPt());
1874 Builder.SetCurrentDebugLocation(II->getDebugLoc());
1875
1876 // Attach exceptional gc relocates to the landingpad.
1877 Instruction *ExceptionalToken = UnwindBlock->getLandingPadInst();
1878 Result.UnwindToken = ExceptionalToken;
1879
1880 CreateGCRelocates(LiveVariables, BasePtrs, ExceptionalToken, Builder, GC);
1881
1882 // Generate gc relocates and returns for normal block
1883 BasicBlock *NormalDest = II->getNormalDest();
1884 assert(!isa<PHINode>(NormalDest->begin()) &&
1885 NormalDest->getUniquePredecessor() &&
1886 "can't safely insert in this block!");
1887
1888 Builder.SetInsertPoint(NormalDest, NormalDest->getFirstInsertionPt());
1889
1890     // gc relocates will be generated later as if it were a regular call
1891     // statepoint
1892 }
1893 assert(Token && "Should be set in one of the above branches!");
1894
1895 if (IsDeoptimize) {
1896 // If we're wrapping an @llvm.experimental.deoptimize in a statepoint, we
1897 // transform the tail-call like structure to a call to a void function
1898 // followed by unreachable to get better codegen.
1899 Replacements.push_back(
1900 DeferredReplacement::createDeoptimizeReplacement(Call));
1901 } else {
1902 Token->setName("statepoint_token");
1903 if (!Call->getType()->isVoidTy() && !Call->use_empty()) {
1904 StringRef Name = Call->hasName() ? Call->getName() : "";
1905 CallInst *GCResult = Builder.CreateGCResult(Token, Call->getType(), Name);
1906 GCResult->setAttributes(
1907         AttributeList::get(GCResult->getContext(), AttributeList::ReturnIndex,
1908                            Call->getAttributes().getRetAttrs()));
1909
1910       // We cannot RAUW or delete the original call, because it could be in the
1911 // live set of some other safepoint, in which case that safepoint's
1912 // PartiallyConstructedSafepointRecord will hold a raw pointer to this
1913 // llvm::Instruction. Instead, we defer the replacement and deletion to
1914 // after the live sets have been made explicit in the IR, and we no longer
1915 // have raw pointers to worry about.
1916 Replacements.emplace_back(
1917 DeferredReplacement::createRAUW(Call, GCResult));
1918 } else {
1919 Replacements.emplace_back(DeferredReplacement::createDelete(Call));
1920 }
1921 }
1922
1923 Result.StatepointToken = Token;
1924
1925 // Second, create a gc.relocate for every live variable
1926 CreateGCRelocates(LiveVariables, BasePtrs, Token, Builder, GC);
1927}
1928
1929// Replace an existing gc.statepoint with a new one and a set of gc.relocates
1930// which make the relocations happening at this safepoint explicit.
1931//
1932// WARNING: Does not do any fixup to adjust users of the original live
1933 // values. That's the caller's responsibility.
1934static void
1935 makeStatepointExplicit(DominatorTree &DT, CallBase *Call,
1936                        PartiallyConstructedSafepointRecord &Result,
1937 std::vector<DeferredReplacement> &Replacements,
1938 const PointerToBaseTy &PointerToBase, GCStrategy *GC) {
1939 const auto &LiveSet = Result.LiveSet;
1940
1941 // Convert to vector for efficient cross referencing.
1942 SmallVector<Value *, 64> BaseVec, LiveVec;
1943 LiveVec.reserve(LiveSet.size());
1944 BaseVec.reserve(LiveSet.size());
1945 for (Value *L : LiveSet) {
1946 LiveVec.push_back(L);
1947 assert(PointerToBase.count(L));
1948 Value *Base = PointerToBase.find(L)->second;
1949 BaseVec.push_back(Base);
1950 }
1951 assert(LiveVec.size() == BaseVec.size());
1952
1953 // Do the actual rewriting and delete the old statepoint
1954 makeStatepointExplicitImpl(Call, BaseVec, LiveVec, Result, Replacements,
1955 PointerToBase, GC);
1956}
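// As a before/after sketch (hypothetical IR; the exact statepoint operands
// vary with flags, deopt state and transition arguments):
//
//   before:  %r = call ptr addrspace(1) @foo(ptr addrspace(1) %obj)
//
//   after:   %tok = call token (...) @llvm.experimental.gc.statepoint.p0(
//                       i64 <id>, i32 0, ptr @foo, i32 1, i32 0,
//                       ptr addrspace(1) %obj, i32 0, i32 0, ...)
//            %r = call ptr addrspace(1) @llvm.experimental.gc.result.p1(token %tok)
//            %obj.relocated = call ptr addrspace(1)
//                @llvm.experimental.gc.relocate.p1(token %tok, i32 0, i32 0)
//
// Rewriting the remaining uses of %obj to %obj.relocated is the caller's
// fixup mentioned in the warning above.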
1957
1958// Helper function for the relocationViaAlloca.
1959//
1960 // It receives an iterator range over the statepoint's gc relocates and emits a
1961 // store to the assigned location (via allocaMap) for each one of them. It adds
1962 // the visited values into the visitedLiveValues set, which we will later use
1963 // for validation checking.
1964static void
1965 insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
1966                        DenseMap<Value *, AllocaInst *> &AllocaMap,
1967                        DenseSet<Value *> &VisitedLiveValues) {
1968 for (User *U : GCRelocs) {
1969 GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U);
1970 if (!Relocate)
1971 continue;
1972
1973 Value *OriginalValue = Relocate->getDerivedPtr();
1974 assert(AllocaMap.count(OriginalValue));
1975 Value *Alloca = AllocaMap[OriginalValue];
1976
1977 // Emit store into the related alloca.
1978 assert(Relocate->getNextNode() &&
1979 "Should always have one since it's not a terminator");
1980 new StoreInst(Relocate, Alloca, std::next(Relocate->getIterator()));
1981
1982#ifndef NDEBUG
1983 VisitedLiveValues.insert(OriginalValue);
1984#endif
1985 }
1986}
1987
1988// Helper function for the "relocationViaAlloca". Similar to the
1989// "insertRelocationStores" but works for rematerialized values.
1990 static void insertRematerializationStores(
1991     const RematerializedValueMapTy &RematerializedValues,
1992     DenseMap<Value *, AllocaInst *> &AllocaMap,
1993     DenseSet<Value *> &VisitedLiveValues) {
1994 for (auto RematerializedValuePair: RematerializedValues) {
1995 Instruction *RematerializedValue = RematerializedValuePair.first;
1996 Value *OriginalValue = RematerializedValuePair.second;
1997
1998 assert(AllocaMap.count(OriginalValue) &&
1999 "Can not find alloca for rematerialized value");
2000 Value *Alloca = AllocaMap[OriginalValue];
2001
2002 new StoreInst(RematerializedValue, Alloca,
2003 std::next(RematerializedValue->getIterator()));
2004
2005#ifndef NDEBUG
2006 VisitedLiveValues.insert(OriginalValue);
2007#endif
2008 }
2009}
2010
2011/// Do all the relocation update via allocas and mem2reg
2012 static void relocationViaAlloca(
2013     Function &F, DominatorTree &DT, ArrayRef<Value *> Live,
2014     ArrayRef<PartiallyConstructedSafepointRecord> Records) {
2015#ifndef NDEBUG
2016 // record initial number of (static) allocas; we'll check we have the same
2017 // number when we get done.
2018 int InitialAllocaNum = 0;
2019 for (Instruction &I : F.getEntryBlock())
2020 if (isa<AllocaInst>(I))
2021 InitialAllocaNum++;
2022#endif
2023
2024 // TODO-PERF: change data structures, reserve
2025   DenseMap<Value *, AllocaInst *> AllocaMap;
2026   SmallVector<AllocaInst *, 200> PromotableAllocas;
2027   // Used later to check that we have enough allocas to store all values
2028 std::size_t NumRematerializedValues = 0;
2029 PromotableAllocas.reserve(Live.size());
2030
2031 // Emit alloca for "LiveValue" and record it in "allocaMap" and
2032 // "PromotableAllocas"
2033 const DataLayout &DL = F.getParent()->getDataLayout();
2034 auto emitAllocaFor = [&](Value *LiveValue) {
2035 AllocaInst *Alloca =
2036 new AllocaInst(LiveValue->getType(), DL.getAllocaAddrSpace(), "",
2037 F.getEntryBlock().getFirstNonPHIIt());
2038 AllocaMap[LiveValue] = Alloca;
2039 PromotableAllocas.push_back(Alloca);
2040 };
2041
2042 // Emit alloca for each live gc pointer
2043 for (Value *V : Live)
2044 emitAllocaFor(V);
2045
2046 // Emit allocas for rematerialized values
2047 for (const auto &Info : Records)
2048 for (auto RematerializedValuePair : Info.RematerializedValues) {
2049 Value *OriginalValue = RematerializedValuePair.second;
2050 if (AllocaMap.contains(OriginalValue))
2051 continue;
2052
2053 emitAllocaFor(OriginalValue);
2054 ++NumRematerializedValues;
2055 }
2056
2057 // The next two loops are part of the same conceptual operation. We need to
2058 // insert a store to the alloca after the original def and at each
2059 // redefinition. We need to insert a load before each use. These are split
2060 // into distinct loops for performance reasons.
2061
2062 // Update gc pointer after each statepoint: either store a relocated value or
2063 // null (if no relocated value was found for this gc pointer and it is not a
2064   // gc_result). This must happen before we update the statepoint with a load
2065   // of the alloca, otherwise we lose the link between statepoint and old def.
2066 for (const auto &Info : Records) {
2067 Value *Statepoint = Info.StatepointToken;
2068
2069 // This will be used for consistency check
2070 DenseSet<Value *> VisitedLiveValues;
2071
2072 // Insert stores for normal statepoint gc relocates
2073 insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues);
2074
2075     // In case it was an invoke statepoint,
2076     // we will insert stores for the exceptional path gc relocates.
2077 if (isa<InvokeInst>(Statepoint)) {
2078 insertRelocationStores(Info.UnwindToken->users(), AllocaMap,
2079 VisitedLiveValues);
2080 }
2081
2082     // Do the same for rematerialized values
2083 insertRematerializationStores(Info.RematerializedValues, AllocaMap,
2084 VisitedLiveValues);
2085
2086 if (ClobberNonLive) {
2087 // As a debugging aid, pretend that an unrelocated pointer becomes null at
2088 // the gc.statepoint. This will turn some subtle GC problems into
2089 // slightly easier to debug SEGVs. Note that on large IR files with
2090       // lots of gc.statepoints this is extremely costly both memory- and
2091       // time-wise.
2092       SmallVector<AllocaInst *, 64> ToClobber;
2093       for (auto Pair : AllocaMap) {
2094 Value *Def = Pair.first;
2095 AllocaInst *Alloca = Pair.second;
2096
2097 // This value was relocated
2098 if (VisitedLiveValues.count(Def)) {
2099 continue;
2100 }
2101 ToClobber.push_back(Alloca);
2102 }
2103
2104 auto InsertClobbersAt = [&](BasicBlock::iterator IP) {
2105 for (auto *AI : ToClobber) {
2106 auto AT = AI->getAllocatedType();
2107 Constant *CPN;
2108 if (AT->isVectorTy())
2109             CPN = ConstantAggregateZero::get(AT);
2110           else
2111 CPN = ConstantPointerNull::get(cast<PointerType>(AT));
2112 new StoreInst(CPN, AI, IP);
2113 }
2114 };
2115
2116 // Insert the clobbering stores. These may get intermixed with the
2117 // gc.results and gc.relocates, but that's fine.
2118 if (auto II = dyn_cast<InvokeInst>(Statepoint)) {
2119 InsertClobbersAt(II->getNormalDest()->getFirstInsertionPt());
2120 InsertClobbersAt(II->getUnwindDest()->getFirstInsertionPt());
2121 } else {
2122 InsertClobbersAt(
2123 std::next(cast<Instruction>(Statepoint)->getIterator()));
2124 }
2125 }
2126 }
2127
2128   // Update the uses with loads from the alloca, and add a store for each initial def.
2129 for (auto Pair : AllocaMap) {
2130 Value *Def = Pair.first;
2131 AllocaInst *Alloca = Pair.second;
2132
2133     // We pre-record the uses of allocas so that we don't have to worry about
2134     // later updates that change the user information.
2135
2136     SmallVector<Instruction *, 20> Uses;
2137     // PERF: trade a linear scan for repeated reallocation
2138 Uses.reserve(Def->getNumUses());
2139 for (User *U : Def->users()) {
2140 if (!isa<ConstantExpr>(U)) {
2141         // If the def has a ConstantExpr use, then the def is either a
2142         // ConstantExpr itself or null. In either case
2143 // (recursively in the first, directly in the second), the oop
2144 // it is ultimately dependent on is null and this particular
2145 // use does not need to be fixed up.
2146 Uses.push_back(cast<Instruction>(U));
2147 }
2148 }
2149
2150     llvm::sort(Uses);
2151     auto Last = std::unique(Uses.begin(), Uses.end());
2152 Uses.erase(Last, Uses.end());
2153
2154 for (Instruction *Use : Uses) {
2155 if (isa<PHINode>(Use)) {
2156 PHINode *Phi = cast<PHINode>(Use);
2157 for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) {
2158 if (Def == Phi->getIncomingValue(i)) {
2159 LoadInst *Load = new LoadInst(
2160 Alloca->getAllocatedType(), Alloca, "",
2161 Phi->getIncomingBlock(i)->getTerminator()->getIterator());
2162 Phi->setIncomingValue(i, Load);
2163 }
2164 }
2165 } else {
2166 LoadInst *Load = new LoadInst(Alloca->getAllocatedType(), Alloca, "",
2167 Use->getIterator());
2168 Use->replaceUsesOfWith(Def, Load);
2169 }
2170 }
2171
2172 // Emit store for the initial gc value. Store must be inserted after load,
2173 // otherwise store will be in alloca's use list and an extra load will be
2174 // inserted before it.
2175 StoreInst *Store = new StoreInst(Def, Alloca, /*volatile*/ false,
2176 DL.getABITypeAlign(Def->getType()));
2177 if (Instruction *Inst = dyn_cast<Instruction>(Def)) {
2178 if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) {
2179       // InvokeInst is a terminator, so the store needs to be inserted into its
2180 // normal destination block.
2181 BasicBlock *NormalDest = Invoke->getNormalDest();
2182 Store->insertBefore(NormalDest->getFirstNonPHI());
2183 } else {
2184 assert(!Inst->isTerminator() &&
2185 "The only terminator that can produce a value is "
2186 "InvokeInst which is handled above.");
2187 Store->insertAfter(Inst);
2188 }
2189 } else {
2190 assert(isa<Argument>(Def));
2191 Store->insertAfter(cast<Instruction>(Alloca));
2192 }
2193 }
2194
2195 assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues &&
2196 "we must have the same allocas with lives");
2197 (void) NumRematerializedValues;
2198 if (!PromotableAllocas.empty()) {
2199 // Apply mem2reg to promote alloca to SSA
2200 PromoteMemToReg(PromotableAllocas, DT);
2201 }
2202
2203#ifndef NDEBUG
2204 for (auto &I : F.getEntryBlock())
2205 if (isa<AllocaInst>(I))
2206 InitialAllocaNum--;
2207 assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas");
2208#endif
2209}
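// Conceptually (a simplified sketch), the rewrite for each live pointer %p is:
//
//   entry:                  %p.alloca = alloca ptr addrspace(1)
//   after the def of %p:    store ptr addrspace(1) %p, ptr %p.alloca
//   after each statepoint:  store ptr addrspace(1) %p.relocated, ptr %p.alloca
//   before each use:        %p.reload = load ptr addrspace(1), ptr %p.alloca
//
// after which PromoteMemToReg rebuilds clean SSA form, inserting whatever phis
// are needed to merge the original and relocated values.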
2210
2211/// Implement a unique function which doesn't require we sort the input
2212/// vector. Doing so has the effect of changing the output of a couple of
2213/// tests in ways which make them less useful in testing fused safepoints.
2214template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) {
2215 SmallSet<T, 8> Seen;
2216 erase_if(Vec, [&](const T &V) { return !Seen.insert(V).second; });
2217}
2218
2219/// Insert holders so that each Value is obviously live through the entire
2220/// lifetime of the call.
2221static void insertUseHolderAfter(CallBase *Call, const ArrayRef<Value *> Values,
2222 SmallVectorImpl<CallInst *> &Holders) {
2223 if (Values.empty())
2224 // No values to hold live, might as well not insert the empty holder
2225 return;
2226
2227 Module *M = Call->getModule();
2228 // Use a dummy vararg function to actually hold the values live
2229 FunctionCallee Func = M->getOrInsertFunction(
2230 "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true));
2231 if (isa<CallInst>(Call)) {
2232 // For call safepoints insert dummy calls right after safepoint
2233 Holders.push_back(
2234 CallInst::Create(Func, Values, "", std::next(Call->getIterator())));
2235 return;
2236 }
2237   // For invoke safepoints insert dummy calls both in normal and
2238   // exceptional destination blocks
2239   auto *II = cast<InvokeInst>(Call);
2240   Holders.push_back(CallInst::Create(
2241       Func, Values, "", II->getNormalDest()->getFirstInsertionPt()));
2242   Holders.push_back(CallInst::Create(
2243       Func, Values, "", II->getUnwindDest()->getFirstInsertionPt()));
2244}
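// The holder itself is just a dummy vararg call, e.g. (sketch):
//
//   %tok = invoke token (...) @llvm.experimental.gc.statepoint.p0(...)
//          to label %normal unwind label %lpad
//   normal:
//     call void (...) @__tmp_use(ptr addrspace(1) %a, ptr addrspace(1) %b)
//
// which makes %a and %b visibly live across the safepoint until the holders
// are erased again after liveness has been recomputed.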
2245
2246 static void findLiveReferences(
2247     Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
2248     MutableArrayRef<struct PartiallyConstructedSafepointRecord> records,
2249     GCStrategy *GC) {
2250 GCPtrLivenessData OriginalLivenessData;
2251 computeLiveInValues(DT, F, OriginalLivenessData, GC);
2252 for (size_t i = 0; i < records.size(); i++) {
2253 struct PartiallyConstructedSafepointRecord &info = records[i];
2254 analyzeParsePointLiveness(DT, OriginalLivenessData, toUpdate[i], info, GC);
2255 }
2256}
2257
2258 // Helper function for the "rematerializeLiveValues". It walks the use chain
2259 // starting from the "CurrentValue" until it reaches the root of the chain, i.e.
2260 // the base or a value it cannot process. Only "simple" values are processed
2261 // (currently GEPs and no-op casts). The returned root is examined by the
2262// callers of findRematerializableChainToBasePointer. Fills "ChainToBase" array
2263// with all visited values.
2264 static Value* findRematerializableChainToBasePointer(
2265   SmallVectorImpl<Instruction*> &ChainToBase,
2266 Value *CurrentValue) {
2267 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurrentValue)) {
2268 ChainToBase.push_back(GEP);
2269 return findRematerializableChainToBasePointer(ChainToBase,
2270 GEP->getPointerOperand());
2271 }
2272
2273 if (CastInst *CI = dyn_cast<CastInst>(CurrentValue)) {
2274 if (!CI->isNoopCast(CI->getModule()->getDataLayout()))
2275 return CI;
2276
2277 ChainToBase.push_back(CI);
2278 return findRematerializableChainToBasePointer(ChainToBase,
2279 CI->getOperand(0));
2280 }
2281
2282 // We have reached the root of the chain, which is either equal to the base or
2283 // is the first unsupported value along the use chain.
2284 return CurrentValue;
2285}
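// For instance (an illustrative chain): starting from
//
//   %p1 = getelementptr inbounds [16 x i8], ptr addrspace(1) %base, i64 0, i64 4
//   %p2 = getelementptr inbounds i8, ptr addrspace(1) %p1, i64 8
//
// a walk from %p2 records [%p2, %p1] in ChainToBase and returns %base as the
// root; a non-noop cast or any other unsupported instruction ends the walk and
// is returned instead.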
2286
2287// Helper function for the "rematerializeLiveValues". Compute cost of the use
2288// chain we are going to rematerialize.
2289static InstructionCost
2290 chainToBasePointerCost(SmallVectorImpl<Instruction *> &Chain,
2291                        TargetTransformInfo &TTI) {
2292   InstructionCost Cost = 0;
2293
2294 for (Instruction *Instr : Chain) {
2295 if (CastInst *CI = dyn_cast<CastInst>(Instr)) {
2296 assert(CI->isNoopCast(CI->getModule()->getDataLayout()) &&
2297 "non noop cast is found during rematerialization");
2298
2299 Type *SrcTy = CI->getOperand(0)->getType();
2300 Cost += TTI.getCastInstrCost(CI->getOpcode(), CI->getType(), SrcTy,
2301                                    TTI::CastContextHint::None,
2302                                    TargetTransformInfo::TCK_SizeAndLatency);
2303
2304 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) {
2305 // Cost of the address calculation
2306 Type *ValTy = GEP->getSourceElementType();
2307       Cost += TTI.getAddressComputationCost(ValTy);
2308
2309 // And cost of the GEP itself
2310     // TODO: Use TTI->getGEPCost here (it exists, but appears not to be
2311     // allowed for external usage)
2312 if (!GEP->hasAllConstantIndices())
2313 Cost += 2;
2314
2315 } else {
2316 llvm_unreachable("unsupported instruction type during rematerialization");
2317 }
2318 }
2319
2320 return Cost;
2321}
2322
2323static bool AreEquivalentPhiNodes(PHINode &OrigRootPhi, PHINode &AlternateRootPhi) {
2324 unsigned PhiNum = OrigRootPhi.getNumIncomingValues();
2325 if (PhiNum != AlternateRootPhi.getNumIncomingValues() ||
2326 OrigRootPhi.getParent() != AlternateRootPhi.getParent())
2327 return false;
2328 // Map of incoming values and their corresponding basic blocks of
2329 // OrigRootPhi.
2330 SmallDenseMap<Value *, BasicBlock *, 8> CurrentIncomingValues;
2331 for (unsigned i = 0; i < PhiNum; i++)
2332 CurrentIncomingValues[OrigRootPhi.getIncomingValue(i)] =
2333 OrigRootPhi.getIncomingBlock(i);
2334
2335   // Both current and base PHIs should have the same incoming values and
2336 // the same basic blocks corresponding to the incoming values.
2337 for (unsigned i = 0; i < PhiNum; i++) {
2338 auto CIVI =
2339 CurrentIncomingValues.find(AlternateRootPhi.getIncomingValue(i));
2340 if (CIVI == CurrentIncomingValues.end())
2341 return false;
2342 BasicBlock *CurrentIncomingBB = CIVI->second;
2343 if (CurrentIncomingBB != AlternateRootPhi.getIncomingBlock(i))
2344 return false;
2345 }
2346 return true;
2347}
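// Illustrative sketch of the pattern this detects: the original phi and the
// ".base" phi inserted by findBasePointer agree on every incoming
// (value, block) pair, e.g.
//
//   %phi      = phi ptr addrspace(1) [ %a, %left ], [ %b, %right ]
//   %phi.base = phi ptr addrspace(1) [ %a, %left ], [ %b, %right ]
//
// and can therefore be treated as the same SSA value for rematerialization.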
2348
2349 // Find derived pointers that can be recomputed cheaply enough and fill
2350 // RematerizationCandidates with such candidates.
2351static void
2352findRematerializationCandidates(PointerToBaseTy PointerToBase,
2353 RematCandTy &RematerizationCandidates,
2354                                 TargetTransformInfo &TTI) {
2355   const unsigned int ChainLengthThreshold = 10;
2356
2357 for (auto P2B : PointerToBase) {
2358 auto *Derived = P2B.first;
2359 auto *Base = P2B.second;
2360 // Consider only derived pointers.
2361 if (Derived == Base)
2362 continue;
2363
2364 // For each live pointer find its defining chain.
2365     SmallVector<Instruction *, 3> ChainToBase;
2366     Value *RootOfChain =
2367 findRematerializableChainToBasePointer(ChainToBase, Derived);
2368
2369 // Nothing to do, or chain is too long
2370     if (ChainToBase.size() == 0 ||
2371         ChainToBase.size() > ChainLengthThreshold)
2372 continue;
2373
2374 // Handle the scenario where the RootOfChain is not equal to the
2375 // Base Value, but they are essentially the same phi values.
2376 if (RootOfChain != PointerToBase[Derived]) {
2377 PHINode *OrigRootPhi = dyn_cast<PHINode>(RootOfChain);
2378 PHINode *AlternateRootPhi = dyn_cast<PHINode>(PointerToBase[Derived]);
2379 if (!OrigRootPhi || !AlternateRootPhi)
2380 continue;
2381       // PHI nodes that have the same incoming values and belong to the same
2382       // basic block are essentially the same SSA value. When the original phi
2383       // has incoming values with different base pointers, the original phi is
2384       // marked as a conflict, and an additional `AlternateRootPhi` with the same
2385       // incoming values gets generated by the findBasePointer function. We need
2386       // to verify that the newly generated AlternateRootPhi (the .base version of
2387       // the phi) and RootOfChain (the original phi node itself) are the same, so that we
2388 // can rematerialize the gep and casts. This is a workaround for the
2389 // deficiency in the findBasePointer algorithm.
2390 if (!AreEquivalentPhiNodes(*OrigRootPhi, *AlternateRootPhi))
2391 continue;
2392 }
2393 // Compute cost of this chain.
2395 // TODO: We can also account for cases when we will be able to remove some
2396     // of the rematerialized values by later optimization passes. I.e. if
2397 // we rematerialized several intersecting chains. Or if original values
2398 // don't have any uses besides this statepoint.
2399
2400 // Ok, there is a candidate.
2401 RematerizlizationCandidateRecord Record;
2402 Record.ChainToBase = ChainToBase;
2403 Record.RootOfChain = RootOfChain;
2404 Record.Cost = Cost;
2405 RematerizationCandidates.insert({ Derived, Record });
2406 }
2407}
2408
2409// Try to rematerialize derived pointers immediately before their uses
2410// (instead of rematerializing after every statepoint it is live through).
2411 // This can be beneficial when a derived pointer is live across many
2412 // statepoints, but its uses are rare.
2413 static void rematerializeLiveValuesAtUses(
2414     RematCandTy &RematerizationCandidates,
2415     MutableArrayRef<PartiallyConstructedSafepointRecord> Records,
2416 PointerToBaseTy &PointerToBase) {
2417 if (!RematDerivedAtUses)
2418 return;
2419
2420 SmallVector<Instruction *, 32> LiveValuesToBeDeleted;
2421
2422 LLVM_DEBUG(dbgs() << "Rematerialize derived pointers at uses, "
2423 << "Num statepoints: " << Records.size() << '\n');
2424
2425 for (auto &It : RematerizationCandidates) {
2426 Instruction *Cand = cast<Instruction>(It.first);
2427 auto &Record = It.second;
2428
2429     if (Record.Cost >= RematerializationThreshold)
2430       continue;
2431
2432 if (Cand->user_empty())
2433 continue;
2434
2435 if (Cand->hasOneUse())
2436 if (auto *U = dyn_cast<Instruction>(Cand->getUniqueUndroppableUser()))
2437 if (U->getParent() == Cand->getParent())
2438 continue;
2439
2440 // Rematerialization before PHI nodes is not implemented.
2441 if (llvm::any_of(Cand->users(),
2442 [](const auto *U) { return isa<PHINode>(U); }))
2443 continue;
2444
2445 LLVM_DEBUG(dbgs() << "Trying cand " << *Cand << " ... ");
2446
2447     // The number of rematerialization instructions we introduce equals the
2448     // number of candidate uses.
2449     // The number of rematerialization instructions we eliminate equals the
2450     // number of statepoints the candidate is live through.
2451     // Consider the transformation profitable if the latter is greater than the
2452     // former (in other words, we create fewer instructions than we eliminate).
2453 unsigned NumLiveStatepoints = llvm::count_if(
2454 Records, [Cand](const auto &R) { return R.LiveSet.contains(Cand); });
2455 unsigned NumUses = Cand->getNumUses();
2456
2457 LLVM_DEBUG(dbgs() << "Num uses: " << NumUses << " Num live statepoints: "
2458 << NumLiveStatepoints << " ");
2459
2460 if (NumLiveStatepoints < NumUses) {
2461 LLVM_DEBUG(dbgs() << "not profitable\n");
2462 continue;
2463 }
2464
2465 // If rematerialization is 'free', then favor rematerialization at
2466 // uses as it generally shortens live ranges.
2467 // TODO: Short (size ==1) chains only?
2468 if (NumLiveStatepoints == NumUses && Record.Cost > 0) {
2469 LLVM_DEBUG(dbgs() << "not profitable\n");
2470 continue;
2471 }
2472
2473 LLVM_DEBUG(dbgs() << "looks profitable\n");
2474
2475     // ChainToBase may contain another remat candidate (as a sub-chain) which
2476     // has been rewritten by now. Need to recollect the chain to have an
2477     // up-to-date value.
2478 // TODO: sort records in findRematerializationCandidates() in
2479 // decreasing chain size order?
2480 if (Record.ChainToBase.size() > 1) {
2481 Record.ChainToBase.clear();
2482       findRematerializableChainToBasePointer(Record.ChainToBase, Cand);
2483     }
2484
2485 // Current rematerialization algorithm is very simple: we rematerialize
2486     // immediately before EVERY use, even if there are several uses in the same
2487     // block or if the use is local to the Cand def. The reason is that this allows
2488     // us to avoid recomputing liveness without complicated analysis:
2489     // - If we did not eliminate all uses of the original Candidate, we do not
2490     //   know exactly in which BBs it is still live.
2491 // - If we rematerialize once per BB, we need to find proper insertion
2492 // place (first use in block, but after Def) and analyze if there is
2493 // statepoint between uses in the block.
2494 while (!Cand->user_empty()) {
2495 Instruction *UserI = cast<Instruction>(*Cand->user_begin());
2496 Instruction *RematChain = rematerializeChain(
2497 Record.ChainToBase, UserI, Record.RootOfChain, PointerToBase[Cand]);
2498 UserI->replaceUsesOfWith(Cand, RematChain);
2499 PointerToBase[RematChain] = PointerToBase[Cand];
2500 }
2501 LiveValuesToBeDeleted.push_back(Cand);
2502 }
2503
2504 LLVM_DEBUG(dbgs() << "Rematerialized " << LiveValuesToBeDeleted.size()
2505 << " derived pointers\n");
2506 for (auto *Cand : LiveValuesToBeDeleted) {
2507 assert(Cand->use_empty() && "Unexpected user remain");
2508 RematerizationCandidates.erase(Cand);
2509 for (auto &R : Records) {
2510 assert(!R.LiveSet.contains(Cand) ||
2511 R.LiveSet.contains(PointerToBase[Cand]));
2512 R.LiveSet.remove(Cand);
2513 }
2514 }
2515
2516   // Recollect the chains that were not rematerialized - we might have
2517   // rewritten their sub-chains.
2518 if (!LiveValuesToBeDeleted.empty()) {
2519 for (auto &P : RematerizationCandidates) {
2520 auto &R = P.second;
2521 if (R.ChainToBase.size() > 1) {
2522 R.ChainToBase.clear();
2523 findRematerializableChainToBasePointer(R.ChainToBase, P.first);
2524 }
2525 }
2526 }
2527}
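// A worked example of the heuristic above (numbers hypothetical): a GEP that
// is live across three statepoints but has only two uses saves three
// relocations at the cost of two clones, so it is rewritten; with two
// statepoints and two uses it is rewritten only when Record.Cost is zero.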
2528
2529 // From the statepoint live set pick values that are cheaper to recompute than
2530 // to relocate. Remove these values from the live set, rematerialize them after
2531 // the statepoint and record them in the "Info" structure. Note that similar to
2532 // relocated values we don't do any user adjustments here.
2533 static void rematerializeLiveValues(CallBase *Call,
2534                                     PartiallyConstructedSafepointRecord &Info,
2535 PointerToBaseTy &PointerToBase,
2536 RematCandTy &RematerizationCandidates,
2537                                     TargetTransformInfo &TTI) {
2538   // Record values we are going to delete from this statepoint live set.
2539   // We cannot do this in the following loop due to iterator invalidation.
2540 SmallVector<Value *, 32> LiveValuesToBeDeleted;
2541
2542 for (Value *LiveValue : Info.LiveSet) {
2543 auto It = RematerizationCandidates.find(LiveValue);
2544 if (It == RematerizationCandidates.end())
2545 continue;
2546
2547 RematerizlizationCandidateRecord &Record = It->second;
2548
2549     InstructionCost Cost = Record.Cost;
2550     // For invokes we need to rematerialize each chain twice - for normal and
2551 // for unwind basic blocks. Model this by multiplying cost by two.
2552 if (isa<InvokeInst>(Call))
2553 Cost *= 2;
2554
2555 // If it's too expensive - skip it.
2556     if (Cost >= RematerializationThreshold)
2557       continue;
2558
2559 // Remove value from the live set
2560 LiveValuesToBeDeleted.push_back(LiveValue);
2561
2562 // Clone instructions and record them inside "Info" structure.
2563
2564 // Different cases for calls and invokes. For invokes we need to clone
2565     // instructions on both the normal and unwind paths.
2566 if (isa<CallInst>(Call)) {
2567 Instruction *InsertBefore = Call->getNextNode();
2568 assert(InsertBefore);
2569 Instruction *RematerializedValue =
2570 rematerializeChain(Record.ChainToBase, InsertBefore,
2571 Record.RootOfChain, PointerToBase[LiveValue]);
2572 Info.RematerializedValues[RematerializedValue] = LiveValue;
2573 } else {
2574 auto *Invoke = cast<InvokeInst>(Call);
2575
2576 Instruction *NormalInsertBefore =
2577 &*Invoke->getNormalDest()->getFirstInsertionPt();
2578 Instruction *UnwindInsertBefore =
2579 &*Invoke->getUnwindDest()->getFirstInsertionPt();
2580
2581 Instruction *NormalRematerializedValue =
2582 rematerializeChain(Record.ChainToBase, NormalInsertBefore,
2583 Record.RootOfChain, PointerToBase[LiveValue]);
2584 Instruction *UnwindRematerializedValue =
2585 rematerializeChain(Record.ChainToBase, UnwindInsertBefore,
2586 Record.RootOfChain, PointerToBase[LiveValue]);
2587
2588 Info.RematerializedValues[NormalRematerializedValue] = LiveValue;
2589 Info.RematerializedValues[UnwindRematerializedValue] = LiveValue;
2590 }
2591 }
2592
2593 // Remove rematerialized values from the live set.
2594 for (auto *LiveValue: LiveValuesToBeDeleted) {
2595 Info.LiveSet.remove(LiveValue);
2596 }
2597}
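// Sketch of the effect on a call statepoint (hypothetical IR): rather than
// relocating %derived, the chain is recomputed from the relocated base,
//
//   %tok = call token (...) @llvm.experimental.gc.statepoint.p0(...)
//   %base.relocated = call ptr addrspace(1)
//       @llvm.experimental.gc.relocate.p1(token %tok, i32 0, i32 0)
//   %derived.remat = getelementptr i8, ptr addrspace(1) %base.relocated, i64 16
//
// so only %base remains in the statepoint's live set.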
2598
2599 static bool inlineGetBaseAndOffset(Function &F,
2600                                    SmallVectorImpl<CallInst *> &Intrinsics,
2601 DefiningValueMapTy &DVCache,
2602 IsKnownBaseMapTy &KnownBases) {
2603 auto &Context = F.getContext();
2604 auto &DL = F.getParent()->getDataLayout();
2605 bool Changed = false;
2606
2607 for (auto *Callsite : Intrinsics)
2608 switch (Callsite->getIntrinsicID()) {
2609 case Intrinsic::experimental_gc_get_pointer_base: {
2610 Changed = true;
2611 Value *Base =
2612 findBasePointer(Callsite->getOperand(0), DVCache, KnownBases);
2613 assert(!DVCache.count(Callsite));
2614 Callsite->replaceAllUsesWith(Base);
2615 if (!Base->hasName())
2616 Base->takeName(Callsite);
2617 Callsite->eraseFromParent();
2618 break;
2619 }
2620 case Intrinsic::experimental_gc_get_pointer_offset: {
2621 Changed = true;
2622 Value *Derived = Callsite->getOperand(0);
2623 Value *Base = findBasePointer(Derived, DVCache, KnownBases);
2624 assert(!DVCache.count(Callsite));
2625 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace();
2626 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace);
2627 IRBuilder<> Builder(Callsite);
2628 Value *BaseInt =
2629 Builder.CreatePtrToInt(Base, Type::getIntNTy(Context, IntPtrSize),
2630 suffixed_name_or(Base, ".int", ""));
2631 Value *DerivedInt =
2632 Builder.CreatePtrToInt(Derived, Type::getIntNTy(Context, IntPtrSize),
2633 suffixed_name_or(Derived, ".int", ""));
2634 Value *Offset = Builder.CreateSub(DerivedInt, BaseInt);
2635 Callsite->replaceAllUsesWith(Offset);
2636 Offset->takeName(Callsite);
2637 Callsite->eraseFromParent();
2638 break;
2639 }
2640 default:
2641 llvm_unreachable("Unknown intrinsic");
2642 }
2643
2644 return Changed;
2645}
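// Sketch of the rewrite performed above (illustrative names): a call
//
//   %off = call i64 @llvm.experimental.gc.get.pointer.offset(ptr addrspace(1) %d)
//
// becomes, once the base %d.base has been found,
//
//   %d.base.int = ptrtoint ptr addrspace(1) %d.base to i64
//   %d.int      = ptrtoint ptr addrspace(1) %d to i64
//   %off        = sub i64 %d.int, %d.base.int
//
// and gc.get.pointer.base calls are simply replaced by the base value itself.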
2646
2647 static bool insertParsePoints(Function &F, DominatorTree &DT,
2648                               TargetTransformInfo &TTI,
2649                               SmallVectorImpl<CallBase *> &ToUpdate,
2650                               DefiningValueMapTy &DVCache,
2651 IsKnownBaseMapTy &KnownBases) {
2652 std::unique_ptr<GCStrategy> GC = findGCStrategy(F);
2653
2654#ifndef NDEBUG
2655 // Validate the input
2656 std::set<CallBase *> Uniqued;
2657 Uniqued.insert(ToUpdate.begin(), ToUpdate.end());
2658 assert(Uniqued.size() == ToUpdate.size() && "no duplicates please!");
2659
2660 for (CallBase *Call : ToUpdate)
2661 assert(Call->getFunction() == &F);
2662#endif
2663
2664 // When inserting gc.relocates for invokes, we need to be able to insert at
2665 // the top of the successor blocks. See the comment on
2666 // normalizeForInvokeSafepoint for exactly what is needed. Note that this step
2667 // may restructure the CFG.
2668 for (CallBase *Call : ToUpdate) {
2669 auto *II = dyn_cast<InvokeInst>(Call);
2670 if (!II)
2671 continue;
2672 normalizeForInvokeSafepoint(II->getNormalDest(), II->getParent(), DT);
2673 normalizeForInvokeSafepoint(II->getUnwindDest(), II->getParent(), DT);
2674 }
2675
2676 // A list of dummy calls added to the IR to keep various values obviously
2677 // live in the IR. We'll remove all of these when done.
2678   SmallVector<CallInst *, 64> Holders;
2679
2680 // Insert a dummy call with all of the deopt operands we'll need for the
2681 // actual safepoint insertion as arguments. This ensures reference operands
2682 // in the deopt argument list are considered live through the safepoint (and
2683 // thus makes sure they get relocated.)
2684 for (CallBase *Call : ToUpdate) {
2685 SmallVector<Value *, 64> DeoptValues;
2686
2687 for (Value *Arg : GetDeoptBundleOperands(Call)) {
2688 assert(!isUnhandledGCPointerType(Arg->getType(), GC.get()) &&
2689 "support for FCA unimplemented");
2690 if (isHandledGCPointerType(Arg->getType(), GC.get()))
2691 DeoptValues.push_back(Arg);
2692 }
2693
2694 insertUseHolderAfter(Call, DeoptValues, Holders);
2695 }
2696
2697   SmallVector<PartiallyConstructedSafepointRecord, 64> Records(ToUpdate.size());
2698
2699 // A) Identify all gc pointers which are statically live at the given call
2700 // site.
2701 findLiveReferences(F, DT, ToUpdate, Records, GC.get());
2702
2703 /// Global mapping from live pointers to a base-defining-value.
2704 PointerToBaseTy PointerToBase;
2705
2706 // B) Find the base pointers for each live pointer
2707 for (size_t i = 0; i < Records.size(); i++) {
2708 PartiallyConstructedSafepointRecord &info = Records[i];
2709 findBasePointers(DT, DVCache, ToUpdate[i], info, PointerToBase, KnownBases);
2710 }
2711 if (PrintBasePointers) {
2712 errs() << "Base Pairs (w/o Relocation):\n";
2713 for (auto &Pair : PointerToBase) {
2714 errs() << " derived ";
2715 Pair.first->printAsOperand(errs(), false);
2716 errs() << " base ";
2717 Pair.second->printAsOperand(errs(), false);
2718 errs() << "\n";
2719
2720 }
2721 }
2722
2723 // The base phi insertion logic (for any safepoint) may have inserted new
2724 // instructions which are now live at some safepoint. The simplest such
2725 // example is:
2726 // loop:
2727 // phi a <-- will be a new base_phi here
2728 // safepoint 1 <-- that needs to be live here
2729 // gep a + 1
2730 // safepoint 2
2731 // br loop
2732 // We insert some dummy calls after each safepoint to keep the base pointers
2733 // which were identified for that safepoint obviously live. We'll then
2734 // ask liveness for _every_ base inserted to see what is now live. Then we
2735 // remove the dummy calls.
2736 Holders.reserve(Holders.size() + Records.size());
2737 for (size_t i = 0; i < Records.size(); i++) {
2738 PartiallyConstructedSafepointRecord &Info = Records[i];
2739
2740     SmallVector<Value *, 128> Bases;
2741     for (auto *Derived : Info.LiveSet) {
2742 assert(PointerToBase.count(Derived) && "Missed base for derived pointer");
2743 Bases.push_back(PointerToBase[Derived]);
2744 }
2745
2746 insertUseHolderAfter(ToUpdate[i], Bases, Holders);
2747 }
2748
2749 // By selecting base pointers, we've effectively inserted new uses. Thus, we
2750 // need to rerun liveness. We may *also* have inserted new defs, but that's
2751 // not the key issue.
2752 recomputeLiveInValues(F, DT, ToUpdate, Records, PointerToBase, GC.get());
2753
2754 if (PrintBasePointers) {
2755 errs() << "Base Pairs: (w/Relocation)\n";
2756 for (auto Pair : PointerToBase) {
2757 errs() << " derived ";
2758 Pair.first->printAsOperand(errs(), false);
2759 errs() << " base ";
2760 Pair.second->printAsOperand(errs(), false);
2761 errs() << "\n";
2762 }
2763 }
2764
2765 // It is possible that non-constant live variables have a constant base. For
2766 // example, a GEP with a variable offset from a global. In this case we can
2767 // remove it from the liveset. We already don't add constants to the liveset
2768 // because we assume they won't move at runtime and the GC doesn't need to be
2769 // informed about them. The same reasoning applies if the base is constant.
2770 // Note that the relocation placement code relies on this filtering for
2771 // correctness as it expects the base to be in the liveset, which isn't true
2772 // if the base is constant.
2773 for (auto &Info : Records) {
2774 Info.LiveSet.remove_if([&](Value *LiveV) {
2775 assert(PointerToBase.count(LiveV) && "Missed base for derived pointer");
2776 return isa<Constant>(PointerToBase[LiveV]);
2777 });
2778 }
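  // An example of what gets filtered (hedged sketch): for
  //   %p = getelementptr i8, ptr addrspace(1) @global_obj, i64 %idx
  // the base is the constant @global_obj, so %p is removed from the live set,
  // matching the existing policy of never adding constants to it.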
2779
2780 for (CallInst *CI : Holders)
2781 CI->eraseFromParent();
2782
2783 Holders.clear();
2784
2785 // Compute the cost of possible re-materialization of derived pointers.
2786 RematCandTy RematerizationCandidates;
2787 findRematerializationCandidates(PointerToBase, RematerizationCandidates, TTI);
2788
2789 // In order to reduce the live set of a statepoint we might choose to rematerialize
2790 // some values instead of relocating them. This is purely an optimization and
2791 // does not influence correctness.
2792 // First try rematerialization at uses, then after statepoints.
2793 rematerializeLiveValuesAtUses(RematerizationCandidates, Records,
2794 PointerToBase);
2795 for (size_t i = 0; i < Records.size(); i++)
2796 rematerializeLiveValues(ToUpdate[i], Records[i], PointerToBase,
2797 RematerizationCandidates, TTI);
2798
2799 // We need this to safely RAUW and delete call or invoke return values that
2800 // may themselves be live over a statepoint. For details, please see usage in
2801 // makeStatepointExplicitImpl.
2802 std::vector<DeferredReplacement> Replacements;
2803
2804 // Now run through and replace the existing statepoints with new ones with
2805 // the live variables listed. We do not yet update uses of the values being
2806 // relocated. We have references to live variables that need to
2807 // survive to the last iteration of this loop. (By construction, the
2808 // previous statepoint cannot be a live variable, thus we can (and do) remove
2809 // the old statepoint calls as we go.)
2810 for (size_t i = 0; i < Records.size(); i++)
2811 makeStatepointExplicit(DT, ToUpdate[i], Records[i], Replacements,
2812 PointerToBase, GC.get());
2813
2814   ToUpdate.clear(); // prevent accidental use of invalid calls.
2815
2816 for (auto &PR : Replacements)
2817 PR.doReplacement();
2818
2819 Replacements.clear();
2820
2821 for (auto &Info : Records) {
2822     // These live sets may contain stale Value pointers, since we replaced calls
2823 // with operand bundles with calls wrapped in gc.statepoint, and some of
2824 // those calls may have been def'ing live gc pointers. Clear these out to
2825 // avoid accidentally using them.
2826 //
2827 // TODO: We should create a separate data structure that does not contain
2828 // these live sets, and migrate to using that data structure from this point
2829 // onward.
2830 Info.LiveSet.clear();
2831 }
2832 PointerToBase.clear();
2833
2834 // Do all the fixups of the original live variables to their relocated selves
2835   SmallVector<Value *, 128> Live;
2836   for (const PartiallyConstructedSafepointRecord &Info : Records) {
2837 // We can't simply save the live set from the original insertion. One of
2838 // the live values might be the result of a call which needs a safepoint.
2839 // That Value* no longer exists and we need to use the new gc_result.
2840 // Thankfully, the live set is embedded in the statepoint (and updated), so
2841 // we just grab that.
2842 llvm::append_range(Live, Info.StatepointToken->gc_args());
2843#ifndef NDEBUG
2844 // Do some basic validation checking on our liveness results before
2845 // performing relocation. Relocation can and will turn mistakes in liveness
2846     // results into nonsensical code which is much harder to debug.
2847 // TODO: It would be nice to test consistency as well
2848 assert(DT.isReachableFromEntry(Info.StatepointToken->getParent()) &&
2849 "statepoint must be reachable or liveness is meaningless");
2850 for (Value *V : Info.StatepointToken->gc_args()) {
2851 if (!isa<Instruction>(V))
2852         // Non-instruction values trivially dominate all possible uses
2853 continue;
2854 auto *LiveInst = cast<Instruction>(V);
2855 assert(DT.isReachableFromEntry(LiveInst->getParent()) &&
2856 "unreachable values should never be live");
2857 assert(DT.dominates(LiveInst, Info.StatepointToken) &&
2858 "basic SSA liveness expectation violated by liveness analysis");
2859 }
2860#endif
2861 }
2862   unique_unsorted(Live);
2863
2864#ifndef NDEBUG
2865 // Validation check
2866 for (auto *Ptr : Live)
2867 assert(isHandledGCPointerType(Ptr->getType(), GC.get()) &&
2868 "must be a gc pointer type");
2869#endif
2870
2871 relocationViaAlloca(F, DT, Live, Records);
2872 return !Records.empty();
2873}
2874
2875// List of all parameter and return attributes which must be stripped when
2876 // lowering from the abstract machine model. Note that we list attributes
2877 // here which aren't valid as return attributes; that is okay.
2878 static AttributeMask getParamAndReturnAttributesToRemove() {
2879   AttributeMask R;
2880 R.addAttribute(Attribute::Dereferenceable);
2881 R.addAttribute(Attribute::DereferenceableOrNull);
2882 R.addAttribute(Attribute::ReadNone);
2883 R.addAttribute(Attribute::ReadOnly);
2884 R.addAttribute(Attribute::WriteOnly);
2885 R.addAttribute(Attribute::NoAlias);
2886 R.addAttribute(Attribute::NoFree);
2887 return R;
2888}
2889
2890 static void stripNonValidAttributesFromPrototype(Function &F) {
2891   LLVMContext &Ctx = F.getContext();
2892
2893   // Intrinsics are very delicate. Lowering sometimes depends on the presence
2894   // of certain attributes for correctness, but we may have also inferred
2895   // additional ones in the abstract machine model which need to be stripped. This
2896 // assumes that the attributes defined in Intrinsic.td are conservatively
2897 // correct for both physical and abstract model.
2898 if (Intrinsic::ID id = F.getIntrinsicID()) {
2899 F.setAttributes(Intrinsic::getAttributes(Ctx, id));
2900 return;
2901 }
2902
2903   AttributeMask R = getParamAndReturnAttributesToRemove();
2904   for (Argument &A : F.args())
2905 if (isa<PointerType>(A.getType()))
2906 F.removeParamAttrs(A.getArgNo(), R);
2907
2908 if (isa<PointerType>(F.getReturnType()))
2909 F.removeRetAttrs(R);
2910
2911 for (auto Attr : FnAttrsToStrip)
2912 F.removeFnAttr(Attr);
2913}
2914
2915/// Certain metadata on instructions are invalid after running RS4GC.
2916/// Optimizations that run after RS4GC can incorrectly use this metadata to
2917/// optimize functions. We drop such metadata on the instruction.
2918 static void stripInvalidMetadataFromInstruction(Instruction &I) {
2919   if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
2920 return;
2921   // These are the metadata kinds that are still valid on loads and stores
2922   // after RS4GC.
2923 // The metadata implying dereferenceability and noalias are (conservatively)
2924 // dropped. This is because semantically, after RewriteStatepointsForGC runs,
2925 // all calls to gc.statepoint "free" the entire heap. Also, gc.statepoint can
2926 // touch the entire heap including noalias objects. Note: The reasoning is
2927   // the same as stripping the dereferenceability and noalias attributes that are
2928 // analogous to the metadata counterparts.
2929 // We also drop the invariant.load metadata on the load because that metadata
2930 // implies the address operand to the load points to memory that is never
2931 // changed once it became dereferenceable. This is no longer true after RS4GC.
2932 // Similar reasoning applies to invariant.group metadata, which applies to
2933 // loads within a group.
2934 unsigned ValidMetadataAfterRS4GC[] = {LLVMContext::MD_tbaa,
2935 LLVMContext::MD_range,
2936 LLVMContext::MD_alias_scope,
2937 LLVMContext::MD_nontemporal,
2938 LLVMContext::MD_nonnull,
2939 LLVMContext::MD_align,
2940 LLVMContext::MD_type};
2941
2942 // Drops all metadata on the instruction other than ValidMetadataAfterRS4GC.
2943 I.dropUnknownNonDebugMetadata(ValidMetadataAfterRS4GC);
2944}
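// For instance (sketch): a load annotated as
//
//   %v = load ptr addrspace(1), ptr %slot, !invariant.load !0, !nonnull !1
//
// keeps !nonnull (it is in the list above) but loses !invariant.load, since
// after RS4GC every gc.statepoint may change the underlying memory.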
2945
2946 static void stripNonValidDataFromBody(Function &F) {
2947   if (F.empty())
2948 return;
2949
2950 LLVMContext &Ctx = F.getContext();
2951 MDBuilder Builder(Ctx);
2952
2953   // Set of invariant.start instructions that we need to remove.
2954 // Use this to avoid invalidating the instruction iterator.
2955 SmallVector<IntrinsicInst*, 12> InvariantStartInstructions;
2956
2957 for (Instruction &I : instructions(F)) {
2958 // invariant.start on a memory location implies that the referenced memory
2959 // location is constant and unchanging. This is no longer true after
2960 // RewriteStatepointsForGC runs because there can be calls to gc.statepoint
2961 // which free the entire heap, and the presence of invariant.start allows
2962 // the optimizer to sink the load of a memory location past a statepoint,
2963 // which is incorrect.
2964 if (auto *II = dyn_cast<IntrinsicInst>(&I))
2965 if (II->getIntrinsicID() == Intrinsic::invariant_start) {
2966 InvariantStartInstructions.push_back(II);
2967 continue;
2968 }
2969
2970 if (MDNode *Tag = I.getMetadata(LLVMContext::MD_tbaa)) {
2971 MDNode *MutableTBAA = Builder.createMutableTBAAAccessTag(Tag);
2972 I.setMetadata(LLVMContext::MD_tbaa, MutableTBAA);
2973 }
2974
2975 stripInvalidMetadataFromInstruction(I);
2976
2977 AttributeMask R = getParamAndReturnAttributesToRemove();
2978 if (auto *Call = dyn_cast<CallBase>(&I)) {
2979 for (int i = 0, e = Call->arg_size(); i != e; i++)
2980 if (isa<PointerType>(Call->getArgOperand(i)->getType()))
2981 Call->removeParamAttrs(i, R);
2982 if (isa<PointerType>(Call->getType()))
2983 Call->removeRetAttrs(R);
2984 }
2985 }
2986
2987 // Delete the invariant.start instructions and RAUW poison.
2988 for (auto *II : InvariantStartInstructions) {
2989 II->replaceAllUsesWith(PoisonValue::get(II->getType()));
2990 II->eraseFromParent();
2991 }
2992}
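// Illustration only: a struct-path TBAA access tag may carry a trailing
// "constant memory" flag, e.g.
//   !2 = !{!1, !1, i64 0, i64 1}
// The loop above rewrites such tags to their mutable counterpart (flag
// cleared), because memory that looked immutable in the abstract machine
// model may be rewritten when the collector moves objects.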
2993
2994/// Looks up the GC strategy for a given function, returning null if the
2995/// function doesn't have a GC tag.
2996static std::unique_ptr<GCStrategy> findGCStrategy(Function &F) {
2997 if (!F.hasGC())
2998 return nullptr;
2999
3000 return getGCStrategy(F.getGC());
3001}
3002
3003/// Returns true if this function should be rewritten by this pass. The main
3004/// point of this function is as an extension point for custom logic.
3005static bool shouldRewriteStatepointsIn(Function &F) {
3006 if (!F.hasGC())
3007 return false;
3008
3009 std::unique_ptr<GCStrategy> Strategy = findGCStrategy(F);
3010
3011 assert(Strategy && "GC strategy is required by function, but was not found");
3012
3013 return Strategy->useRS4GC();
3014}
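// Illustration only: a function opts in to this rewriting through its "gc"
// attribute, e.g.
//   define void @f() gc "statepoint-example" { ... }
// where the GCStrategy registered for that name answers true from
// useRS4GC(). Functions without a "gc" attribute are left untouched.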
3015
3016static void stripNonValidData(Module &M) {
3017#ifndef NDEBUG
3018 assert(llvm::any_of(M, shouldRewriteStatepointsIn) && "precondition!");
3019#endif
3020
3021 for (Function &F : M)
3022 stripNonValidAttributesFromPrototype(F);
3023
3024 for (Function &F : M)
3025 stripNonValidDataFromBody(F);
3026}
3027
3028bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT,
3029 TargetTransformInfo &TTI,
3030 const TargetLibraryInfo &TLI) {
3031 assert(!F.isDeclaration() && !F.empty() &&
3032 "need function body to rewrite statepoints in");
3033 assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision");
3034
3035 auto NeedsRewrite = [&TLI](Instruction &I) {
3036 if (const auto *Call = dyn_cast<CallBase>(&I)) {
3037 if (isa<GCStatepointInst>(Call))
3038 return false;
3039 if (callsGCLeafFunction(Call, TLI))
3040 return false;
3041
3042 // Normally it's up to the frontend to make sure that non-leaf calls also
3043 // have proper deopt state if it is required. We make an exception for
3044 // element atomic memcpy/memmove intrinsics here. Unlike other intrinsics
3045 // these are non-leaf by default. They might be generated by the optimizer
3046 // which doesn't know how to produce a proper deopt state. So if we see a
3047 // non-leaf memcpy/memmove without deopt state just treat it as a leaf
3048 // copy and don't produce a statepoint.
3049 if (isa<AnyMemTransferInst>(Call) &&
3050 !Call->getOperandBundle(LLVMContext::OB_deopt)) {
3051 assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) &&
3052 "Don't expect any other calls here!");
3053 return false;
3054 }
3055 return true;
3056 }
3057 return false;
3058 };
3059
3060 // Delete any unreachable statepoints so that we don't have unrewritten
3061 // statepoints surviving this pass. This makes testing easier and the
3062 // resulting IR less confusing to human readers.
3063 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
3064 bool MadeChange = removeUnreachableBlocks(F, &DTU);
3065 // Flush the Dominator Tree.
3066 DTU.getDomTree();
3067
3068 // Gather all the statepoints which need to be rewritten. Be careful to only
3069 // consider those in reachable code since we need to ask dominance queries
3070 // when rewriting. We'll delete the unreachable ones in a moment.
3071 SmallVector<CallBase *, 64> ParsePointNeeded;
3072 SmallVector<CallInst *, 64> Intrinsics;
3073 for (Instruction &I : instructions(F)) {
3074 // TODO: only the ones with the flag set!
3075 if (NeedsRewrite(I)) {
3076 // NOTE removeUnreachableBlocks() is stronger than
3077 // DominatorTree::isReachableFromEntry(). In other words
3078 // removeUnreachableBlocks can remove some blocks for which
3079 // isReachableFromEntry() returns true.
3080 assert(DT.isReachableFromEntry(I.getParent()) &&
3081 "no unreachable blocks expected");
3082 ParsePointNeeded.push_back(cast<CallBase>(&I));
3083 }
3084 if (auto *CI = dyn_cast<CallInst>(&I))
3085 if (CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_base ||
3086 CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_offset)
3087 Intrinsics.emplace_back(CI);
3088 }
3089
3090 // Return early if no work to do.
3091 if (ParsePointNeeded.empty() && Intrinsics.empty())
3092 return MadeChange;
3093
3094 // As a prepass, go ahead and aggressively destroy single entry phi nodes.
3095 // These are created by LCSSA. They have the effect of increasing the size
3096 // of liveness sets for no good reason. It may be harder to do this post
3097 // insertion since relocations and base phis can confuse things.
3098 for (BasicBlock &BB : F)
3099 if (BB.getUniquePredecessor())
3100 MadeChange |= FoldSingleEntryPHINodes(&BB);
3101
3102 // Before we start introducing relocations, we want to tweak the IR a bit to
3103 // avoid unfortunate code generation effects. The main example is that we
3104 // want to try to make sure the comparison feeding a branch is after any
3105 // safepoints. Otherwise, we end up with a comparison of pre-relocation
3106 // values feeding a branch after relocation. This is semantically correct,
3107 // but results in extra register pressure since both the pre-relocation and
3108 // post-relocation copies must be available in registers. For code without
3109 // relocations this is handled elsewhere, but teaching the scheduler to
3110 // reverse the transform we're about to do would be slightly complex.
3111 // Note: This may extend the live range of the inputs to the icmp and thus
3112 // increase the liveset of any statepoint we move over. This is profitable
3113 // as long as all statepoints are in rare blocks. If we had in-register
3114 // lowering for live values this would be a much safer transform.
3115 auto getConditionInst = [](Instruction *TI) -> Instruction * {
3116 if (auto *BI = dyn_cast<BranchInst>(TI))
3117 if (BI->isConditional())
3118 return dyn_cast<Instruction>(BI->getCondition());
3119 // TODO: Extend this to handle switches
3120 return nullptr;
3121 };
3122 for (BasicBlock &BB : F) {
3123 Instruction *TI = BB.getTerminator();
3124 if (auto *Cond = getConditionInst(TI))
3125 // TODO: Handle more than just ICmps here. We should be able to move
3126 // most instructions without side effects or memory access.
3127 if (isa<ICmpInst>(Cond) && Cond->hasOneUse()) {
3128 MadeChange = true;
3129 Cond->moveBefore(TI);
3130 }
3131 }
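// Illustration only (hypothetical block): before the sinking above,
//   %c = icmp eq ptr %a, %b
//   call void @foo() [ "deopt"() ]   ; will become a statepoint
//   br i1 %c, label %t, label %f
// and after, the icmp sits next to the branch, below the future statepoint:
//   call void @foo() [ "deopt"() ]
//   %c = icmp eq ptr %a, %b
//   br i1 %c, label %t, label %f
// Once relocation is made explicit, the comparison consumes the relocated
// pointers rather than keeping both pre- and post-relocation copies live.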
3132
3133 // Nasty workaround - The base computation code in the main algorithm doesn't
3134 // consider the fact that a GEP can be used to convert a scalar to a vector.
3135 // The right fix for this is to integrate GEPs into the base rewriting
3136 // algorithm properly, this is just a short term workaround to prevent
3137 // crashes by canonicalizing such GEPs into fully vector GEPs.
3138 for (Instruction &I : instructions(F)) {
3139 if (!isa<GetElementPtrInst>(I))
3140 continue;
3141
3142 unsigned VF = 0;
3143 for (unsigned i = 0; i < I.getNumOperands(); i++)
3144 if (auto *OpndVTy = dyn_cast<VectorType>(I.getOperand(i)->getType())) {
3145 assert(VF == 0 ||
3146 VF == cast<FixedVectorType>(OpndVTy)->getNumElements());
3147 VF = cast<FixedVectorType>(OpndVTy)->getNumElements();
3148 }
3149
3150 // It's the vector to scalar traversal through the pointer operand which
3151 // confuses base pointer rewriting, so limit ourselves to that case.
3152 if (!I.getOperand(0)->getType()->isVectorTy() && VF != 0) {
3153 IRBuilder<> B(&I);
3154 auto *Splat = B.CreateVectorSplat(VF, I.getOperand(0));
3155 I.setOperand(0, Splat);
3156 MadeChange = true;
3157 }
3158 }
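// Illustration only: a GEP mixing a scalar pointer base with vector
// indices produces a vector of pointers, e.g.
//   %g = getelementptr i32, ptr %base, <2 x i64> %idx
// The canonicalization above splats the base first, yielding roughly
//   %base.splat = shufflevector ...          ; <2 x ptr> splat of %base
//   %g = getelementptr i32, <2 x ptr> %base.splat, <2 x i64> %idx
// so the base-pointer rewriting never has to cross from vector to scalar.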
3159
3160 // Cache the 'defining value' relation used in the computation and
3161 // insertion of base phis and selects. This ensures that we don't insert
3162 // large numbers of duplicate base_phis. Use one cache for both
3163 // inlineGetBaseAndOffset() and insertParsePoints().
3164 DefiningValueMapTy DVCache;
3165
3166 // Mapping between a base value and a flag indicating whether it's a known
3167 // base or not.
3168 IsKnownBaseMapTy KnownBases;
3169
3170 if (!Intrinsics.empty())
3171 // Inline @gc.get.pointer.base() and @gc.get.pointer.offset() before finding
3172 // live references.
3173 MadeChange |= inlineGetBaseAndOffset(F, Intrinsics, DVCache, KnownBases);
3174
3175 if (!ParsePointNeeded.empty())
3176 MadeChange |=
3177 insertParsePoints(F, DT, TTI, ParsePointNeeded, DVCache, KnownBases);
3178
3179 return MadeChange;
3180}
3181
3182// liveness computation via standard dataflow
3183// -------------------------------------------------------------------
3184
3185// TODO: Consider using bitvectors for liveness; the set of potentially
3186// interesting values should be small and easy to pre-compute.
3187
3188/// Compute the live-in set for the range [Begin, End) starting from
3189/// the live-out set of the basic block.
3190static void computeLiveInValues(BasicBlock::reverse_iterator Begin,
3191 BasicBlock::reverse_iterator End,
3192 SetVector<Value *> &LiveTmp, GCStrategy *GC) {
3193 for (auto &I : make_range(Begin, End)) {
3194 // KILL/Def - Remove this definition from LiveIn
3195 LiveTmp.remove(&I);
3196
3197 // Don't consider *uses* in PHI nodes; we handle their contribution to
3198 // predecessor blocks when we seed the LiveOut sets
3199 if (isa<PHINode>(I))
3200 continue;
3201
3202 // USE - Add to the LiveIn set for this instruction
3203 for (Value *V : I.operands()) {
3204 assert(!isUnhandledGCPointerType(V->getType(), GC) &&
3205 "support for FCA unimplemented");
3206 if (isHandledGCPointerType(V->getType(), GC) && !isa<Constant>(V)) {
3207 // The choice to exclude all things constant here is slightly subtle.
3208 // There are two independent reasons:
3209 // - We assume that things which are constant (from LLVM's definition)
3210 // do not move at runtime. For example, the address of a global
3211 // variable is fixed, even though its contents may not be.
3212 // - Second, we can't disallow arbitrary inttoptr constants even
3213 // if the language frontend does. Optimization passes are free to
3214 // locally exploit facts without respect to global reachability. This
3215 // can create sections of code which are dynamically unreachable and
3216 // contain just about anything. (see constants.ll in tests)
3217 LiveTmp.insert(V);
3218 }
3219 }
3220 }
3221}
3222
3223static void computeLiveOutSeed(BasicBlock *BB, SetVector<Value *> &LiveTmp,
3224 GCStrategy *GC) {
3225 for (BasicBlock *Succ : successors(BB)) {
3226 for (auto &I : *Succ) {
3227 PHINode *PN = dyn_cast<PHINode>(&I);
3228 if (!PN)
3229 break;
3230
3231 Value *V = PN->getIncomingValueForBlock(BB);
3232 assert(!isUnhandledGCPointerType(V->getType(), GC) &&
3233 "support for FCA unimplemented");
3234 if (isHandledGCPointerType(V->getType(), GC) && !isa<Constant>(V))
3235 LiveTmp.insert(V);
3236 }
3237 }
3238}
3239
3240static SetVector<Value *> computeKillSet(BasicBlock *BB, GCStrategy *GC) {
3241 SetVector<Value *> KillSet;
3242 for (Instruction &I : *BB)
3243 if (isHandledGCPointerType(I.getType(), GC))
3244 KillSet.insert(&I);
3245 return KillSet;
3246}
3247
3248#ifndef NDEBUG
3249/// Check that the items in 'Live' dominate 'TI'. This is used as a basic
3250/// validation check for the liveness computation.
3251static void checkBasicSSA(DominatorTree &DT, SetVector<Value *> &Live,
3252 Instruction *TI, bool TermOkay = false) {
3253 for (Value *V : Live) {
3254 if (auto *I = dyn_cast<Instruction>(V)) {
3255 // The terminator can be a member of the LiveOut set. LLVM's definition
3256 // of instruction dominance states that V does not dominate itself. As
3257 // such, we need to special case this to allow it.
3258 if (TermOkay && TI == I)
3259 continue;
3260 assert(DT.dominates(I, TI) &&
3261 "basic SSA liveness expectation violated by liveness analysis");
3262 }
3263 }
3264}
3265
3266/// Check that all the liveness sets used during the computation of liveness
3267/// obey basic SSA properties. This is useful for finding cases where we miss
3268/// a def.
3269static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
3270 BasicBlock &BB) {
3271 checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
3272 checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
3273 checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
3274}
3275#endif
3276
3277static void computeLiveInValues(DominatorTree &DT, Function &F,
3278 GCPtrLivenessData &Data, GCStrategy *GC) {
3279 SmallSetVector<BasicBlock *, 32> Worklist;
3280
3281 // Seed the liveness for each individual block
3282 for (BasicBlock &BB : F) {
3283 Data.KillSet[&BB] = computeKillSet(&BB, GC);
3284 Data.LiveSet[&BB].clear();
3285 computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB], GC);
3286
3287#ifndef NDEBUG
3288 for (Value *Kill : Data.KillSet[&BB])
3289 assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
3290#endif
3291
3292 Data.LiveOut[&BB] = SetVector<Value *>();
3293 computeLiveOutSeed(&BB, Data.LiveOut[&BB], GC);
3294 Data.LiveIn[&BB] = Data.LiveSet[&BB];
3295 Data.LiveIn[&BB].set_union(Data.LiveOut[&BB]);
3296 Data.LiveIn[&BB].set_subtract(Data.KillSet[&BB]);
3297 if (!Data.LiveIn[&BB].empty())
3298 Worklist.insert(pred_begin(&BB), pred_end(&BB));
3299 }
3300
3301 // Propagate that liveness until stable
3302 while (!Worklist.empty()) {
3303 BasicBlock *BB = Worklist.pop_back_val();
3304
3305 // Compute our new liveout set, then exit early if it hasn't changed
3306 // despite the contribution of our successors.
3307 SetVector<Value *> LiveOut = Data.LiveOut[BB];
3308 const auto OldLiveOutSize = LiveOut.size();
3309 for (BasicBlock *Succ : successors(BB)) {
3310 assert(Data.LiveIn.count(Succ));
3311 LiveOut.set_union(Data.LiveIn[Succ]);
3312 }
3313 // assert: OldLiveOut is a subset of LiveOut
3314 if (OldLiveOutSize == LiveOut.size()) {
3315 // If the sets are the same size, then we didn't actually add anything
3316 // when unioning our successors' LiveIn. Thus, the LiveIn of this block
3317 // hasn't changed.
3318 continue;
3319 }
3320 Data.LiveOut[BB] = LiveOut;
3321
3322 // Apply the effects of this basic block
3323 SetVector<Value *> LiveTmp = LiveOut;
3324 LiveTmp.set_union(Data.LiveSet[BB]);
3325 LiveTmp.set_subtract(Data.KillSet[BB]);
3326
3327 assert(Data.LiveIn.count(BB));
3328 const SetVector<Value *> &OldLiveIn = Data.LiveIn[BB];
3329 // assert: OldLiveIn is a subset of LiveTmp
3330 if (OldLiveIn.size() != LiveTmp.size()) {
3331 Data.LiveIn[BB] = LiveTmp;
3332 Worklist.insert(pred_begin(BB), pred_end(BB));
3333 }
3334 } // while (!Worklist.empty())
3335
3336#ifndef NDEBUG
3337 // Verify our output against SSA properties. This helps catch any
3338 // missing kills during the above iteration.
3339 for (BasicBlock &BB : F)
3340 checkBasicSSA(DT, Data, BB);
3341#endif
3342}
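// The fixed point computed above is the standard backward liveness system;
// per basic block BB, roughly:
//   LiveOut(BB) = union over successors S of LiveIn(S), plus the values
//                 BB contributes to phi nodes in S (the LiveOut seed)
//   LiveIn(BB)  = (LiveSet(BB) union LiveOut(BB)) - KillSet(BB)
// restricted throughout to non-constant values of handled GC pointer type.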
3343
3344static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
3345 StatepointLiveSetTy &Out, GCStrategy *GC) {
3346 BasicBlock *BB = Inst->getParent();
3347
3348 // Note: The copy is intentional and required
3349 assert(Data.LiveOut.count(BB));
3350 SetVector<Value *> LiveOut = Data.LiveOut[BB];
3351
3352 // We want to handle the statepoint itself oddly. Its
3353 // call result is not live (normal), nor are its arguments
3354 // (unless they're used again later). This adjustment is
3355 // specifically what we need to relocate.
3356 computeLiveInValues(BB->rbegin(), ++Inst->getIterator().getReverse(), LiveOut,
3357 GC);
3358 LiveOut.remove(Inst);
3359 Out.insert(LiveOut.begin(), LiveOut.end());
3360}
3361
3362static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
3363 CallBase *Call,
3364 PartiallyConstructedSafepointRecord &Info,
3365 PointerToBaseTy &PointerToBase,
3366 GCStrategy *GC) {
3367 StatepointLiveSetTy Updated;
3368 findLiveSetAtInst(Call, RevisedLivenessData, Updated, GC);
3369
3370 // We may have base pointers which are now live that weren't before. We need
3371 // to update the PointerToBase structure to reflect this.
3372 for (auto *V : Updated)
3373 PointerToBase.insert({ V, V });
3374
3375 Info.LiveSet = Updated;
3376}
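// For completeness, a minimal sketch (not part of this file) of driving the
// pass from C++ with the new pass manager; assumes an existing Module M and
// omits error handling:
//
//   #include "llvm/Passes/PassBuilder.h"
//   #include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h"
//
//   void runRS4GC(llvm::Module &M) {
//     llvm::PassBuilder PB;
//     llvm::LoopAnalysisManager LAM;
//     llvm::FunctionAnalysisManager FAM;
//     llvm::CGSCCAnalysisManager CGAM;
//     llvm::ModuleAnalysisManager MAM;
//     PB.registerModuleAnalyses(MAM);
//     PB.registerCGSCCAnalyses(CGAM);
//     PB.registerFunctionAnalyses(FAM);
//     PB.registerLoopAnalyses(LAM);
//     PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
//
//     llvm::ModulePassManager MPM;
//     MPM.addPass(llvm::RewriteStatepointsForGC());
//     MPM.run(M, MAM);
//   }
//
// or, equivalently, from the command line:
//   opt -passes=rewrite-statepoints-for-gc input.ll -S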