FunctionExtras.h
//===- FunctionExtras.h - Function type erasure utilities -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides a collection of function (or more generally, callable)
/// type erasure utilities supplementing those provided by the standard library
/// in `<functional>`.
///
/// It provides `unique_function`, which works like `std::function` but supports
/// move-only callable objects and const-qualification.
///
/// Future plans:
/// - Add a `function` that provides ref-qualified support, which doesn't work
///   with `std::function`.
/// - Provide support for specifying multiple signatures to type erase callable
///   objects with an overload set, such as those produced by generic lambdas.
/// - Expand to include a copyable utility that directly replaces std::function
///   but brings the above improvements.
///
/// Note that LLVM's utilities are greatly simplified by not supporting
/// allocators.
///
/// If the standard library ever begins to provide comparable facilities we can
/// consider switching to those.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_FUNCTIONEXTRAS_H
#define LLVM_ADT_FUNCTIONEXTRAS_H

#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/Support/MemAlloc.h"
#include "llvm/Support/type_traits.h"
#include <memory>
#include <type_traits>
namespace llvm {

/// unique_function is a type-erasing functor similar to std::function.
///
/// It can hold move-only function objects, like lambdas capturing unique_ptrs.
/// Accordingly, it is movable but not copyable.
///
/// It supports const-qualification:
/// - unique_function<int() const> has a const operator().
///   It can only hold functions which themselves have a const operator().
/// - unique_function<int()> has a non-const operator().
///   It can hold functions with a non-const operator(), like mutable lambdas.
template <typename FunctionT> class unique_function;
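
// Illustrative usage (a sketch only, not part of this header's interface;
// assumes a user translation unit that includes this header and <memory>):
//
//   std::unique_ptr<int> Ptr = std::make_unique<int>(42);
//   unique_function<int()> F = [Ptr = std::move(Ptr)] { return *Ptr; };
//   unique_function<int()> G = std::move(F); // movable...
//   // ...but not copyable: `auto H = G;` would not compile.
//
//   // A const signature requires (and provides) a const operator():
//   const unique_function<int() const> C = [] { return 7; };
//   int V = C();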

namespace detail {

template <typename T>
using EnableIfTrivial =
    std::enable_if_t<llvm::is_trivially_move_constructible<T>::value &&
                     std::is_trivially_destructible<T>::value>;
template <typename CallableT, typename ThisT>
using EnableUnlessSameType =
    std::enable_if_t<!std::is_same<remove_cvref_t<CallableT>, ThisT>::value>;
template <typename CallableT, typename Ret, typename... Params>
using EnableIfCallable = std::enable_if_t<llvm::disjunction<
    std::is_void<Ret>,
    std::is_same<decltype(std::declval<CallableT>()(std::declval<Params>()...)),
                 Ret>,
    std::is_same<const decltype(std::declval<CallableT>()(
                     std::declval<Params>()...)),
                 Ret>,
    std::is_convertible<decltype(std::declval<CallableT>()(
                            std::declval<Params>()...)),
                        Ret>>::value>;

template <typename ReturnT, typename... ParamTs> class UniqueFunctionBase {
protected:
  static constexpr size_t InlineStorageSize = sizeof(void *) * 3;

  template <typename T, class = void>
  struct IsSizeLessThanThresholdT : std::false_type {};

  template <typename T>
  struct IsSizeLessThanThresholdT<
      T, std::enable_if_t<sizeof(T) <= 2 * sizeof(void *)>> : std::true_type {};

  // Provide a type function to map parameters that won't observe extra copies
  // or moves and which are small enough to likely pass in a register to values,
  // and all other types to l-value reference types. We use this to compute the
  // types used in our erased call utility to minimize copies and moves unless
  // doing so would force things unnecessarily into memory.
  //
  // The heuristic used is related to common ABI register passing conventions.
  // It doesn't have to be exact though, and in one way it is more strict
  // because we want to still be able to observe either moves *or* copies.
  template <typename T> struct AdjustedParamTBase {
    static_assert(!std::is_reference<T>::value,
                  "references should be handled by template specialization");
    using type = typename std::conditional<
        llvm::is_trivially_copy_constructible<T>::value &&
            llvm::is_trivially_move_constructible<T>::value &&
            IsSizeLessThanThresholdT<T>::value,
        T, T &>::type;
  };

  // This specialization ensures that 'AdjustedParam<V<T>&>' or
  // 'AdjustedParam<V<T>&&>' does not trigger a compile-time error when 'T' is
  // an incomplete type and V a templated type.
  template <typename T> struct AdjustedParamTBase<T &> { using type = T &; };
  template <typename T> struct AdjustedParamTBase<T &&> { using type = T &; };

  template <typename T>
  using AdjustedParamT = typename AdjustedParamTBase<T>::type;
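
  // For illustration only, under the heuristic above (assumed behavior for a
  // few representative types, not assertions used by this header):
  //   AdjustedParamT<int>         == int            (small, trivial: by value)
  //   AdjustedParamT<std::string> == std::string &  (non-trivial copy/move)
  //   AdjustedParamT<T &>         == T &            (references pass through)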

  // The type of the erased function pointer we use as a callback to dispatch to
  // the stored callable when it is trivial to move and destroy.
  using CallPtrT = ReturnT (*)(void *CallableAddr,
                               AdjustedParamT<ParamTs>... Params);
  using MovePtrT = void (*)(void *LHSCallableAddr, void *RHSCallableAddr);
  using DestroyPtrT = void (*)(void *CallableAddr);

  /// A struct to hold a single trivial callback with sufficient alignment for
  /// our bitpacking.
  struct alignas(8) TrivialCallback {
    CallPtrT CallPtr;
  };

  /// A struct we use to aggregate three callbacks when we need the full set of
  /// operations.
  struct alignas(8) NonTrivialCallbacks {
    CallPtrT CallPtr;
    MovePtrT MovePtr;
    DestroyPtrT DestroyPtr;
  };

  // Create a pointer union between either a pointer to a static trivial call
  // pointer in a struct or a pointer to a static struct of the call, move, and
  // destroy pointers.
  using CallbackPointerUnionT =
      PointerUnion<TrivialCallback *, NonTrivialCallbacks *>;
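
  // Note on the bitpacking (an explanatory sketch of the layout): `alignas(8)`
  // on the callback structs leaves at least three zero low bits in their
  // addresses. The PointerUnion spends one of those bits to discriminate
  // between the two struct types, and the PointerIntPair below spends another
  // for the inline-storage flag.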

  // The main storage buffer. This will either have a pointer to out-of-line
  // storage or an inline buffer storing the callable.
  union StorageUnionT {
    // For out-of-line storage we keep a pointer to the underlying storage and
    // the size. This is enough to deallocate the memory.
    struct OutOfLineStorageT {
      void *StoragePtr;
      size_t Size;
      size_t Alignment;
    } OutOfLineStorage;
    static_assert(
        sizeof(OutOfLineStorageT) <= InlineStorageSize,
        "Should always use all of the out-of-line storage for inline storage!");

    // For in-line storage, we just provide an aligned character buffer. We
    // provide three pointers worth of storage here.
    // This is mutable as an inlined `const unique_function<void() const>` may
    // still modify its own mutable members.
    mutable
        typename std::aligned_storage<InlineStorageSize, alignof(void *)>::type
            InlineStorage;
  } StorageUnion;

  // A compressed pointer to either our dispatching callback or our table of
  // dispatching callbacks and the flag for whether the callable itself is
  // stored inline or not.
  PointerIntPair<CallbackPointerUnionT, 1, bool> CallbackAndInlineFlag;

  bool isInlineStorage() const { return CallbackAndInlineFlag.getInt(); }

  bool isTrivialCallback() const {
    return CallbackAndInlineFlag.getPointer().template is<TrivialCallback *>();
  }

  CallPtrT getTrivialCallback() const {
    return CallbackAndInlineFlag.getPointer().template get<TrivialCallback *>()->CallPtr;
  }

  NonTrivialCallbacks *getNonTrivialCallbacks() const {
    return CallbackAndInlineFlag.getPointer()
        .template get<NonTrivialCallbacks *>();
  }

  CallPtrT getCallPtr() const {
    return isTrivialCallback() ? getTrivialCallback()
                               : getNonTrivialCallbacks()->CallPtr;
  }

  // These three functions are only const in the narrow sense. They return
  // mutable pointers to function state.
  // This allows unique_function<T const>::operator() to be const, even if the
  // underlying functor may be internally mutable.
  //
  // const callers must ensure they're only used in const-correct ways.
  void *getCalleePtr() const {
    return isInlineStorage() ? getInlineStorage() : getOutOfLineStorage();
  }
  void *getInlineStorage() const { return &StorageUnion.InlineStorage; }
  void *getOutOfLineStorage() const {
    return StorageUnion.OutOfLineStorage.StoragePtr;
  }

  size_t getOutOfLineStorageSize() const {
    return StorageUnion.OutOfLineStorage.Size;
  }
  size_t getOutOfLineStorageAlignment() const {
    return StorageUnion.OutOfLineStorage.Alignment;
  }

  void setOutOfLineStorage(void *Ptr, size_t Size, size_t Alignment) {
    StorageUnion.OutOfLineStorage = {Ptr, Size, Alignment};
  }

  template <typename CalledAsT>
  static ReturnT CallImpl(void *CallableAddr,
                          AdjustedParamT<ParamTs>... Params) {
    auto &Func = *reinterpret_cast<CalledAsT *>(CallableAddr);
    return Func(std::forward<ParamTs>(Params)...);
  }

  template <typename CallableT>
  static void MoveImpl(void *LHSCallableAddr, void *RHSCallableAddr) noexcept {
    new (LHSCallableAddr)
        CallableT(std::move(*reinterpret_cast<CallableT *>(RHSCallableAddr)));
  }

  template <typename CallableT>
  static void DestroyImpl(void *CallableAddr) noexcept {
    reinterpret_cast<CallableT *>(CallableAddr)->~CallableT();
  }

  // The pointers to call/move/destroy functions are determined for each
  // callable type (and called-as type, which determines the overload chosen).
  // (Definitions are out-of-line.)

  // By default, we need an object that contains all the different type-erased
  // behaviors needed. Create a static instance of the struct type here, and
  // each instance will contain a pointer to it.
  // Wrap in a struct to avoid https://gcc.gnu.org/PR71954
  template <typename CallableT, typename CalledAs, typename Enable = void>
  struct CallbacksHolder {
    static NonTrivialCallbacks Callbacks;
  };
  // See if we can create a trivial callback. We need the callable to be
  // trivially moved and trivially destroyed so that we don't have to store
  // type erased callbacks for those operations.
  template <typename CallableT, typename CalledAs>
  struct CallbacksHolder<CallableT, CalledAs, EnableIfTrivial<CallableT>> {
    static TrivialCallback Callbacks;
  };
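
  // For example (an illustrative assumption about typical callables): a
  // capture-less lambda, or one capturing only an `int *`, is trivially
  // movable and destructible, so only a CallPtr is ever needed. A lambda
  // capturing a std::string or std::unique_ptr is not, so the full
  // NonTrivialCallbacks table is used instead.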

  // A simple tag type so the call-as type can be passed to the constructor.
  template <typename T> struct CalledAs {};

  // Essentially the "main" unique_function constructor, but subclasses
  // provide the qualified type to be used for the call.
  // (We always store a T, even if the call will use a pointer to const T.)
  template <typename CallableT, typename CalledAsT>
  UniqueFunctionBase(CallableT Callable, CalledAs<CalledAsT>) {
    bool IsInlineStorage = true;
    void *CallableAddr = getInlineStorage();
    if (sizeof(CallableT) > InlineStorageSize ||
        alignof(CallableT) > alignof(decltype(StorageUnion.InlineStorage))) {
      IsInlineStorage = false;
      // Allocate out-of-line storage. FIXME: Use an explicit alignment
      // parameter in C++17 mode.
      auto Size = sizeof(CallableT);
      auto Alignment = alignof(CallableT);
      CallableAddr = allocate_buffer(Size, Alignment);
      setOutOfLineStorage(CallableAddr, Size, Alignment);
    }

    // Now move into the storage.
    new (CallableAddr) CallableT(std::move(Callable));
    CallbackAndInlineFlag.setPointerAndInt(
        &CallbacksHolder<CallableT, CalledAsT>::Callbacks, IsInlineStorage);
  }
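
  // Illustrative sizing (assuming 8-byte pointers): the inline buffer holds
  // three pointers, so a callable capturing up to three pointer-sized members
  // with pointer alignment is stored inline; anything larger or over-aligned
  // is moved into a heap allocation tracked by OutOfLineStorage.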

  ~UniqueFunctionBase() {
    if (!CallbackAndInlineFlag.getPointer())
      return;

    // Cache this value so we don't re-check it after type-erased operations.
    bool IsInlineStorage = isInlineStorage();

    if (!isTrivialCallback())
      getNonTrivialCallbacks()->DestroyPtr(
          IsInlineStorage ? getInlineStorage() : getOutOfLineStorage());

    if (!IsInlineStorage)
      deallocate_buffer(getOutOfLineStorage(), getOutOfLineStorageSize(),
                        getOutOfLineStorageAlignment());
  }

  UniqueFunctionBase(UniqueFunctionBase &&RHS) noexcept {
    // Copy the callback and inline flag.
    CallbackAndInlineFlag = RHS.CallbackAndInlineFlag;

    // If the RHS is empty, just copying the above is sufficient.
    if (!RHS)
      return;

    if (!isInlineStorage()) {
      // The out-of-line case is easiest to move.
      StorageUnion.OutOfLineStorage = RHS.StorageUnion.OutOfLineStorage;
    } else if (isTrivialCallback()) {
      // Move is trivial, just memcpy the bytes across.
      memcpy(getInlineStorage(), RHS.getInlineStorage(), InlineStorageSize);
    } else {
      // Non-trivial move, so dispatch to a type-erased implementation.
      getNonTrivialCallbacks()->MovePtr(getInlineStorage(),
                                        RHS.getInlineStorage());
    }

    // Clear the old callback and inline flag to get back to as-if-null.
    RHS.CallbackAndInlineFlag = {};

#ifndef NDEBUG
    // In debug builds, we also scribble across the rest of the storage.
    memset(RHS.getInlineStorage(), 0xAD, InlineStorageSize);
#endif
  }

  UniqueFunctionBase &operator=(UniqueFunctionBase &&RHS) noexcept {
    if (this == &RHS)
      return *this;

    // Because we don't try to provide any exception safety guarantees we can
    // implement move assignment very simply by first destroying the current
    // object and then move-constructing over top of it.
    this->~UniqueFunctionBase();
    new (this) UniqueFunctionBase(std::move(RHS));
    return *this;
  }

  UniqueFunctionBase() = default;

public:
  explicit operator bool() const {
    return (bool)CallbackAndInlineFlag.getPointer();
  }
};

template <typename R, typename... P>
template <typename CallableT, typename CalledAsT, typename Enable>
typename UniqueFunctionBase<R, P...>::NonTrivialCallbacks UniqueFunctionBase<
    R, P...>::CallbacksHolder<CallableT, CalledAsT, Enable>::Callbacks = {
    &CallImpl<CalledAsT>, &MoveImpl<CallableT>, &DestroyImpl<CallableT>};

template <typename R, typename... P>
template <typename CallableT, typename CalledAsT>
typename UniqueFunctionBase<R, P...>::TrivialCallback
    UniqueFunctionBase<R, P...>::CallbacksHolder<
        CallableT, CalledAsT, EnableIfTrivial<CallableT>>::Callbacks{
        &CallImpl<CalledAsT>};

} // namespace detail

template <typename R, typename... P>
class unique_function<R(P...)> : public detail::UniqueFunctionBase<R, P...> {
  using Base = detail::UniqueFunctionBase<R, P...>;

public:
  unique_function() = default;
  unique_function(std::nullptr_t) {}
  unique_function(unique_function &&) = default;
  unique_function(const unique_function &) = delete;
  unique_function &operator=(unique_function &&) = default;
  unique_function &operator=(const unique_function &) = delete;

  template <typename CallableT>
  unique_function(
      CallableT Callable,
      detail::EnableUnlessSameType<CallableT, unique_function> * = nullptr,
      detail::EnableIfCallable<CallableT, R, P...> * = nullptr)
      : Base(std::forward<CallableT>(Callable),
             typename Base::template CalledAs<CallableT>{}) {}

  R operator()(P... Params) {
    return this->getCallPtr()(this->getCalleePtr(), Params...);
  }
};

template <typename R, typename... P>
class unique_function<R(P...) const>
    : public detail::UniqueFunctionBase<R, P...> {
  using Base = detail::UniqueFunctionBase<R, P...>;

public:
  unique_function() = default;
  unique_function(std::nullptr_t) {}
  unique_function(unique_function &&) = default;
  unique_function(const unique_function &) = delete;
  unique_function &operator=(unique_function &&) = default;
  unique_function &operator=(const unique_function &) = delete;

  template <typename CallableT>
  unique_function(
      CallableT Callable,
      detail::EnableUnlessSameType<CallableT, unique_function> * = nullptr,
      detail::EnableIfCallable<const CallableT, R, P...> * = nullptr)
      : Base(std::forward<CallableT>(Callable),
             typename Base::template CalledAs<const CallableT>{}) {}

  R operator()(P... Params) const {
    return this->getCallPtr()(this->getCalleePtr(), Params...);
  }
};

} // end namespace llvm

#endif // LLVM_ADT_FUNCTIONEXTRAS_H