llvm/ADT/Hashing.h
#ifndef LLVM_ADT_HASHING_H
#define LLVM_ADT_HASHING_H
// Forward declaration so this header can specialize DenseMapInfo for
// hash_code below.
template <typename T, typename Enable> struct DenseMapInfo;

// hash_code converts implicitly to size_t and supports equality comparison.
operator size_t() const { return value; }

friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
  return lhs.value == rhs.value;
}
friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
  return lhs.value != rhs.value;
}
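Since hash_code behaves like a plain size_t with comparisons, it drops into generic hashing contexts directly. A minimal usage sketch (illustrative only; the concrete hash values vary per execution):

#include "llvm/ADT/Hashing.h"
#include <cassert>

void hash_code_basics() {
  llvm::hash_code a = llvm::hash_value(42);
  llvm::hash_code b = llvm::hash_value(42);
  assert(a == b && "equal inputs hash equally within one execution");
  size_t bucket = size_t(a) % 128; // conversion via operator size_t()
  (void)bucket;
}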
template <typename T>
std::enable_if_t<is_integral_or_enum<T>::value, hash_code> hash_value(T value);

template <typename T> hash_code hash_value(const T *ptr);

template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg);

template <typename... Ts>
hash_code hash_value(const std::tuple<Ts...> &arg);

template <typename T>
hash_code hash_value(const std::basic_string<T> &arg);
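These overloads cover integers and enums, pointers (hashed by address), pairs, tuples, and standard strings. A brief usage sketch, assuming only the declarations above:

#include "llvm/ADT/Hashing.h"
#include <string>
#include <tuple>
#include <utility>

void hash_value_examples() {
  using llvm::hash_value;
  llvm::hash_code h1 = hash_value(1337);                     // integral
  llvm::hash_code h2 = hash_value(std::make_pair(1, 2u));    // pair
  llvm::hash_code h3 = hash_value(std::make_tuple(1, 2, 3)); // tuple
  llvm::hash_code h4 = hash_value(std::string("hello"));     // string
  (void)h1; (void)h2; (void)h3; (void)h4;
}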
// Multiplication constant used when mixing two 64-bit values in
// hash_16_bytes.
const uint64_t kMul = 0x9ddfea08eb382d69ULL;

// From hash_1to3_bytes: sample the middle and last bytes of the input.
uint8_t b = s[len >> 1];
uint8_t c = s[len - 1];
// From hash_short: dispatch to a length-specialized routine.
if (length >= 4 && length <= 8)
  return hash_4to8_bytes(s, length, seed);
if (length > 8 && length <= 16)
  return hash_9to16_bytes(s, length, seed);
if (length > 16 && length <= 32)
  return hash_17to32_bytes(s, length, seed);
// Prime used to derive the per-execution seed.
const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
// Trait to indicate whether a type's bits can be hashed directly.
template <typename T> struct is_hashable_data
    : std::integral_constant<bool, ((is_integral_or_enum<T>::value ||
                                     std::is_pointer<T>::value) &&
                                    64 % sizeof(T) == 0)> {};

// A pair is hashable as raw bytes only when both members are and the pair
// has no internal padding.
template <typename T, typename U> struct is_hashable_data<std::pair<T, U>>
    : std::integral_constant<bool, (is_hashable_data<T>::value &&
                                    is_hashable_data<U>::value &&
                                    (sizeof(T) + sizeof(U)) ==
                                        sizeof(std::pair<T, U>))> {};
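What the trait accepts and rejects, as a quick sketch (the namespace path llvm::hashing::detail matches the rest of this file; the pair result assumes typical padding on a 64-bit target):

#include "llvm/ADT/Hashing.h"
#include <utility>

using llvm::hashing::detail::is_hashable_data;

static_assert(is_hashable_data<int>::value, "integers hash directly");
static_assert(is_hashable_data<void *>::value, "so do pointers");
// char + long long leaves padding, so sizeof(T) + sizeof(U) differs from
// sizeof(pair) and the pair falls back to element-wise hashing:
static_assert(!is_hashable_data<std::pair<char, long long>>::value,
              "padded pairs are not raw-hashable");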
// Helper to get the hashable data representation for a type.
template <typename T>
std::enable_if_t<is_hashable_data<T>::value, T>
get_hashable_data(const T &value);

// Types that cannot be hashed as raw bytes are reduced to a size_t via
// their hash_value overload instead.
template <typename T>
std::enable_if_t<!is_hashable_data<T>::value, size_t>
get_hashable_data(const T &value);
// Helper to store data from a value into a buffer and advance the pointer
// into that buffer. Returns false if the value does not fit.
template <typename T>
bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T &value,
                       size_t offset = 0) {
  size_t store_size = sizeof(value) - offset;
  if (buffer_ptr + store_size > buffer_end)
    return false;
  const char *value_data = reinterpret_cast<const char *>(&value);
  memcpy(buffer_ptr, value_data + offset, store_size);
  buffer_ptr += store_size;
  return true;
}
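A small sketch of how this helper packs values into a local buffer, relying only on the function above (the names here are local to the example):

void pack_example() {
  char buffer[64], *ptr = buffer;
  char *const end = buffer + sizeof(buffer);
  unsigned long long a = 1, b = 2;
  // Each call copies the value's bytes and advances ptr; a false return
  // means the buffer is full and must be flushed into the hash state.
  bool ok = store_and_advance(ptr, end, a) && store_and_advance(ptr, end, b);
  (void)ok;
}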
// Implement the combining of integral values into a hash_code.
template <typename InputIteratorT>
hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
  const uint64_t seed = get_execution_seed();
  char buffer[64], *buffer_ptr = buffer;
  char *const buffer_end = std::end(buffer);
  while (first != last &&
         store_and_advance(buffer_ptr, buffer_end, get_hashable_data(*first)))
    ++first;
  if (first == last) // Everything fit in one buffer: take the short path.
    return hash_short(buffer, buffer_ptr - buffer, seed);
  assert(buffer_ptr == buffer_end);
  hash_state state = hash_state::create(buffer, seed);
  size_t length = 64;
  while (first != last) {
    buffer_ptr = buffer;
    while (first != last &&
           store_and_advance(buffer_ptr, buffer_end, get_hashable_data(*first)))
      ++first;
    std::rotate(buffer, buffer_ptr, buffer_end); // Emulate mixing a tail chunk.
    state.mix(buffer);
    length += buffer_ptr - buffer;
  }
  return state.finalize(length);
}
// Overload of hash_combine_range_impl for contiguous, raw-hashable data.
template <typename ValueT>
std::enable_if_t<is_hashable_data<ValueT>::value, hash_code>
hash_combine_range_impl(ValueT *first, ValueT *last) {
  const uint64_t seed = get_execution_seed();
  const char *s_begin = reinterpret_cast<const char *>(first);
  const char *s_end = reinterpret_cast<const char *>(last);
  const size_t length = std::distance(s_begin, s_end);
  if (length <= 64)
    return hash_short(s_begin, length, seed);

  const char *s_aligned_end = s_begin + (length & ~63);
  hash_state state = hash_state::create(s_begin, seed);
  s_begin += 64;
  while (s_begin != s_aligned_end) {
    state.mix(s_begin);
    s_begin += 64;
  }
  if (length & 63) // Mix the final, possibly overlapping 64-byte chunk.
    state.mix(s_end - 64);
  return state.finalize(length);
}
// Compute a hash_code for a sequence of values.
template <typename InputIteratorT>
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last);
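Callers reach both implementations above through hash_combine_range. A usage sketch; passing raw pointers guarantees the contiguous fast path for trivially hashable element types:

#include "llvm/ADT/Hashing.h"
#include <vector>

llvm::hash_code hash_ints(const std::vector<int> &v) {
  // int satisfies is_hashable_data, so the pointer overload hashes the raw
  // bytes 64 at a time instead of element by element.
  return llvm::hash_combine_range(v.data(), v.data() + v.size());
}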
// Combine one chunk of data into the current in-flight hash.
template <typename T>
char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end,
                   T data) {
  if (!store_and_advance(buffer_ptr, buffer_end, data)) {
    // Buffer full: top it off, mix the complete 64-byte chunk, then store
    // the remainder of the value at the start of the emptied buffer.
    size_t partial_store_size = buffer_end - buffer_ptr;
    memcpy(buffer_ptr, &data, partial_store_size);
    if (length == 0) {
      state = hash_state::create(buffer, seed);
      length = 64;
    } else {
      state.mix(buffer);
      length += 64;
    }
    buffer_ptr = buffer;
    store_and_advance(buffer_ptr, buffer_end, data, partial_store_size);
  }
  return buffer_ptr;
}
// Recursive, variadic combining method.
template <typename T, typename ...Ts>
hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
                  const T &arg, const Ts &...args) {
  buffer_ptr = combine_data(length, buffer_ptr, buffer_end,
                            get_hashable_data(arg));
  // Recurse to the remaining arguments.
  return combine(length, buffer_ptr, buffer_end, args...);
}
// From the base case of combine(): account for the final partial buffer.
length += buffer_ptr - buffer;
return state.finalize(length);
// From hash_integer_value: view the integer's bytes so the fetch routines
// can read them.
const char *s = reinterpret_cast<const char *>(&value);
// Compute a hash_code for a pointer's address value.
template <typename T> hash_code hash_value(const T *ptr) {
  return ::llvm::hashing::detail::hash_integer_value(
      reinterpret_cast<uintptr_t>(ptr));
}
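Note that this hashes the address, not the pointee. A quick illustration (a sketch):

#include "llvm/ADT/Hashing.h"

void pointer_hashing_is_address_based() {
  int x = 5, y = 5;
  // hx and hy will generally differ even though the pointed-to values are
  // equal; hash the values themselves if value identity is what you want.
  llvm::hash_code hx = llvm::hash_value(&x);
  llvm::hash_code hy = llvm::hash_value(&y);
  (void)hx; (void)hy;
}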
// Compute a hash_code for a pair by combining both members.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg) {
  return hash_combine(arg.first, arg.second);
}
// Expand a tuple into hash_combine over its elements via an index sequence.
template <typename... Ts, std::size_t... Indices>
hash_code hash_value_tuple_helper(const std::tuple<Ts...> &arg,
                                  std::index_sequence<Indices...>) {
  return hash_combine(std::get<Indices>(arg)...);
}

template <typename... Ts>
hash_code hash_value(const std::tuple<Ts...> &arg) {
  return hash_value_tuple_helper(arg,
                                 typename std::index_sequence_for<Ts...>());
}
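Because the helper expands to hash_combine over the elements, hashing a tuple and combining its members directly should agree within one execution; a sketch:

#include "llvm/ADT/Hashing.h"
#include <cassert>
#include <tuple>

void tuple_hash_matches_combine() {
  auto t = std::make_tuple(1, 2u, 3l);
  assert(llvm::hash_value(t) == llvm::hash_combine(1, 2u, 3l));
}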
// Compute a hash_code for a standard string.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg) {
  return hash_combine_range(arg.begin(), arg.end());
}
void set_fixed_execution_hash_seed(uint64_t fixed_value)
Override the execution seed with a fixed value.
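A sketch of pinning the seed for run-to-run reproducibility (useful in tests; assume it is called before any hashing takes place):

#include "llvm/ADT/Hashing.h"

int main() {
  llvm::set_fixed_execution_hash_seed(42);    // every run now uses seed 42
  llvm::hash_code h = llvm::hash_value(1337); // stable across executions
  (void)h;
  return 0;
}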
constexpr bool IsBigEndianHost
void swapByteOrder(T &Value)
friend size_t hash_value(const hash_code &code)
Allow a hash_code to be directly run through hash_value.
char * combine_data(size_t &length, char *buffer_ptr, char *buffer_end, T data)
Combine one chunk of data into the current in-flight hash.
uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed)
uint64_t finalize(size_t length)
Compute the final 64-bit hash code value based on the current state and the length of bytes hashed.
static constexpr uint64_t k2
uint64_t shift_mix(uint64_t val)
uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed)
template <typename T, typename Enable> struct DenseMapInfo
An information struct used to provide DenseMap with the various necessary components for a given value type.
std::enable_if_t< is_hashable_data< T >::value, T > get_hashable_data(const T &value)
Helper to get the hashable data representation for a type.
hash_code combine(size_t length, char *buffer_ptr, char *buffer_end, const T &arg, const Ts &...args)
Recursive, variadic combining method.
uint64_t rotate(uint64_t val, size_t shift)
Bitwise right rotate.
friend bool operator==(const hash_code &lhs, const hash_code &rhs)
hash_code combine(size_t length, char *buffer_ptr, char *buffer_end)
Base case for recursive, variadic combining.
hash_code hash_value_tuple_helper(const std::tuple< Ts... > &arg, std::index_sequence< Indices... >)
uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed)
uint64_t fetch64(const char *p)
struct is_hashable_data
Trait to indicate whether a type's bits can be hashed directly.
uint64_t hash_16_bytes(uint64_t low, uint64_t high)
uint32_t fetch32(const char *p)
the resulting code requires compare and branches when and if the revised code is with conditional branches instead of More there is a byte word extend before each where there should be only and the condition codes are not remembered when the same two values are compared twice More LSR enhancements i8 and i32 load store addressing modes are identical int int c
static constexpr uint64_t k1
static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b)
Mix 32-bytes from the input sequence into the 16-bytes of 'a' and 'b', including whatever is already in our state.
static hash_code getTombstoneKey()
static constexpr uint64_t k3
friend bool operator!=(const hash_code &lhs, const hash_code &rhs)
hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last)
Implement the combining of integral values into a hash_code.
hash_combine_recursive_helper()
Construct a recursive hash combining helper.
uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed)
struct hash_combine_recursive_helper
Helper class to manage the recursive combining of hash_combine arguments.
uint64_t get_execution_seed()
void mix(const char *s)
Mix in a 64-byte buffer of data.
hash_code hash_integer_value(uint64_t value)
Helper to hash the value of a single integer.
uint64_t fixed_seed_override
A global, fixed seed-override variable.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
struct hash_state
The intermediate state used during hashing.
hash_code hash_value(const std::basic_string< T > &arg)
Compute a hash_code for a standard string.
uint64_t hash_short(const char *s, size_t length, uint64_t seed)
static bool isEqual(hash_code LHS, hash_code RHS)
std::enable_if_t< is_hashable_data< ValueT >::value, hash_code > hash_combine_range_impl(ValueT *first, ValueT *last)
Implement the combining of integral values into a hash_code.
static hash_state create(const char *s, uint64_t seed)
Create a new hash_state structure and initialize it based on the seed and the first 64-byte chunk.
hash_code()=default
Default construct a hash_code.
uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed)
static unsigned getHashValue(hash_code val)
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
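A typical use: implement hash_value for a user type by combining its fields (Symbol and its members are hypothetical, for illustration):

#include "llvm/ADT/Hashing.h"
#include <string>

struct Symbol {
  std::string Name;
  unsigned Line;
  bool IsDefined;
};

// Equal Symbols hash equally within a single execution of the program.
llvm::hash_code hash_value(const Symbol &S) {
  return llvm::hash_combine(S.Name, S.Line, S.IsDefined);
}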
bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T &value, size_t offset=0)
Helper to store data from a value into a buffer and advance the pointer into that buffer.
static hash_code getEmptyKey()
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
hash_code(size_t value)
Form a hash code directly from a numerical value.
static constexpr uint64_t k0
Some primes between 2^63 and 2^64 for various uses.
class hash_code
An opaque object representing a hash code.
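Because the file also specializes DenseMapInfo for hash_code (getEmptyKey, getTombstoneKey, getHashValue, and isEqual above), a hash_code can serve directly as a DenseMap key; a sketch, assuming llvm/ADT/DenseMap.h is available:

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"

void count_by_hash() {
  llvm::DenseMap<llvm::hash_code, unsigned> Counts;
  ++Counts[llvm::hash_value(42)]; // hash_code works as a key out of the box
}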