#define DEBUG_TYPE "on-disk-cas"

static Error createCorruptObjectError(Expected<ArrayRef<uint8_t>> ID) {
  if (!ID)
    return ID.takeError();
  return createStringError(llvm::errc::invalid_argument,
                           "corrupt object '" + toHex(*ID) + "'");
}
class TrieRecord {
public:
  enum class StorageKind : uint8_t {
    Unknown = 0,
    DataPool = 1,
    Standalone = 10,
    StandaloneLeaf = 11,
    StandaloneLeaf0 = 12,
  };

  static StringRef getStandaloneFilePrefix(StorageKind SK) {
    switch (SK) {
    case TrieRecord::StorageKind::Standalone:
      return FilePrefixObject;
    case TrieRecord::StorageKind::StandaloneLeaf:
      return FilePrefixLeaf;
    case TrieRecord::StorageKind::StandaloneLeaf0:
      return FilePrefixLeaf0;
    default:
      llvm_unreachable("Expected standalone storage kind");
    }
  }

  enum Limits : int64_t {
    /// Objects bigger than this are stored standalone instead of embedded.
    MaxEmbeddedSize = 64LL * 1024LL - 1,
  };

  struct Data {
    StorageKind SK = StorageKind::Unknown;
    FileOffset Offset;
  };
  static uint64_t pack(Data D) {
    assert(D.Offset.get() < (int64_t)(1ULL << 56));
    uint64_t Packed = uint64_t(D.SK) << 56 | D.Offset.get();
    assert(D.SK != StorageKind::Unknown || Packed == 0);
#ifndef NDEBUG
    Data RoundTrip = unpack(Packed);
    assert(D.SK == RoundTrip.SK);
    assert(D.Offset.get() == RoundTrip.Offset.get());
#endif
    return Packed;
  }

  static Data unpack(uint64_t Packed) {
    Data D;
    if (!Packed)
      return D;
    D.SK = (StorageKind)(Packed >> 56);
    D.Offset = FileOffset(Packed & (UINT64_MAX >> 8));
    return D;
  }
  TrieRecord() : Storage(0) {}

  Data load() const { return unpack(Storage); }
  bool compare_exchange_strong(Data &Existing, Data New);

private:
  std::atomic<uint64_t> Storage;
};
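// Illustrative sketch (not part of the original source): TrieRecord::pack()
// keeps the StorageKind in the top byte and the 56-bit file offset in the low
// bytes, so one atomic uint64_t captures both. Assuming the enumerator values
// above:
//
//   TrieRecord::Data D{TrieRecord::StorageKind::StandaloneLeaf0,
//                      FileOffset(0x20)};
//   // pack(D) == ((uint64_t)12 << 56) | 0x20 == 0x0C00000000000020
//
// unpack() reverses this by shifting the kind out of bits [56, 64) and masking
// the offset from bits [0, 56).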
struct DataRecordHandle {
  enum class NumRefsFlags : uint8_t {
    Uses0B,
    Uses1B,
    Uses2B,
    Uses4B,
    Uses8B,
    Max = Uses8B,
  };
  enum class DataSizeFlags {
    Uses1B,
    Uses2B,
    Uses4B,
    Uses8B,
    Max = Uses8B,
  };
  enum class RefKindFlags {
    InternalRef,
    InternalRef4B,
    Max = InternalRef4B,
  };

  enum {
    NumRefsShift = 0,
    NumRefsBits = 3,
    DataSizeShift = NumRefsShift + NumRefsBits,
    DataSizeBits = 2,
    RefKindShift = DataSizeShift + DataSizeBits,
    RefKindBits = 1,
  };
  static_assert(((UINT32_MAX << NumRefsBits) & (uint32_t)NumRefsFlags::Max) ==
                0);
  static_assert(((UINT32_MAX << DataSizeBits) & (uint32_t)DataSizeFlags::Max) ==
                0);
  static_assert(((UINT32_MAX << RefKindBits) & (uint32_t)RefKindFlags::Max) ==
                0);

  struct LayoutFlags {
    NumRefsFlags NumRefs;
    DataSizeFlags DataSize;
    RefKindFlags RefKind;

    static uint64_t pack(LayoutFlags LF) {
      unsigned Packed = ((unsigned)LF.NumRefs << NumRefsShift) |
                        ((unsigned)LF.DataSize << DataSizeShift) |
                        ((unsigned)LF.RefKind << RefKindShift);
#ifndef NDEBUG
      LayoutFlags RoundTrip = unpack(Packed);
      assert(LF.NumRefs == RoundTrip.NumRefs);
      assert(LF.DataSize == RoundTrip.DataSize);
      assert(LF.RefKind == RoundTrip.RefKind);
#endif
      return Packed;
    }
    static LayoutFlags unpack(uint64_t Storage) {
      assert(Storage <= UINT8_MAX && "Expect storage to fit in a byte");
      LayoutFlags LF;
      LF.NumRefs =
          (NumRefsFlags)((Storage >> NumRefsShift) & ((1U << NumRefsBits) - 1));
      LF.DataSize = (DataSizeFlags)((Storage >> DataSizeShift) &
                                    ((1U << DataSizeBits) - 1));
      LF.RefKind =
          (RefKindFlags)((Storage >> RefKindShift) & ((1U << RefKindBits) - 1));
      return LF;
    }
  };
  struct Header {
    using PackTy = uint32_t;
    PackTy Packed;

    static constexpr unsigned LayoutFlagsShift =
        (sizeof(PackTy) - 1) * CHAR_BIT;
  };
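  // Worked example (illustrative, not part of the original source): with
  // PackTy == uint32_t, LayoutFlagsShift == (4 - 1) * 8 == 24, so the packed
  // layout flags occupy the top byte of Header::Packed and the remaining 24
  // bits are available for small DataSize/NumRefs values (see constructImpl()
  // and getDataSize() below).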
  struct Input {
    InternalRefArrayRef Refs;
    ArrayRef<char> Data;
  };

  LayoutFlags getLayoutFlags() const {
    return LayoutFlags::unpack(H->Packed >> Header::LayoutFlagsShift);
  }

  uint64_t getDataSize() const;
  void skipDataSize(LayoutFlags LF, int64_t &RelOffset) const;
  uint32_t getNumRefs() const;
  void skipNumRefs(LayoutFlags LF, int64_t &RelOffset) const;
  int64_t getRefsRelOffset() const;
  int64_t getDataRelOffset() const;

  static uint64_t getTotalSize(uint64_t DataRelOffset, uint64_t DataSize) {
    return DataRelOffset + DataSize + 1;
  }
  uint64_t getTotalSize() const {
    return getDataRelOffset() + getDataSize() + 1;
  }

  struct Layout {
    explicit Layout(const Input &I);

    LayoutFlags Flags;
    uint64_t DataSize = 0;
    uint32_t NumRefs = 0;
    int64_t RefsRelOffset = 0;
    int64_t DataRelOffset = 0;
    uint64_t getTotalSize() const {
      return DataRecordHandle::getTotalSize(DataRelOffset, DataSize);
    }
  };
  InternalRefArrayRef getRefs() const {
    assert(H && "Expected valid handle");
    auto *BeginByte = reinterpret_cast<const char *>(H) + getRefsRelOffset();
    size_t Size = getNumRefs();
    if (!Size)
      return InternalRefArrayRef();
    if (getLayoutFlags().RefKind == RefKindFlags::InternalRef4B)
      return ArrayRef(reinterpret_cast<const InternalRef4B *>(BeginByte),
                      Size);
    return ArrayRef(reinterpret_cast<const InternalRef *>(BeginByte), Size);
  }

  ArrayRef<char> getData() const {
    assert(H && "Expected valid handle");
    return ArrayRef(reinterpret_cast<const char *>(H) + getDataRelOffset(),
                    getDataSize());
  }

  static DataRecordHandle create(function_ref<char *(size_t Size)> Alloc,
                                 const Input &I);
  static Expected<DataRecordHandle>
  createWithError(function_ref<Expected<char *>(size_t Size)> Alloc,
                  const Input &I);
  static DataRecordHandle construct(char *Mem, const Input &I);

  static DataRecordHandle get(const char *Mem) {
    return DataRecordHandle(
        *reinterpret_cast<const DataRecordHandle::Header *>(Mem));
  }
  static Expected<DataRecordHandle>
  getFromDataPool(const OnDiskDataAllocator &Pool, FileOffset Offset);

  explicit operator bool() const { return H; }
  const Header &getHeader() const { return *H; }

  DataRecordHandle() = default;
  explicit DataRecordHandle(const Header &H) : H(&H) {}

private:
  static DataRecordHandle constructImpl(char *Mem, const Input &I,
                                        const Layout &L);
  const Header *H = nullptr;
};
/// On-disk content of a node: either a full data record or just raw bytes.
struct OnDiskContent {
  std::optional<DataRecordHandle> Record;
  std::optional<ArrayRef<char>> Bytes;
};

/// A standalone ("big") object mapped into memory separately from the main
/// data pool.
class StandaloneDataInMemory {
public:
  OnDiskContent getContent() const;

  StandaloneDataInMemory(std::unique_ptr<sys::fs::mapped_file_region> Region,
                         TrieRecord::StorageKind SK)
      : Region(std::move(Region)), SK(SK) {
#ifndef NDEBUG
    bool IsStandalone = false;
    switch (SK) {
    case TrieRecord::StorageKind::Standalone:
    case TrieRecord::StorageKind::StandaloneLeaf:
    case TrieRecord::StorageKind::StandaloneLeaf0:
      IsStandalone = true;
      break;
    default:
      break;
    }
    assert(IsStandalone);
#endif
  }

private:
  std::unique_ptr<sys::fs::mapped_file_region> Region;
  TrieRecord::StorageKind SK;
};
/// Container for "big" objects mapped in standalone files, sharded to reduce
/// lock contention.
template <size_t NumShards> class StandaloneDataMap {
  static_assert(isPowerOf2_64(NumShards), "Expected power of 2");

public:
  uintptr_t insert(ArrayRef<uint8_t> Hash, TrieRecord::StorageKind SK,
                   std::unique_ptr<sys::fs::mapped_file_region> Region);

  const StandaloneDataInMemory *lookup(ArrayRef<uint8_t> Hash) const;
  bool count(ArrayRef<uint8_t> Hash) const { return bool(lookup(Hash)); }

private:
  struct Shard {
    /// Needs to store a std::unique_ptr for a stable address identity.
    DenseMap<const uint8_t *, std::unique_ptr<StandaloneDataInMemory>> Map;
    mutable std::mutex Mutex;
  };
  Shard &getShard(ArrayRef<uint8_t> Hash) {
    return const_cast<Shard &>(
        const_cast<const StandaloneDataMap *>(this)->getShard(Hash));
  }
  const Shard &getShard(ArrayRef<uint8_t> Hash) const {
    static_assert(NumShards <= 256, "Expected only 8 bits of shard");
    return Shards[Hash[0] % NumShards];
  }

  Shard Shards[NumShards];
};

using StandaloneDataMapTy = StandaloneDataMap<16>;
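// Illustrative note (not part of the original source): with the 16-shard
// instantiation above, getShard() picks Shards[Hash[0] % 16], i.e. the low
// four bits of the first hash byte select the shard. Each shard has its own
// mutex, so insertions of unrelated objects rarely contend:
//
//   StandaloneDataMapTy Map;
//   // Hash[0] == 0x2A -> shard 0x2A % 16 == 10
//   // Map.insert(Hash, SK, std::move(Region)) locks only Shards[10].Mutex.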
/// Builds the reference list for a new record, using compact 4-byte refs while
/// possible and widening everything to 8-byte refs when necessary.
class InternalRefVector {
public:
  void push_back(InternalRef Ref) {
    if (NeedsFull)
      return FullRefs.push_back(Ref);
    if (std::optional<InternalRef4B> Small = InternalRef4B::tryToShrink(Ref))
      return SmallRefs.push_back(*Small);
    NeedsFull = true;
    FullRefs.reserve(SmallRefs.size() + 1);
    for (InternalRef4B Small : SmallRefs)
      FullRefs.push_back(Small);
    FullRefs.push_back(Ref);
  }

  operator InternalRefArrayRef() const {
    assert(SmallRefs.empty() || FullRefs.empty());
    return NeedsFull ? InternalRefArrayRef(FullRefs)
                     : InternalRefArrayRef(SmallRefs);
  }

private:
  bool NeedsFull = false;
  SmallVector<InternalRef4B> SmallRefs;
  SmallVector<InternalRef> FullRefs;
};
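// Illustrative usage sketch (not part of the original source): refs stay in
// the 4-byte vector until one cannot be shrunk, at which point everything is
// widened once and later refs go straight to the 8-byte vector:
//
//   InternalRefVector Refs;
//   Refs.push_back(SmallOffsetRef);   // stored as InternalRef4B
//   Refs.push_back(HugeOffsetRef);    // cannot shrink: all refs widen to 8B
//   InternalRefArrayRef Array = Refs; // now backed by FullRefs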
Expected<DataRecordHandle> DataRecordHandle::createWithError(
    function_ref<Expected<char *>(size_t Size)> Alloc, const Input &I) {
  Layout L(I);
  if (Expected<char *> Mem = Alloc(L.getTotalSize()))
    return constructImpl(*Mem, I, L);
  else
    return Mem.takeError();
}

template <size_t N>
uintptr_t StandaloneDataMap<N>::insert(
    ArrayRef<uint8_t> Hash, TrieRecord::StorageKind SK,
    std::unique_ptr<sys::fs::mapped_file_region> Region) {
  auto &S = getShard(Hash);
  std::lock_guard<std::mutex> Lock(S.Mutex);
  auto &V = S.Map[Hash.data()];
  if (!V)
    V = std::make_unique<StandaloneDataInMemory>(std::move(Region), SK);
  return reinterpret_cast<uintptr_t>(V.get());
}

template <size_t N>
const StandaloneDataInMemory *
StandaloneDataMap<N>::lookup(ArrayRef<uint8_t> Hash) const {
  auto &S = getShard(Hash);
  std::lock_guard<std::mutex> Lock(S.Mutex);
  auto I = S.Map.find(Hash.data());
  if (I == S.Map.end())
    return nullptr;
  return &*I->second;
}
/// Private copy of a temp-file helper: remembers the name and descriptor of a
/// uniquely created temporary file.
class TempFile {
public:
  TempFile(StringRef Name, int FD) : TmpName(std::string(Name)), FD(FD) {}

  static Expected<TempFile> create(const Twine &Model);

  TempFile(TempFile &&Other) { *this = std::move(Other); }
  TempFile &operator=(TempFile &&Other) {
    TmpName = std::move(Other.TmpName);
    FD = Other.FD;
    Other.FD = -1;
    return *this;
  }

  /// Keep the temporary file by renaming it to \p Name.
  Error keep(const Twine &Name);
  /// Remove the temporary file.
  Error discard();

  /// Name of the temporary file.
  std::string TmpName;

  /// The open file descriptor.
  int FD = -1;
};

/// A TempFile together with a writable mapping of its contents.
class MappedTempFile {
public:
  char *data() const { return Map.data(); }
  size_t size() const { return Map.size(); }

  Error discard() {
    assert(Map && "Map already destroyed");
    // ...
    return Temp.discard();
  }

  Error keep(const Twine &Name) {
    assert(Map && "Map already destroyed");
    // ...
    return Temp.keep(Name);
  }

  MappedTempFile(TempFile Temp, sys::fs::mapped_file_region Map)
      : Temp(std::move(Temp)), Map(std::move(Map)) {}

private:
  TempFile Temp;
  sys::fs::mapped_file_region Map;
};

Error TempFile::discard() {
  // Close the descriptor and remove the file if it still exists (elided).
  std::error_code RemoveEC;
  // ...
}

Expected<TempFile> TempFile::create(const Twine &Model) {
  // Create a uniquely named file from Model (elided).
  // ...
  TempFile Ret(ResultPath, FD);
  return std::move(Ret);
}
bool TrieRecord::compare_exchange_strong(Data &Existing, Data New) {
  uint64_t ExistingPacked = pack(Existing);
  uint64_t NewPacked = pack(New);
  if (Storage.compare_exchange_strong(ExistingPacked, NewPacked))
    return true;
  Existing = unpack(ExistingPacked);
  return false;
}
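// Illustrative usage sketch (not part of the original source): callers follow
// the usual compare-exchange pattern and inspect the losing value when another
// thread or process raced them to the same record:
//
//   TrieRecord::Data Existing;            // expect the Unknown/empty state
//   TrieRecord::Data New{SK, Offset};
//   if (!Record.compare_exchange_strong(Existing, New)) {
//     // Lost the race: Existing now holds what the winner stored.
//   }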
Expected<DataRecordHandle>
DataRecordHandle::getFromDataPool(const OnDiskDataAllocator &Pool,
                                  FileOffset Offset) {
  auto HeaderData = Pool.get(Offset, sizeof(DataRecordHandle::Header));
  if (!HeaderData)
    return HeaderData.takeError();

  auto Record = DataRecordHandle::get(HeaderData->data());
  if (Record.getTotalSize() + Offset.get() > Pool.size())
    return createStringError(
        make_error_code(std::errc::illegal_byte_sequence),
        "data record span passed the end of the data pool");

  return Record;
}
DataRecordHandle DataRecordHandle::constructImpl(char *Mem, const Input &I,
                                                 const Layout &L) {
  char *Next = Mem + sizeof(Header);

  // Fill in Packed and set other data, then come back to construct the header.
  Header::PackTy Packed = 0;
  Packed |= LayoutFlags::pack(L.Flags) << Header::LayoutFlagsShift;

  // Construct DataSize.
  switch (L.Flags.DataSize) {
  case DataSizeFlags::Uses1B:
    assert(I.Data.size() <= UINT8_MAX);
    Packed |= (Header::PackTy)I.Data.size()
              << ((sizeof(Packed) - 2) * CHAR_BIT);
    break;
  case DataSizeFlags::Uses2B:
    assert(I.Data.size() <= UINT16_MAX);
    Packed |= (Header::PackTy)I.Data.size()
              << ((sizeof(Packed) - 4) * CHAR_BIT);
    break;
  case DataSizeFlags::Uses4B:
    support::endian::write32le(Next, I.Data.size());
    Next += 4;
    break;
  case DataSizeFlags::Uses8B:
    support::endian::write64le(Next, I.Data.size());
    Next += 8;
    break;
  }

  // Construct NumRefs.
  switch (L.Flags.NumRefs) {
  case NumRefsFlags::Uses0B:
    break;
  case NumRefsFlags::Uses1B:
    assert(I.Refs.size() <= UINT8_MAX);
    Packed |= (Header::PackTy)I.Refs.size()
              << ((sizeof(Packed) - 2) * CHAR_BIT);
    break;
  case NumRefsFlags::Uses2B:
    assert(I.Refs.size() <= UINT16_MAX);
    Packed |= (Header::PackTy)I.Refs.size()
              << ((sizeof(Packed) - 4) * CHAR_BIT);
    break;
  case NumRefsFlags::Uses4B:
    support::endian::write32le(Next, I.Refs.size());
    Next += 4;
    break;
  case NumRefsFlags::Uses8B:
    support::endian::write64le(Next, I.Refs.size());
    Next += 8;
    break;
  }

  // Construct Refs[].
  if (!I.Refs.empty()) {
    assert((L.Flags.RefKind == RefKindFlags::InternalRef4B) == I.Refs.is4B());
    ArrayRef<uint8_t> RefsBuffer = I.Refs.getBuffer();
    llvm::copy(RefsBuffer, Next);
    Next += RefsBuffer.size();
  }

  // Construct the data and guarantee null-termination.
  llvm::copy(I.Data, Next);
  Next[I.Data.size()] = 0;

  // Construct the header itself and return.
  Header *H = new (Mem) Header{Packed};
  DataRecordHandle Record(*H);
  assert(Record.getLayoutFlags().DataSize == L.Flags.DataSize);
  return Record;
}
DataRecordHandle::Layout::Layout(const Input &I) {
  // Start initial relative offsets right after the Header.
  uint64_t RelOffset = sizeof(Header);

  // Initialize the easy stuff.
  DataSize = I.Data.size();
  NumRefs = I.Refs.size();

  // Check the ref encoding.
  Flags.RefKind =
      I.Refs.is4B() ? RefKindFlags::InternalRef4B : RefKindFlags::InternalRef;

  // Find the smallest slot available for DataSize.
  bool Has1B = true;
  bool Has2B = true;
  if (DataSize <= UINT8_MAX && Has1B) {
    Flags.DataSize = DataSizeFlags::Uses1B;
    Has1B = false;
  } else if (DataSize <= UINT16_MAX && Has2B) {
    Flags.DataSize = DataSizeFlags::Uses2B;
    Has2B = false;
  } else if (DataSize <= UINT32_MAX) {
    Flags.DataSize = DataSizeFlags::Uses4B;
    RelOffset += 4;
  } else {
    Flags.DataSize = DataSizeFlags::Uses8B;
    RelOffset += 8;
  }

  // Find the smallest slot available for NumRefs.
  if (!NumRefs) {
    Flags.NumRefs = NumRefsFlags::Uses0B;
  } else if (NumRefs <= UINT8_MAX && Has1B) {
    Flags.NumRefs = NumRefsFlags::Uses1B;
    Has1B = false;
  } else if (NumRefs <= UINT16_MAX && Has2B) {
    Flags.NumRefs = NumRefsFlags::Uses2B;
    Has2B = false;
  } else {
    Flags.NumRefs = NumRefsFlags::Uses4B;
    RelOffset += 4;
  }

  // Helper to "upgrade" either DataSize or NumRefs by 4B to avoid complicated
  // padding rules when reading and writing. This also bumps RelOffset.
  auto GrowSizeFieldsBy4B = [&]() {
    assert(isAligned(Align(4), RelOffset));
    RelOffset += 4;

    assert(Flags.NumRefs != NumRefsFlags::Uses8B &&
           "Expected to be able to grow NumRefs8B");

    if (Flags.DataSize < DataSizeFlags::Uses4B)
      Flags.DataSize = DataSizeFlags::Uses4B;
    else if (Flags.DataSize < DataSizeFlags::Uses8B)
      Flags.DataSize = DataSizeFlags::Uses8B;
    else if (Flags.NumRefs < NumRefsFlags::Uses4B)
      Flags.NumRefs = NumRefsFlags::Uses4B;
    else
      Flags.NumRefs = NumRefsFlags::Uses8B;
  };

  // Compute the offsets of the ref list and the data, growing the size fields
  // if needed to keep the layout suitably aligned.
  if (Flags.RefKind == RefKindFlags::InternalRef) {
    // A list of 8B refs should be 8B-aligned.
    if (!isAligned(Align(8), RelOffset))
      GrowSizeFieldsBy4B();
    RefsRelOffset = RelOffset;
    RelOffset += 8 * NumRefs;
  } else {
    // The 4B ref list itself does not need 8B alignment, but the data that
    // follows it does.
    uint64_t RefListSize = 4 * NumRefs;
    if (!isAligned(Align(8), RelOffset + RefListSize))
      GrowSizeFieldsBy4B();
    RefsRelOffset = RelOffset;
    RelOffset += RefListSize;
  }

  DataRelOffset = RelOffset;
}
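// Worked example (illustrative, not part of the original source): for a record
// with 3 refs that all shrink to 4 bytes and 100 bytes of data:
//   - DataSize (100) fits in the 1-byte slot inside Header::Packed, so
//     Flags.DataSize == Uses1B and RelOffset stays at sizeof(Header).
//   - NumRefs (3) would also fit in one byte, but that slot is taken, so it
//     lands in the 2-byte slot: Flags.NumRefs == Uses2B.
//   - The 4-byte refs occupy 12 bytes; if sizeof(Header) + 12 is not 8-byte
//     aligned, GrowSizeFieldsBy4B() widens one of the size fields so the data
//     following the ref list ends up 8-byte aligned.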
uint64_t DataRecordHandle::getDataSize() const {
  int64_t RelOffset = sizeof(Header);
  auto *DataSizePtr = reinterpret_cast<const char *>(H) + RelOffset;
  switch (getLayoutFlags().DataSize) {
  case DataSizeFlags::Uses1B:
    return (H->Packed >> ((sizeof(Header::PackTy) - 2) * CHAR_BIT)) &
           UINT8_MAX;
  case DataSizeFlags::Uses2B:
    return (H->Packed >> ((sizeof(Header::PackTy) - 4) * CHAR_BIT)) &
           UINT16_MAX;
  case DataSizeFlags::Uses4B:
    return support::endian::read32le(DataSizePtr);
  case DataSizeFlags::Uses8B:
    return support::endian::read64le(DataSizePtr);
  }
  llvm_unreachable("Unknown DataSizeFlags enum");
}
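// Decoding note (illustrative, not part of the original source): with a 32-bit
// Header::PackTy the packed word is carved up as
//   bits 24-31: layout flags (Header::LayoutFlagsShift == 24)
//   bits 16-23: the 1-byte DataSize or NumRefs slot ((sizeof(PackTy)-2)*8)
//   bits  0-15: the 2-byte DataSize or NumRefs slot ((sizeof(PackTy)-4)*8)
// Sizes that need 4 or 8 bytes are stored after the header instead and read
// back with read32le/read64le.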
void DataRecordHandle::skipDataSize(LayoutFlags LF, int64_t &RelOffset) const {
  if (LF.DataSize >= DataSizeFlags::Uses4B)
    RelOffset += 4;
  if (LF.DataSize >= DataSizeFlags::Uses8B)
    RelOffset += 4;
}

uint32_t DataRecordHandle::getNumRefs() const {
  LayoutFlags LF = getLayoutFlags();
  int64_t RelOffset = sizeof(Header);
  skipDataSize(LF, RelOffset);
  auto *NumRefsPtr = reinterpret_cast<const char *>(H) + RelOffset;
  switch (LF.NumRefs) {
  case NumRefsFlags::Uses0B:
    return 0;
  case NumRefsFlags::Uses1B:
    return (H->Packed >> ((sizeof(Header::PackTy) - 2) * CHAR_BIT)) &
           UINT8_MAX;
  case NumRefsFlags::Uses2B:
    return (H->Packed >> ((sizeof(Header::PackTy) - 4) * CHAR_BIT)) &
           UINT16_MAX;
  case NumRefsFlags::Uses4B:
    return support::endian::read32le(NumRefsPtr);
  case NumRefsFlags::Uses8B:
    return support::endian::read64le(NumRefsPtr);
  }
  llvm_unreachable("Unknown NumRefsFlags enum");
}

void DataRecordHandle::skipNumRefs(LayoutFlags LF, int64_t &RelOffset) const {
  if (LF.NumRefs >= NumRefsFlags::Uses4B)
    RelOffset += 4;
  if (LF.NumRefs >= NumRefsFlags::Uses8B)
    RelOffset += 4;
}

int64_t DataRecordHandle::getRefsRelOffset() const {
  LayoutFlags LF = getLayoutFlags();
  int64_t RelOffset = sizeof(Header);
  skipDataSize(LF, RelOffset);
  skipNumRefs(LF, RelOffset);
  return RelOffset;
}

int64_t DataRecordHandle::getDataRelOffset() const {
  LayoutFlags LF = getLayoutFlags();
  int64_t RelOffset = sizeof(Header);
  skipDataSize(LF, RelOffset);
  skipNumRefs(LF, RelOffset);
  uint32_t RefSize = LF.RefKind == RefKindFlags::InternalRef4B ? 4 : 8;
  RelOffset += RefSize * getNumRefs();
  return RelOffset;
}
Error OnDiskGraphDB::validate(bool Deep, HashingFuncT Hasher) const {
  if (UpstreamDB)
    if (auto E = UpstreamDB->validate(Deep, Hasher))
      return E;

  return Index.validate([&](FileOffset Offset,
                            OnDiskTrieRawHashMap::ConstValueProxy Record)
                            -> Error {
    auto formatError = [&](Twine Msg) {
      return createStringError(
          make_error_code(std::errc::illegal_byte_sequence),
          "bad record at offset " + utohexstr(Offset.get()) + ": " +
              Msg.str());
    };

    if (Record.Data.size() != sizeof(TrieRecord))
      return formatError("wrong data record size");
    if (!isAddrAligned(Align::Of<TrieRecord>(), Record.Data.data()))
      return formatError("wrong data record alignment");

    auto *R = reinterpret_cast<const TrieRecord *>(Record.Data.data());
    TrieRecord::Data D = R->load();
    std::unique_ptr<MemoryBuffer> FileBuffer;
    switch (D.SK) {
    case TrieRecord::StorageKind::Unknown:
    case TrieRecord::StorageKind::DataPool:
    case TrieRecord::StorageKind::Standalone:
    case TrieRecord::StorageKind::StandaloneLeaf:
    case TrieRecord::StorageKind::StandaloneLeaf0:
      break;
    default:
      return formatError("invalid record kind value");
    }

    auto Ref = InternalRef::getFromOffset(Offset);
    auto I = getIndexProxyFromRef(Ref);
    if (!I)
      return I.takeError();

    switch (D.SK) {
    case TrieRecord::StorageKind::Unknown:
      // Not filled in yet; nothing else to check.
      return Error::success();
    case TrieRecord::StorageKind::DataPool:
      // The offset must be positive and leave room for the record header.
      if (D.Offset.get() <= 0 ||
          D.Offset.get() + sizeof(DataRecordHandle::Header) >= DataPool.size())
        return formatError("datapool record out of bound");
      break;
    case TrieRecord::StorageKind::Standalone:
    case TrieRecord::StorageKind::StandaloneLeaf:
    case TrieRecord::StorageKind::StandaloneLeaf0: {
      SmallString<256> Path;
      getStandalonePath(TrieRecord::getStandaloneFilePrefix(D.SK), *I, Path);
      if (Deep) {
        // Load the standalone file so its contents can be checked below.
        auto File = MemoryBuffer::getFile(Path);
        if (!File)
          return formatError("record file \'" + Path + "\' does not exist");
        FileBuffer = std::move(*File);
      } else if (!sys::fs::exists(Path)) {
        return formatError("record file \'" + Path + "\' does not exist");
      }
      break;
    }
    }

    if (!Deep)
      return Error::success();
    auto dataError = [&](Twine Msg) {
      return createStringError(make_error_code(std::errc::illegal_byte_sequence),
                               "bad data for digest \'" + toHex(I->Hash) +
                                   "\': " + Msg.str());
    };

    SmallVector<ArrayRef<uint8_t>> Refs;
    ArrayRef<char> StoredData;

    switch (D.SK) {
    case TrieRecord::StorageKind::Unknown:
      llvm_unreachable("already handled above");
    case TrieRecord::StorageKind::DataPool: {
      auto DataRecord = DataRecordHandle::getFromDataPool(DataPool, D.Offset);
      if (!DataRecord)
        return dataError(toString(DataRecord.takeError()));

      for (auto InternRef : DataRecord->getRefs()) {
        auto Index = getIndexProxyFromRef(InternRef);
        if (!Index)
          return Index.takeError();
        Refs.push_back(Index->Hash);
      }
      StoredData = DataRecord->getData();
      break;
    }
    case TrieRecord::StorageKind::Standalone: {
      if (FileBuffer->getBufferSize() < sizeof(DataRecordHandle::Header))
        return dataError("data record is not big enough to read the header");
      auto DataRecord = DataRecordHandle::get(FileBuffer->getBufferStart());
      if (DataRecord.getTotalSize() < FileBuffer->getBufferSize())
        return dataError(
            "data record span passed the end of the standalone file");
      for (auto InternRef : DataRecord.getRefs()) {
        auto Index = getIndexProxyFromRef(InternRef);
        if (!Index)
          return Index.takeError();
        Refs.push_back(Index->Hash);
      }
      StoredData = DataRecord.getData();
      break;
    }
    case TrieRecord::StorageKind::StandaloneLeaf:
    case TrieRecord::StorageKind::StandaloneLeaf0: {
      StoredData = arrayRefFromStringRef<char>(FileBuffer->getBuffer());
      if (D.SK == TrieRecord::StorageKind::StandaloneLeaf0) {
        if (!FileBuffer->getBuffer().ends_with('\0'))
          return dataError("standalone file is not zero terminated");
        StoredData = StoredData.drop_back(1);
      }
      break;
    }
    }

    SmallVector<uint8_t> ComputedHash;
    Hasher(Refs, StoredData, ComputedHash);
    if (I->Hash != ArrayRef(ComputedHash))
      return dataError("hash mismatch, got \'" + toHex(ComputedHash) +
                       "\' instead");

    return Error::success();
  });
}
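// Note (illustrative, not part of the original source): validation has two
// levels. Shallow validation (Deep == false) only checks the structure of each
// index record and that standalone files exist. Deep validation also re-reads
// every object's refs and data and recomputes the digest through the
// caller-supplied Hasher(Refs, StoredData, ComputedHash), reporting a
// "hash mismatch" error when the stored hash does not match.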
Error OnDiskGraphDB::validateObjectID(ObjectID ExternalRef) {
  auto formatError = [&](Twine Msg) {
    return createStringError(llvm::errc::invalid_argument, Msg.str());
  };

  if (!ExternalRef.getOpaqueData())
    return formatError("zero is not a valid ref");

  auto I = getIndexProxyFromRef(getInternalRef(ExternalRef));
  if (!I)
    return I.takeError();

  // The hash stored for this record must be indexed at the same offset.
  ArrayRef<uint8_t> Hash = I->Hash;
  auto OtherP = indexHash(Hash);
  if (!OtherP)
    return formatError("not found using hash " + toHex(Hash));
  IndexProxy OtherI = *OtherP;

  ObjectID OtherRef = getExternalReference(makeInternalRef(OtherI.Offset));
  if (OtherRef != ExternalRef)
    return formatError("ref does not match indexed offset " +
                       Twine(OtherI.Offset.get()) + " for hash " +
                       toHex(Hash));
  return Error::success();
}
void OnDiskGraphDB::print(raw_ostream &OS) const {
  OS << "on-disk-root-path: " << RootPath << "\n";

  struct PoolInfo {
    uint64_t Offset;
  };
  SmallVector<PoolInfo> Pool;

  // Print each index record, remembering data-pool records so the pool can be
  // dumped afterwards (the index-iteration callback plumbing is elided here).
  // ...
  {
    auto *R = reinterpret_cast<const TrieRecord *>(Data.data());
    TrieRecord::Data D = R->load();
    switch (D.SK) {
    case TrieRecord::StorageKind::Unknown:
      // ...
      break;
    case TrieRecord::StorageKind::DataPool:
      // ...
      break;
    case TrieRecord::StorageKind::Standalone:
      OS << "standalone-data ";
      break;
    case TrieRecord::StorageKind::StandaloneLeaf:
      OS << "standalone-leaf ";
      break;
    case TrieRecord::StorageKind::StandaloneLeaf0:
      OS << "standalone-leaf+0";
      break;
    }
    OS << " Offset=" << (void *)D.Offset.get();
  }
  // ...

  llvm::sort(Pool, [](PoolInfo LHS, PoolInfo RHS) {
    return LHS.Offset < RHS.Offset;
  });
  for (PoolInfo PI : Pool) {
    OS << "- addr=" << (void *)PI.Offset << " ";
    auto D = DataRecordHandle::getFromDataPool(DataPool, FileOffset(PI.Offset));
    if (!D) {
      OS << "error: " << toString(D.takeError());
      return;
    }
    OS << "record refs=" << D->getNumRefs() << " data=" << D->getDataSize()
       << " size=" << D->getTotalSize()
       << " end=" << (void *)(PI.Offset + D->getTotalSize()) << "\n";
  }
}
Expected<IndexProxy> OnDiskGraphDB::indexHash(ArrayRef<uint8_t> Hash) {
  auto P = Index.insertLazy(
      Hash, [](FileOffset TentativeOffset,
               OnDiskTrieRawHashMap::ValueProxy TentativeValue) {
        assert(TentativeValue.Data.size() == sizeof(TrieRecord));
        assert(
            isAddrAligned(Align::Of<TrieRecord>(), TentativeValue.Data.data()));
        new (TentativeValue.Data.data()) TrieRecord();
      });
  if (!P)
    return P.takeError();

  assert(*P && "Expected insertion");
  return getIndexProxyFromPointer(*P);
}

IndexProxy OnDiskGraphDB::getIndexProxyFromPointer(
    OnDiskTrieRawHashMap::ConstOnDiskPtr P) const {
  assert(P);
  return IndexProxy{P.getOffset(), P->Hash,
                    *const_cast<TrieRecord *>(
                        reinterpret_cast<const TrieRecord *>(P->Data.data()))};
}

Expected<ObjectID> OnDiskGraphDB::getReference(ArrayRef<uint8_t> Hash) {
  auto I = indexHash(Hash);
  if (!I)
    return I.takeError();
  return getExternalReference(*I);
}

ObjectID OnDiskGraphDB::getExternalReference(const IndexProxy &I) {
  return getExternalReference(makeInternalRef(I.Offset));
}
std::optional<ObjectID>
OnDiskGraphDB::getExistingReference(ArrayRef<uint8_t> Digest,
                                    bool CheckUpstream) {
  auto tryUpstream =
      [&](std::optional<IndexProxy> I) -> std::optional<ObjectID> {
    if (!CheckUpstream || !UpstreamDB)
      return std::nullopt;
    std::optional<ObjectID> UpstreamID =
        UpstreamDB->getExistingReference(Digest);
    if (!UpstreamID)
      return std::nullopt;
    // Make sure the digest is indexed locally before handing out a reference
    // (details elided).
    // ...
    return getExternalReference(*I);
  };

  // Check the local index first; fall back to the upstream database when the
  // digest is missing or the record is still unknown.
  auto P = Index.find(Digest);
  if (!P)
    return tryUpstream(std::nullopt);
  IndexProxy I = getIndexProxyFromPointer(P);
  TrieRecord::Data Obj = I.Ref.load();
  if (Obj.SK == TrieRecord::StorageKind::Unknown)
    return tryUpstream(I);
  return getExternalReference(makeInternalRef(I.Offset));
}
Expected<IndexProxy>
OnDiskGraphDB::getIndexProxyFromRef(InternalRef Ref) const {
  auto P = Index.recoverFromFileOffset(Ref.getFileOffset());
  if (!P)
    return P.takeError();
  return getIndexProxyFromPointer(*P);
}

// ...
  auto I = getIndexProxyFromRef(Ref);
  if (!I)
    return I.takeError();
// ...

static OnDiskContent getContentFromHandle(const OnDiskDataAllocator &DataPool,
                                          ObjectHandle OH) {
  uint64_t Data = OH.getOpaqueData();
  // Standalone ("big") objects are served from the in-memory map; the low bit
  // of the opaque value distinguishes them from data-pool offsets.
  if (Data & 1) {
    const auto *SDIM =
        reinterpret_cast<const StandaloneDataInMemory *>(Data & (-1ULL << 1));
    return SDIM->getContent();
  }

  auto DataHandle =
      cantFail(DataRecordHandle::getFromDataPool(DataPool, FileOffset(Data)));
  assert(DataHandle.getData().end()[0] == 0 && "Null termination");
  return OnDiskContent{DataHandle, std::nullopt};
}
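// Note (illustrative, not part of the original source; the exact tag
// convention is assumed): an ObjectHandle's opaque value is either the address
// of a StandaloneDataInMemory registered in the StandaloneDataMap
// (ObjectHandle::fromMemory) or a data-pool offset
// (ObjectHandle::fromFileOffset). getContentFromHandle() above masks off the
// low bit before treating the value as a pointer, which is what the
// (-1ULL << 1) expression implements.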
ArrayRef<char> OnDiskGraphDB::getObjectData(ObjectHandle Node) const {
  OnDiskContent Content = getContentFromHandle(DataPool, Node);
  if (Content.Bytes)
    return *Content.Bytes;
  assert(Content.Record && "Expected record or bytes");
  return Content.Record->getData();
}

// ...
  if (std::optional<DataRecordHandle> Record =
          getContentFromHandle(DataPool, Node).Record)
    return Record->getRefs();
  return std::nullopt;
// ...
Expected<std::optional<ObjectHandle>>
OnDiskGraphDB::load(ObjectID ExternalRef) {
  InternalRef Ref = getInternalRef(ExternalRef);
  auto I = getIndexProxyFromRef(Ref);
  if (!I)
    return I.takeError();

  TrieRecord::Data Object = I->Ref.load();

  if (Object.SK == TrieRecord::StorageKind::Unknown)
    return faultInFromUpstream(ExternalRef);

  // Data-pool objects can be handed out directly by offset.
  if (Object.SK == TrieRecord::StorageKind::DataPool)
    return ObjectHandle::fromFileOffset(Object.Offset);

  // Everything else is standalone and must be mapped in from its own file.
  switch (Object.SK) {
  case TrieRecord::StorageKind::Unknown:
  case TrieRecord::StorageKind::DataPool:
    llvm_unreachable("Unexpected storage kind");
  case TrieRecord::StorageKind::Standalone:
  case TrieRecord::StorageKind::StandaloneLeaf0:
  case TrieRecord::StorageKind::StandaloneLeaf:
    break;
  }

  SmallString<256> Path;
  getStandalonePath(TrieRecord::getStandaloneFilePrefix(Object.SK), *I, Path);

  // Open and stat the file (elided), then map it read-only and register the
  // mapping so later loads reuse it.
  // ...
  auto Region = std::make_unique<sys::fs::mapped_file_region>(
      sys::fs::convertFDToNativeFile(FD), sys::fs::mapped_file_region::readonly,
      Status.getSize(), /*offset=*/0, EC);
  if (EC)
    return createFileError(Path, EC);

  return ObjectHandle::fromMemory(
      static_cast<StandaloneDataMapTy *>(StandaloneData)
          ->insert(I->Hash, Object.SK, std::move(Region)));
}
Expected<bool> OnDiskGraphDB::isMaterialized(ObjectID Ref) {
  auto Presence = getObjectPresence(Ref, /*CheckUpstream=*/true);
  if (!Presence)
    return Presence.takeError();

  switch (*Presence) {
  case ObjectPresence::Missing:
    return false;
  case ObjectPresence::InPrimaryDB:
    return true;
  case ObjectPresence::OnlyInUpstreamDB:
    if (auto FaultInResult = faultInFromUpstream(Ref); !FaultInResult)
      return FaultInResult.takeError();
    return true;
  }
  llvm_unreachable("Unknown ObjectPresence enum");
}

Expected<OnDiskGraphDB::ObjectPresence>
OnDiskGraphDB::getObjectPresence(ObjectID ExternalRef,
                                 bool CheckUpstream) const {
  InternalRef Ref = getInternalRef(ExternalRef);
  auto I = getIndexProxyFromRef(Ref);
  if (!I)
    return I.takeError();

  TrieRecord::Data Object = I->Ref.load();
  if (Object.SK != TrieRecord::StorageKind::Unknown)
    return ObjectPresence::InPrimaryDB;

  if (!CheckUpstream || !UpstreamDB)
    return ObjectPresence::Missing;

  std::optional<ObjectID> UpstreamID =
      UpstreamDB->getExistingReference(getDigest(*I));
  return UpstreamID.has_value() ? ObjectPresence::OnlyInUpstreamDB
                                : ObjectPresence::Missing;
}

void OnDiskGraphDB::getStandalonePath(StringRef Prefix, const IndexProxy &I,
                                      SmallVectorImpl<char> &Path) const {
  Path.assign(RootPath.begin(), RootPath.end());
  // Append the standalone file name derived from Prefix and the record's
  // index offset (elided).
  // ...
}
OnDiskContent StandaloneDataInMemory::getContent() const {
  bool Leaf0 = false;
  bool Leaf = false;
  switch (SK) {
  default:
    llvm_unreachable("Storage kind must be standalone");
  case TrieRecord::StorageKind::Standalone:
    break;
  case TrieRecord::StorageKind::StandaloneLeaf0:
    Leaf = Leaf0 = true;
    break;
  case TrieRecord::StorageKind::StandaloneLeaf:
    Leaf = true;
    break;
  }

  if (Leaf) {
    StringRef Data(Region->data(), Region->size());
    assert(Data.drop_back(Leaf0).end()[0] == 0 &&
           "Standalone node data missing null termination");
    return OnDiskContent{std::nullopt,
                         arrayRefFromStringRef<char>(Data.drop_back(Leaf0))};
  }

  DataRecordHandle Record = DataRecordHandle::get(Region->data());
  assert(Record.getData().end()[0] == 0 &&
         "Standalone object record missing null termination for data");
  return OnDiskContent{Record, std::nullopt};
}
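// Note (illustrative, not part of the original source): for leaf objects the
// mapped file *is* the data. StandaloneLeaf0 files carry one extra trailing
// null byte, so drop_back(Leaf0) trims it before the bytes are handed out,
// while plain StandaloneLeaf files are returned as-is. Non-leaf standalone
// files start with a full DataRecordHandle header instead.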
static Expected<MappedTempFile> createTempFile(StringRef FinalPath,
                                               uint64_t Size) {
  assert(Size && "Unexpected request for an empty temp file");

  // Create a uniquely named temporary file next to FinalPath (elided).
  // ...
  if (!File)
    return File.takeError();

  // Size the file and map it read-write (elided).
  // ...
  return MappedTempFile(std::move(*File), std::move(Map));
}
Error OnDiskGraphDB::createStandaloneLeaf(IndexProxy &I, ArrayRef<char> Data) {
  assert(Data.size() > TrieRecord::MaxEmbeddedSize &&
         "Expected a bigger file for external content...");

  bool Leaf0 = isAligned(Align(getPageSize()), Data.size());
  TrieRecord::StorageKind SK = Leaf0 ? TrieRecord::StorageKind::StandaloneLeaf0
                                     : TrieRecord::StorageKind::StandaloneLeaf;

  SmallString<256> Path;
  int64_t FileSize = Data.size() + Leaf0;
  getStandalonePath(TrieRecord::getStandaloneFilePrefix(SK), I, Path);

  // Write the leaf data to a temporary file and move it into place.
  Expected<MappedTempFile> File = createTempFile(Path, FileSize);
  if (!File)
    return File.takeError();
  llvm::copy(Data, File->data());
  if (Error E = File->keep(Path))
    return E;

  // Store the object reference.
  TrieRecord::Data Existing;
  {
    TrieRecord::Data Leaf{SK, FileOffset()};
    if (I.Ref.compare_exchange_strong(Existing, Leaf)) {
      recordStandaloneSizeIncrease(FileSize);
      return Error::success();
    }
  }

  // If there was a race, confirm that the winning value has valid storage.
  if (Existing.SK == TrieRecord::StorageKind::Unknown)
    return createCorruptObjectError(getDigest(I));

  return Error::success();
}
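// Note (illustrative, not part of the original source): the on-disk size is
// Data.size() + Leaf0, so a StandaloneLeaf0 file is exactly one byte larger
// than its payload. The extra trailing zero keeps the mapped contents
// null-terminated; validate() checks this via ends_with('\0') and
// StandaloneDataInMemory::getContent() strips it again with drop_back(Leaf0).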
Error OnDiskGraphDB::store(ObjectID ID, ArrayRef<ObjectID> Refs,
                           ArrayRef<char> Data) {
  auto I = getIndexProxyFromRef(getInternalRef(ID));
  if (!I)
    return I.takeError();

  // Early return in case the node already exists.
  {
    TrieRecord::Data Existing = I->Ref.load();
    if (Existing.SK != TrieRecord::StorageKind::Unknown)
      return Error::success();
  }

  // Big leaf nodes go into their own files.
  if (Refs.empty() && Data.size() > TrieRecord::MaxEmbeddedSize)
    return createStandaloneLeaf(*I, Data);

  // Build the reference list and the record input.
  InternalRefVector InternalRefs;
  for (ObjectID Ref : Refs)
    InternalRefs.push_back(getInternalRef(Ref));

  DataRecordHandle::Input Input{InternalRefs, Data};

  // Decide where the record lives: the data pool or a standalone file.
  TrieRecord::StorageKind SK = TrieRecord::StorageKind::Unknown;
  FileOffset PoolOffset;
  SmallString<256> Path;
  std::optional<MappedTempFile> File;
  std::optional<uint64_t> FileSize;

  auto AllocStandaloneFile = [&](size_t Size) -> Expected<char *> {
    getStandalonePath(TrieRecord::getStandaloneFilePrefix(
                          TrieRecord::StorageKind::Standalone),
                      *I, Path);
    if (Error E = createTempFile(Path, Size).moveInto(File))
      return std::move(E);
    FileSize = Size;
    SK = TrieRecord::StorageKind::Standalone;
    return File->data();
  };
  auto Alloc = [&](size_t Size) -> Expected<char *> {
    if (Size <= TrieRecord::MaxEmbeddedSize) {
      SK = TrieRecord::StorageKind::DataPool;
      auto P = DataPool.allocate(Size);
      if (!P) {
        // Fall back to a standalone file if the pool is exhausted.
        char *NewAlloc = nullptr;
        auto NewE = handleErrors(
            P.takeError(), [&](std::unique_ptr<StringError> E) -> Error {
              if (E->convertToErrorCode() == std::errc::not_enough_memory)
                return AllocStandaloneFile(Size).moveInto(NewAlloc);
              return Error(std::move(E));
            });
        if (NewE)
          return std::move(NewE);
        return NewAlloc;
      }
      PoolOffset = P->getOffset();
      LLVM_DEBUG(dbgs() << "pool-alloc addr=" << (void *)PoolOffset.get()
                        << " size=" << Size << " end="
                        << (void *)(PoolOffset.get() + Size) << "\n");
      return (*P)->data();
    }
    return AllocStandaloneFile(Size);
  };

  DataRecordHandle Record;
  if (Error E =
          DataRecordHandle::createWithError(Alloc, Input).moveInto(Record))
    return E;
  assert(Record.getData().end()[0] == 0 && "Expected null-termination");
  assert(SK != TrieRecord::StorageKind::Unknown);
  assert(bool(File) != bool(PoolOffset) &&
         "Expected either a mapped file or a pooled offset");

  // Check for a race before calling MappedTempFile::keep(): better to discard
  // the new file than to overwrite one another thread or process already
  // published.
  TrieRecord::Data Existing = I->Ref.load();
  TrieRecord::Data NewObject{SK, PoolOffset};
  if (File) {
    if (Existing.SK == TrieRecord::StorageKind::Unknown) {
      if (Error E = File->keep(Path))
        return E;
    } else {
      File.reset();
    }
  }

  if (Existing.SK == TrieRecord::StorageKind::Unknown) {
    if (I->Ref.compare_exchange_strong(Existing, NewObject)) {
      if (FileSize)
        recordStandaloneSizeIncrease(*FileSize);
      return Error::success();
    }
  }

  // If there was a race, confirm that the winning value has valid storage.
  if (Existing.SK == TrieRecord::StorageKind::Unknown)
    return createCorruptObjectError(getDigest(*I));

  return Error::success();
}
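// Note (illustrative, not part of the original source): the Alloc callback
// encodes the storage policy. Records whose total size fits
// TrieRecord::MaxEmbeddedSize (just under 64KB) are allocated inside the
// shared data pool; anything larger, and any pool allocation that fails with
// not_enough_memory, is written to a standalone temporary file that is kept
// only if this thread wins the publishing race.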
void OnDiskGraphDB::recordStandaloneSizeIncrease(size_t SizeIncrease) {
  standaloneStorageSize().fetch_add(SizeIncrease, std::memory_order_relaxed);
}

std::atomic<uint64_t> &OnDiskGraphDB::standaloneStorageSize() const {
  MutableArrayRef<char> UserHeader = DataPool.getUserHeader();
  assert(UserHeader.size() == sizeof(std::atomic<uint64_t>));
  return *reinterpret_cast<std::atomic<uint64_t> *>(UserHeader.data());
}

uint64_t OnDiskGraphDB::getStandaloneStorageSize() const {
  return standaloneStorageSize().load(std::memory_order_relaxed);
}

size_t OnDiskGraphDB::getStorageSize() const {
  return Index.size() + DataPool.size() + getStandaloneStorageSize();
}

unsigned OnDiskGraphDB::getHardStorageLimitUtilization() const {
  unsigned IndexPercent = Index.size() * 100ULL / Index.capacity();
  unsigned DataPercent = DataPool.size() * 100ULL / DataPool.capacity();
  return std::max(IndexPercent, DataPercent);
}
Expected<std::unique_ptr<OnDiskGraphDB>>
OnDiskGraphDB::open(StringRef AbsPath, StringRef HashName,
                    unsigned HashByteSize, OnDiskGraphDB *UpstreamDB,
                    FaultInPolicy Policy) {
  if (std::error_code EC = sys::fs::create_directories(AbsPath))
    return createFileError(AbsPath, EC);

  constexpr uint64_t MB = 1024ull * 1024ull;
  constexpr uint64_t GB = 1024ull * 1024ull * 1024ull;

  uint64_t MaxIndexSize; // default cap elided in this excerpt
  uint64_t MaxDataPoolSize = 24 * GB;

  if (useSmallMappingSize(AbsPath)) {
    MaxIndexSize = 1 * GB;
    MaxDataPoolSize = 2 * GB;
  }

  auto CustomSize = getOverriddenMaxMappingSize();
  if (!CustomSize)
    return CustomSize.takeError();
  if (*CustomSize)
    MaxIndexSize = MaxDataPoolSize = **CustomSize;

  SmallString<256> IndexPath(AbsPath);
  sys::path::append(IndexPath, IndexFilePrefix + CASFormatVersion);
  std::optional<OnDiskTrieRawHashMap> Index;
  if (Error E = OnDiskTrieRawHashMap::create(
                    IndexPath, IndexTableName,
                    HashByteSize * CHAR_BIT,
                    /*DataSize=*/sizeof(TrieRecord), MaxIndexSize,
                    /*NewFileInitialSize=*/MB)
                    .moveInto(Index))
    return std::move(E);

  uint32_t UserHeaderSize = sizeof(std::atomic<uint64_t>);

  SmallString<256> DataPoolPath(AbsPath);
  sys::path::append(DataPoolPath, DataPoolFilePrefix + CASFormatVersion);
  std::optional<OnDiskDataAllocator> DataPool;
  if (Error E = OnDiskDataAllocator::create(
                    DataPoolPath, DataPoolTableName,
                    MaxDataPoolSize, /*NewFileInitialSize=*/MB, UserHeaderSize,
                    [](void *UserHeaderPtr) {
                      new (UserHeaderPtr) std::atomic<uint64_t>(0);
                    })
                    .moveInto(DataPool))
    return std::move(E);
  if (DataPool->getUserHeader().size() != UserHeaderSize)
    return createStringError(make_error_code(std::errc::invalid_argument),
                             "unexpected user header in '" + DataPoolPath +
                                 "'");

  return std::unique_ptr<OnDiskGraphDB>(new OnDiskGraphDB(
      AbsPath, std::move(*Index), std::move(*DataPool), UpstreamDB, Policy));
}
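// Note (illustrative, not part of the original source): the mapping caps are
// environment-dependent. Small deployments (useSmallMappingSize) shrink the
// index and data-pool mappings to 1 GB and 2 GB respectively, and an explicit
// override from getOverriddenMaxMappingSize() (the LLVM_CAS_MAX_MAPPING_SIZE
// mechanism) forces both caps to the same value:
//
//   if (*CustomSize)
//     MaxIndexSize = MaxDataPoolSize = **CustomSize;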
OnDiskGraphDB::OnDiskGraphDB(StringRef RootPath, OnDiskTrieRawHashMap Index,
                             OnDiskDataAllocator DataPool,
                             OnDiskGraphDB *UpstreamDB, FaultInPolicy Policy)
    : Index(std::move(Index)), DataPool(std::move(DataPool)),
      RootPath(RootPath.str()), UpstreamDB(UpstreamDB), FIPolicy(Policy) {
  // Owns the lifetime of "big" objects that are not in the data pool.
  StandaloneData = new StandaloneDataMapTy();
}

OnDiskGraphDB::~OnDiskGraphDB() {
  delete static_cast<StandaloneDataMapTy *>(StandaloneData);
}
Error OnDiskGraphDB::importFullTree(ObjectID PrimaryID,
                                    ObjectHandle UpstreamNode) {
  // Depth-first copy of the node's tree from upstream, so that each node is
  // stored only after all of its references already exist locally.
  struct UpstreamCursor {
    ObjectHandle Node;
    size_t RefsCount;
    object_refs_iterator RefI; // iterator type name assumed in this excerpt
    object_refs_iterator RefE;
  };
  SmallVector<UpstreamCursor> CursorStack;
  SmallVector<ObjectID> PrimaryNodesStack;

  auto enqueueNode = [&](ObjectID PrimaryID, std::optional<ObjectHandle> Node) {
    PrimaryNodesStack.push_back(PrimaryID);
    if (!Node)
      return;
    auto Refs = UpstreamDB->getObjectRefs(*Node);
    CursorStack.push_back(
        {*Node, (size_t)llvm::size(Refs), Refs.begin(), Refs.end()});
  };

  enqueueNode(PrimaryID, UpstreamNode);

  while (!CursorStack.empty()) {
    UpstreamCursor &Cur = CursorStack.back();

    if (Cur.RefI == Cur.RefE) {
      // All references of this node are imported; store the node itself using
      // the primary IDs collected for its refs, then pop it.
      assert(PrimaryNodesStack.size() >= Cur.RefsCount + 1);
      ObjectID PrimaryID = *(PrimaryNodesStack.end() - Cur.RefsCount - 1);
      auto PrimaryRefs = ArrayRef(PrimaryNodesStack)
                             .slice(PrimaryNodesStack.size() - Cur.RefsCount);
      if (Error E = store(PrimaryID, PrimaryRefs,
                          UpstreamDB->getObjectData(Cur.Node)))
        return E;
      PrimaryNodesStack.truncate(PrimaryNodesStack.size() - Cur.RefsCount);
      CursorStack.pop_back();
      continue;
    }

    ObjectID UpstreamID = *(Cur.RefI++);
    auto PrimaryID = getReference(UpstreamDB->getDigest(UpstreamID));
    if (!PrimaryID)
      return PrimaryID.takeError();
    if (containsObject(*PrimaryID, /*CheckUpstream=*/false)) {
      // Already present locally; no need to descend into its subtree.
      enqueueNode(*PrimaryID, std::nullopt);
      continue;
    }
    Expected<std::optional<ObjectHandle>> UpstreamNode =
        UpstreamDB->load(UpstreamID);
    if (!UpstreamNode)
      return UpstreamNode.takeError();
    enqueueNode(*PrimaryID, *UpstreamNode);
  }
  return Error::success();
}

Error OnDiskGraphDB::importSingleNode(ObjectID PrimaryID,
                                      ObjectHandle UpstreamNode) {
  // Copy only the requested node; its refs are translated but not fetched.
  auto Data = UpstreamDB->getObjectData(UpstreamNode);
  auto UpstreamRefs = UpstreamDB->getObjectRefs(UpstreamNode);
  SmallVector<ObjectID> Refs;
  for (ObjectID UpstreamRef : UpstreamRefs) {
    auto Ref = getReference(UpstreamDB->getDigest(UpstreamRef));
    if (!Ref)
      return Ref.takeError();
    Refs.push_back(*Ref);
  }
  return store(PrimaryID, Refs, Data);
}
Expected<std::optional<ObjectHandle>>
OnDiskGraphDB::faultInFromUpstream(ObjectID PrimaryID) {
  if (!UpstreamDB)
    return std::nullopt;

  auto UpstreamID = UpstreamDB->getReference(getDigest(PrimaryID));
  if (!UpstreamID)
    return UpstreamID.takeError();

  Expected<std::optional<ObjectHandle>> UpstreamNode =
      UpstreamDB->load(*UpstreamID);
  if (!UpstreamNode)
    return UpstreamNode.takeError();
  if (!*UpstreamNode)
    return std::nullopt;

  if (Error E = FIPolicy == FaultInPolicy::SingleNode
                    ? importSingleNode(PrimaryID, **UpstreamNode)
                    : importFullTree(PrimaryID, **UpstreamNode))
    return std::move(E);
  return load(PrimaryID);
}
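// Note (illustrative, not part of the original source): FaultInPolicy decides
// how much gets copied when a missing object is found upstream. SingleNode
// copies only the requested node (its refs are translated but not fetched),
// while the full-tree policy walks the whole reference graph via
// importFullTree so the primary store ends up self-contained for that root.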