//===- LowerMemIntrinsics.cpp ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <optional>

#define DEBUG_TYPE "lower-mem-intrinsics"

using namespace llvm;

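// Lower a memcpy with a compile-time-constant length into a wide load/store
// loop plus straight-line residual copies. As an illustrative sketch (the
// operand types actually come from TTI): a 23-byte copy lowered with an i32
// loop operand becomes 5 loop iterations followed by one i16 and one i8
// load/store pair for the remaining 3 bytes.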
void llvm::createMemCpyLoopKnownSize(
    Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr,
    ConstantInt *CopyLen, Align SrcAlign, Align DstAlign, bool SrcIsVolatile,
    bool DstIsVolatile, bool CanOverlap, const TargetTransformInfo &TTI,
    std::optional<uint32_t> AtomicElementSize) {
  // No need to expand zero length copies.
  if (CopyLen->isZero())
    return;

  BasicBlock *PreLoopBB = InsertBefore->getParent();
  BasicBlock *PostLoopBB = nullptr;
  Function *ParentFunc = PreLoopBB->getParent();
  LLVMContext &Ctx = PreLoopBB->getContext();
  const DataLayout &DL = ParentFunc->getParent()->getDataLayout();
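  // The loads and stores emitted below carry alias metadata in a private
  // scope: when the caller guarantees that the regions cannot overlap, loads
  // are tagged with the scope and stores with a matching noalias entry so
  // later passes may reorder them across each other.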
  MDBuilder MDB(Ctx);
  MDNode *NewDomain = MDB.createAnonymousAliasScopeDomain("MemCopyDomain");
  StringRef Name = "MemCopyAliasScope";
  MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);

  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *TypeOfCopyLen = CopyLen->getType();
  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(
      Ctx, CopyLen, SrcAS, DstAS, SrcAlign.value(), DstAlign.value(),
      AtomicElementSize);
  assert((!AtomicElementSize || !LoopOpType->isVectorTy()) &&
         "Atomic memcpy lowering is not supported for vector operand type");

  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);
  assert((!AtomicElementSize || LoopOpSize % *AtomicElementSize == 0) &&
         "Atomic memcpy lowering is not supported for selected operand size");

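  // The loop executes CopyLen / LoopOpSize iterations; e.g. a 23-byte copy
  // with a 4-byte loop operand runs 5 iterations and leaves 3 bytes for the
  // residual code below.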
  uint64_t LoopEndCount = CopyLen->getZExtValue() / LoopOpSize;

  if (LoopEndCount != 0) {
    // Split the basic block at InsertBefore so the load/store loop sits
    // between PreLoopBB and PostLoopBB.
    PostLoopBB = PreLoopBB->splitBasicBlock(InsertBefore, "memcpy-split");
    BasicBlock *LoopBB =
        BasicBlock::Create(Ctx, "load-store-loop", ParentFunc, PostLoopBB);
    PreLoopBB->getTerminator()->setSuccessor(0, LoopBB);

    IRBuilder<> PLBuilder(PreLoopBB->getTerminator());

    Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));
    Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));

    IRBuilder<> LoopBuilder(LoopBB);
    PHINode *LoopIndex = LoopBuilder.CreatePHI(TypeOfCopyLen, 2, "loop-index");
    LoopIndex->addIncoming(ConstantInt::get(TypeOfCopyLen, 0U), PreLoopBB);
    // Loop Body
    Value *SrcGEP =
        LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, LoopIndex);
    LoadInst *Load = LoopBuilder.CreateAlignedLoad(LoopOpType, SrcGEP,
                                                   PartSrcAlign, SrcIsVolatile);
    if (!CanOverlap) {
      // Set alias scope for loads.
      Load->setMetadata(LLVMContext::MD_alias_scope,
                        MDNode::get(Ctx, NewScope));
    }
    Value *DstGEP =
        LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, LoopIndex);
    StoreInst *Store = LoopBuilder.CreateAlignedStore(
        Load, DstGEP, PartDstAlign, DstIsVolatile);
    if (!CanOverlap) {
      // Indicate that stores don't overlap loads.
      Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
    }
    if (AtomicElementSize) {
      Load->setAtomic(AtomicOrdering::Unordered);
      Store->setAtomic(AtomicOrdering::Unordered);
    }
    Value *NewIndex =
        LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(TypeOfCopyLen, 1U));
    LoopIndex->addIncoming(NewIndex, LoopBB);

    // Create the loop branch condition.
    Constant *LoopEndCI = ConstantInt::get(TypeOfCopyLen, LoopEndCount);
    LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpULT(NewIndex, LoopEndCI),
                             LoopBB, PostLoopBB);
  }

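  // Copy whatever the loop left over using a short straight-line sequence of
  // progressively narrower operations chosen by TTI; e.g. the 3 bytes left
  // after an i32 loop typically become one i16 and one i8 copy.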
  uint64_t BytesCopied = LoopEndCount * LoopOpSize;
  uint64_t RemainingBytes = CopyLen->getZExtValue() - BytesCopied;
  if (RemainingBytes) {
    IRBuilder<> RBuilder(PostLoopBB ? PostLoopBB->getFirstNonPHI()
                                    : InsertBefore);

    SmallVector<Type *, 5> RemainingOps;
    TTI.getMemcpyLoopResidualLoweringType(RemainingOps, Ctx, RemainingBytes,
                                          SrcAS, DstAS, SrcAlign.value(),
                                          DstAlign.value(), AtomicElementSize);

    for (auto *OpTy : RemainingOps) {
      Align PartSrcAlign(commonAlignment(SrcAlign, BytesCopied));
      Align PartDstAlign(commonAlignment(DstAlign, BytesCopied));

      // Calculate the new index
      unsigned OperandSize = DL.getTypeStoreSize(OpTy);
      assert(
          (!AtomicElementSize || OperandSize % *AtomicElementSize == 0) &&
          "Atomic memcpy lowering is not supported for selected operand size");

      uint64_t GepIndex = BytesCopied / OperandSize;
      assert(GepIndex * OperandSize == BytesCopied &&
             "Division should have no Remainder!");

      Value *SrcGEP = RBuilder.CreateInBoundsGEP(
          OpTy, SrcAddr, ConstantInt::get(TypeOfCopyLen, GepIndex));
      LoadInst *Load =
          RBuilder.CreateAlignedLoad(OpTy, SrcGEP, PartSrcAlign, SrcIsVolatile);
      if (!CanOverlap) {
        // Set alias scope for loads.
        Load->setMetadata(LLVMContext::MD_alias_scope,
                          MDNode::get(Ctx, NewScope));
      }
      Value *DstGEP = RBuilder.CreateInBoundsGEP(
          OpTy, DstAddr, ConstantInt::get(TypeOfCopyLen, GepIndex));
      StoreInst *Store = RBuilder.CreateAlignedStore(Load, DstGEP, PartDstAlign,
                                                     DstIsVolatile);
      if (!CanOverlap) {
        // Indicate that stores don't overlap loads.
        Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
      }
      if (AtomicElementSize) {
        Load->setAtomic(AtomicOrdering::Unordered);
        Store->setAtomic(AtomicOrdering::Unordered);
      }
      BytesCopied += OperandSize;
    }
  }
  assert(BytesCopied == CopyLen->getZExtValue() &&
         "Bytes copied should match size in the call!");
}

// \returns \p Len udiv \p OpSize, checking for optimization opportunities.
static Value *getRuntimeLoopCount(const DataLayout &DL, IRBuilderBase &B,
                                  Value *Len, Value *OpSize,
                                  unsigned OpSizeVal) {
  // For powers of 2, we can lshr by log2 instead of using udiv.
  if (isPowerOf2_32(OpSizeVal))
    return B.CreateLShr(Len, Log2_32(OpSizeVal));
  return B.CreateUDiv(Len, OpSize);
}

// \returns \p Len urem \p OpSize, checking for optimization opportunities.
static Value *getRuntimeLoopRemainder(const DataLayout &DL, IRBuilderBase &B,
                                      Value *Len, Value *OpSize,
                                      unsigned OpSizeVal) {
  // For powers of 2, we can and by (OpSizeVal - 1) instead of using urem.
  if (isPowerOf2_32(OpSizeVal))
    return B.CreateAnd(Len, OpSizeVal - 1);
  return B.CreateURem(Len, OpSize);
}
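
// For example, with OpSizeVal == 8 the two helpers above lower Len / 8 and
// Len % 8 to `lshr %Len, 3` and `and %Len, 7` rather than udiv/urem.

// Lower a memcpy whose length is known only at run time. A sketch of the
// emitted control flow (the residual blocks are only emitted when the wide
// loop operand may leave a remainder):
//
//   pre-loop:                    compute trip count and residual byte count
//   loop-memcpy-expansion:       wide load/store while index < trip count
//   loop-memcpy-residual-header: skip the residual loop if nothing remains
//   loop-memcpy-residual:        byte- or element-wise load/store loop
//   post-loop-memcpy-expansion:  the split-off rest of the original block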
void llvm::createMemCpyLoopUnknownSize(
    Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr, Value *CopyLen,
    Align SrcAlign, Align DstAlign, bool SrcIsVolatile, bool DstIsVolatile,
    bool CanOverlap, const TargetTransformInfo &TTI,
    std::optional<uint32_t> AtomicElementSize) {
  BasicBlock *PreLoopBB = InsertBefore->getParent();
  BasicBlock *PostLoopBB =
      PreLoopBB->splitBasicBlock(InsertBefore, "post-loop-memcpy-expansion");

  Function *ParentFunc = PreLoopBB->getParent();
  const DataLayout &DL = ParentFunc->getParent()->getDataLayout();
  LLVMContext &Ctx = PreLoopBB->getContext();
  MDBuilder MDB(Ctx);
  MDNode *NewDomain = MDB.createAnonymousAliasScopeDomain("MemCopyDomain");
  StringRef Name = "MemCopyAliasScope";
  MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);

  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(
      Ctx, CopyLen, SrcAS, DstAS, SrcAlign.value(), DstAlign.value(),
      AtomicElementSize);
  assert((!AtomicElementSize || !LoopOpType->isVectorTy()) &&
         "Atomic memcpy lowering is not supported for vector operand type");
  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);
  assert((!AtomicElementSize || LoopOpSize % *AtomicElementSize == 0) &&
         "Atomic memcpy lowering is not supported for selected operand size");

  IRBuilder<> PLBuilder(PreLoopBB->getTerminator());

  // Calculate the loop trip count, and remaining bytes to copy after the loop.
  Type *CopyLenType = CopyLen->getType();
  IntegerType *ILengthType = dyn_cast<IntegerType>(CopyLenType);
  assert(ILengthType &&
         "expected size argument to memcpy to be an integer type!");
  Type *Int8Type = Type::getInt8Ty(Ctx);
  bool LoopOpIsInt8 = LoopOpType == Int8Type;
  ConstantInt *CILoopOpSize = ConstantInt::get(ILengthType, LoopOpSize);
  Value *RuntimeLoopCount = LoopOpIsInt8
                                ? CopyLen
                                : getRuntimeLoopCount(DL, PLBuilder, CopyLen,
                                                      CILoopOpSize, LoopOpSize);
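  // When the loop operand is a single byte, CopyLen itself is the trip count
  // and no residual copy is needed; otherwise the helper above divides the
  // length by the operand size (a shift when the size is a power of two).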
222
223 BasicBlock *LoopBB =
224 BasicBlock::Create(Ctx, "loop-memcpy-expansion", ParentFunc, PostLoopBB);
225 IRBuilder<> LoopBuilder(LoopBB);
226
227 Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));
228 Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));
229
230 PHINode *LoopIndex = LoopBuilder.CreatePHI(CopyLenType, 2, "loop-index");
231 LoopIndex->addIncoming(ConstantInt::get(CopyLenType, 0U), PreLoopBB);
232
233 Value *SrcGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, LoopIndex);
234 LoadInst *Load = LoopBuilder.CreateAlignedLoad(LoopOpType, SrcGEP,
235 PartSrcAlign, SrcIsVolatile);
236 if (!CanOverlap) {
237 // Set alias scope for loads.
238 Load->setMetadata(LLVMContext::MD_alias_scope, MDNode::get(Ctx, NewScope));
239 }
240 Value *DstGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, LoopIndex);
242 LoopBuilder.CreateAlignedStore(Load, DstGEP, PartDstAlign, DstIsVolatile);
243 if (!CanOverlap) {
244 // Indicate that stores don't overlap loads.
245 Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
246 }
247 if (AtomicElementSize) {
248 Load->setAtomic(AtomicOrdering::Unordered);
249 Store->setAtomic(AtomicOrdering::Unordered);
250 }
251 Value *NewIndex =
252 LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(CopyLenType, 1U));
253 LoopIndex->addIncoming(NewIndex, LoopBB);
254
  bool requiresResidual =
      !LoopOpIsInt8 && !(AtomicElementSize && LoopOpSize == AtomicElementSize);
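  // e.g. an i32 loop operand with a runtime length needs the residual loop
  // below, while an i8 loop operand (or a loop operand exactly one atomic
  // element wide) never leaves a remainder.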
  if (requiresResidual) {
    Type *ResLoopOpType = AtomicElementSize
                              ? Type::getIntNTy(Ctx, *AtomicElementSize * 8)
                              : Int8Type;
    unsigned ResLoopOpSize = DL.getTypeStoreSize(ResLoopOpType);
    assert(ResLoopOpSize == (AtomicElementSize ? *AtomicElementSize : 1) &&
           "Store size is expected to match type size");

    Value *RuntimeResidual = getRuntimeLoopRemainder(DL, PLBuilder, CopyLen,
                                                     CILoopOpSize, LoopOpSize);
    Value *RuntimeBytesCopied = PLBuilder.CreateSub(CopyLen, RuntimeResidual);

    // Loop body for the residual copy.
    BasicBlock *ResLoopBB = BasicBlock::Create(Ctx, "loop-memcpy-residual",
                                               PreLoopBB->getParent(),
                                               PostLoopBB);
    // Residual loop header.
    BasicBlock *ResHeaderBB = BasicBlock::Create(
        Ctx, "loop-memcpy-residual-header", PreLoopBB->getParent(), nullptr);

    // Update the pre-loop basic block to branch to the correct place:
    // branch to the main loop if the count is non-zero, branch to the
    // residual loop if the copy size is smaller than one iteration of the
    // main loop but non-zero, and branch past the residual loop if the
    // memcpy size is zero.
    ConstantInt *Zero = ConstantInt::get(ILengthType, 0U);
    PLBuilder.CreateCondBr(PLBuilder.CreateICmpNE(RuntimeLoopCount, Zero),
                           LoopBB, ResHeaderBB);
    PreLoopBB->getTerminator()->eraseFromParent();
286
287 LoopBuilder.CreateCondBr(
288 LoopBuilder.CreateICmpULT(NewIndex, RuntimeLoopCount), LoopBB,
289 ResHeaderBB);
290
291 // Determine if we need to branch to the residual loop or bypass it.
292 IRBuilder<> RHBuilder(ResHeaderBB);
293 RHBuilder.CreateCondBr(RHBuilder.CreateICmpNE(RuntimeResidual, Zero),
294 ResLoopBB, PostLoopBB);
295
296 // Copy the residual with single byte load/store loop.
297 IRBuilder<> ResBuilder(ResLoopBB);
298 PHINode *ResidualIndex =
299 ResBuilder.CreatePHI(CopyLenType, 2, "residual-loop-index");
300 ResidualIndex->addIncoming(Zero, ResHeaderBB);
301
302 Value *FullOffset = ResBuilder.CreateAdd(RuntimeBytesCopied, ResidualIndex);
303 Value *SrcGEP =
304 ResBuilder.CreateInBoundsGEP(ResLoopOpType, SrcAddr, FullOffset);
305 LoadInst *Load = ResBuilder.CreateAlignedLoad(ResLoopOpType, SrcGEP,
306 PartSrcAlign, SrcIsVolatile);
307 if (!CanOverlap) {
308 // Set alias scope for loads.
309 Load->setMetadata(LLVMContext::MD_alias_scope,
310 MDNode::get(Ctx, NewScope));
311 }
312 Value *DstGEP =
313 ResBuilder.CreateInBoundsGEP(ResLoopOpType, DstAddr, FullOffset);
314 StoreInst *Store = ResBuilder.CreateAlignedStore(Load, DstGEP, PartDstAlign,
315 DstIsVolatile);
316 if (!CanOverlap) {
317 // Indicate that stores don't overlap loads.
318 Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
319 }
320 if (AtomicElementSize) {
321 Load->setAtomic(AtomicOrdering::Unordered);
322 Store->setAtomic(AtomicOrdering::Unordered);
323 }
324 Value *ResNewIndex = ResBuilder.CreateAdd(
325 ResidualIndex, ConstantInt::get(CopyLenType, ResLoopOpSize));
326 ResidualIndex->addIncoming(ResNewIndex, ResLoopBB);
327
328 // Create the loop branch condition.
329 ResBuilder.CreateCondBr(
330 ResBuilder.CreateICmpULT(ResNewIndex, RuntimeResidual), ResLoopBB,
331 PostLoopBB);
  } else {
    // In this case the loop operand type was a byte (or a single atomic
    // element), so there is no need for a residual loop to copy the
    // remaining memory after the main loop.
    // We do however need to patch up the control flow by creating the
    // terminators for the pre-loop block and the memcpy loop.
    ConstantInt *Zero = ConstantInt::get(ILengthType, 0U);
    PLBuilder.CreateCondBr(PLBuilder.CreateICmpNE(RuntimeLoopCount, Zero),
                           LoopBB, PostLoopBB);
    PreLoopBB->getTerminator()->eraseFromParent();
    LoopBuilder.CreateCondBr(
        LoopBuilder.CreateICmpULT(NewIndex, RuntimeLoopCount), LoopBB,
        PostLoopBB);
  }
}

// Lower memmove to IR. memmove is required to correctly copy overlapping
// memory regions; therefore, it has to check the relative positions of the
// source and destination pointers and choose the copy direction accordingly.
//
// The code below is an IR rendition of this C function:
//
// void* memmove(void* dst, const void* src, size_t n) {
//   unsigned char* d = dst;
//   const unsigned char* s = src;
//   if (s < d) {
//     // copy backwards
//     while (n--) {
//       d[n] = s[n];
//     }
//   } else {
//     // copy forward
//     for (size_t i = 0; i < n; ++i) {
//       d[i] = s[i];
//     }
//   }
//   return dst;
// }
static void createMemMoveLoop(Instruction *InsertBefore, Value *SrcAddr,
                              Value *DstAddr, Value *CopyLen, Align SrcAlign,
                              Align DstAlign, bool SrcIsVolatile,
                              bool DstIsVolatile,
                              const TargetTransformInfo &TTI) {
  Type *TypeOfCopyLen = CopyLen->getType();
  BasicBlock *OrigBB = InsertBefore->getParent();
  Function *F = OrigBB->getParent();
  const DataLayout &DL = F->getParent()->getDataLayout();
  // TODO: Use different element type if possible?
  Type *EltTy = Type::getInt8Ty(F->getContext());

  // Create a comparison of src and dst, based on which we jump to either
  // the forward-copy part of the function (if src >= dst) or the
  // backwards-copy part (if src < dst).
  // SplitBlockAndInsertIfThenElse conveniently creates the basic if-then-else
  // structure. Its block terminators (unconditional branches) are replaced by
  // the appropriate conditional branches when the loop is built.
  ICmpInst *PtrCompare =
      new ICmpInst(InsertBefore->getIterator(), ICmpInst::ICMP_ULT, SrcAddr,
                   DstAddr, "compare_src_dst");
  Instruction *ThenTerm, *ElseTerm;
  SplitBlockAndInsertIfThenElse(PtrCompare, InsertBefore->getIterator(),
                                &ThenTerm, &ElseTerm);

  // Each part of the function consists of two blocks:
  //   copy_backwards:      used to skip the loop when n == 0
  //   copy_backwards_loop: the actual backwards loop BB
  //   copy_forward:        used to skip the loop when n == 0
  //   copy_forward_loop:   the actual forward loop BB
  BasicBlock *CopyBackwardsBB = ThenTerm->getParent();
  CopyBackwardsBB->setName("copy_backwards");
  BasicBlock *CopyForwardBB = ElseTerm->getParent();
  CopyForwardBB->setName("copy_forward");
  BasicBlock *ExitBB = InsertBefore->getParent();
  ExitBB->setName("memmove_done");

  unsigned PartSize = DL.getTypeStoreSize(EltTy);
  Align PartSrcAlign(commonAlignment(SrcAlign, PartSize));
  Align PartDstAlign(commonAlignment(DstAlign, PartSize));

  // Initial comparison of n == 0 that lets us skip the loops altogether.
  // Shared between both backwards and forward copy clauses.
  ICmpInst *CompareN =
      new ICmpInst(OrigBB->getTerminator()->getIterator(), ICmpInst::ICMP_EQ,
                   CopyLen, ConstantInt::get(TypeOfCopyLen, 0),
                   "compare_n_to_0");

  // Copying backwards.
  BasicBlock *LoopBB = BasicBlock::Create(F->getContext(),
                                          "copy_backwards_loop", F,
                                          CopyForwardBB);
  IRBuilder<> LoopBuilder(LoopBB);

  PHINode *LoopPhi = LoopBuilder.CreatePHI(TypeOfCopyLen, 0);
  Value *IndexPtr = LoopBuilder.CreateSub(
      LoopPhi, ConstantInt::get(TypeOfCopyLen, 1), "index_ptr");
  Value *Element = LoopBuilder.CreateAlignedLoad(
      EltTy, LoopBuilder.CreateInBoundsGEP(EltTy, SrcAddr, IndexPtr),
      PartSrcAlign, "element");
  LoopBuilder.CreateAlignedStore(
      Element, LoopBuilder.CreateInBoundsGEP(EltTy, DstAddr, IndexPtr),
      PartDstAlign);
  LoopBuilder.CreateCondBr(
      LoopBuilder.CreateICmpEQ(IndexPtr, ConstantInt::get(TypeOfCopyLen, 0)),
      ExitBB, LoopBB);
  LoopPhi->addIncoming(IndexPtr, LoopBB);
  LoopPhi->addIncoming(CopyLen, CopyBackwardsBB);
  BranchInst::Create(ExitBB, LoopBB, CompareN, ThenTerm->getIterator());
  ThenTerm->eraseFromParent();

  // Copying forward.
  BasicBlock *FwdLoopBB =
      BasicBlock::Create(F->getContext(), "copy_forward_loop", F, ExitBB);
  IRBuilder<> FwdLoopBuilder(FwdLoopBB);
  PHINode *FwdCopyPhi = FwdLoopBuilder.CreatePHI(TypeOfCopyLen, 0, "index_ptr");
  Value *SrcGEP = FwdLoopBuilder.CreateInBoundsGEP(EltTy, SrcAddr, FwdCopyPhi);
  Value *FwdElement =
      FwdLoopBuilder.CreateAlignedLoad(EltTy, SrcGEP, PartSrcAlign, "element");
  Value *DstGEP = FwdLoopBuilder.CreateInBoundsGEP(EltTy, DstAddr, FwdCopyPhi);
  FwdLoopBuilder.CreateAlignedStore(FwdElement, DstGEP, PartDstAlign);
  Value *FwdIndexPtr = FwdLoopBuilder.CreateAdd(
      FwdCopyPhi, ConstantInt::get(TypeOfCopyLen, 1), "index_increment");
  FwdLoopBuilder.CreateCondBr(FwdLoopBuilder.CreateICmpEQ(FwdIndexPtr, CopyLen),
                              ExitBB, FwdLoopBB);
  FwdCopyPhi->addIncoming(FwdIndexPtr, FwdLoopBB);
  FwdCopyPhi->addIncoming(ConstantInt::get(TypeOfCopyLen, 0), CopyForwardBB);

  BranchInst::Create(ExitBB, FwdLoopBB, CompareN, ElseTerm->getIterator());
  ElseTerm->eraseFromParent();
}

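// Lower a memset to a simple store loop; a sketch of the emitted control flow
// for a run-time length n:
//
//   entry:         br (n == 0) ? split : loadstoreloop
//   loadstoreloop: store SetValue, ++index, br (index < n) ? loadstoreloop
//                  : split
//   split:         the rest of the original block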
static void createMemSetLoop(Instruction *InsertBefore, Value *DstAddr,
                             Value *CopyLen, Value *SetValue, Align DstAlign,
                             bool IsVolatile) {
  Type *TypeOfCopyLen = CopyLen->getType();
  BasicBlock *OrigBB = InsertBefore->getParent();
  Function *F = OrigBB->getParent();
  const DataLayout &DL = F->getParent()->getDataLayout();
  BasicBlock *NewBB = OrigBB->splitBasicBlock(InsertBefore, "split");
  BasicBlock *LoopBB =
      BasicBlock::Create(F->getContext(), "loadstoreloop", F, NewBB);

  IRBuilder<> Builder(OrigBB->getTerminator());

  Builder.CreateCondBr(
      Builder.CreateICmpEQ(ConstantInt::get(TypeOfCopyLen, 0), CopyLen), NewBB,
      LoopBB);
  OrigBB->getTerminator()->eraseFromParent();

  unsigned PartSize = DL.getTypeStoreSize(SetValue->getType());
  Align PartAlign(commonAlignment(DstAlign, PartSize));

  IRBuilder<> LoopBuilder(LoopBB);
  PHINode *LoopIndex = LoopBuilder.CreatePHI(TypeOfCopyLen, 0);
  LoopIndex->addIncoming(ConstantInt::get(TypeOfCopyLen, 0), OrigBB);

  LoopBuilder.CreateAlignedStore(
      SetValue,
      LoopBuilder.CreateInBoundsGEP(SetValue->getType(), DstAddr, LoopIndex),
      PartAlign, IsVolatile);

  Value *NewIndex =
      LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(TypeOfCopyLen, 1));
  LoopIndex->addIncoming(NewIndex, LoopBB);

  LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpULT(NewIndex, CopyLen), LoopBB,
                           NewBB);
}

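// llvm.memcpy requires its source and destination either to be exactly equal
// or to be non-overlapping, so if ScalarEvolution can prove the two pointers
// unequal, the regions must be disjoint and the expansion can safely attach
// noalias metadata to the emitted loads and stores.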
template <typename T>
static bool canOverlap(MemTransferBase<T> *Memcpy, ScalarEvolution *SE) {
  if (SE) {
    auto *SrcSCEV = SE->getSCEV(Memcpy->getRawSource());
    auto *DestSCEV = SE->getSCEV(Memcpy->getRawDest());
    if (SE->isKnownPredicateAt(CmpInst::ICMP_NE, SrcSCEV, DestSCEV, Memcpy))
      return false;
  }
  return true;
}

void llvm::expandMemCpyAsLoop(MemCpyInst *Memcpy,
                              const TargetTransformInfo &TTI,
                              ScalarEvolution *SE) {
  bool CanOverlap = canOverlap(Memcpy, SE);
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Memcpy->getLength())) {
    createMemCpyLoopKnownSize(
        /* InsertBefore */ Memcpy,
        /* SrcAddr */ Memcpy->getRawSource(),
        /* DstAddr */ Memcpy->getRawDest(),
        /* CopyLen */ CI,
        /* SrcAlign */ Memcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ Memcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ Memcpy->isVolatile(),
        /* DstIsVolatile */ Memcpy->isVolatile(),
        /* CanOverlap */ CanOverlap,
        /* TargetTransformInfo */ TTI);
  } else {
    createMemCpyLoopUnknownSize(
        /* InsertBefore */ Memcpy,
        /* SrcAddr */ Memcpy->getRawSource(),
        /* DstAddr */ Memcpy->getRawDest(),
        /* CopyLen */ Memcpy->getLength(),
        /* SrcAlign */ Memcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ Memcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ Memcpy->isVolatile(),
        /* DstIsVolatile */ Memcpy->isVolatile(),
        /* CanOverlap */ CanOverlap,
        /* TargetTransformInfo */ TTI);
  }
}

bool llvm::expandMemMoveAsLoop(MemMoveInst *Memmove,
                               const TargetTransformInfo &TTI) {
  Value *CopyLen = Memmove->getLength();
  Value *SrcAddr = Memmove->getRawSource();
  Value *DstAddr = Memmove->getRawDest();
  Align SrcAlign = Memmove->getSourceAlign().valueOrOne();
  Align DstAlign = Memmove->getDestAlign().valueOrOne();
  bool SrcIsVolatile = Memmove->isVolatile();
  bool DstIsVolatile = SrcIsVolatile;
  IRBuilder<> CastBuilder(Memmove);

  unsigned SrcAS = SrcAddr->getType()->getPointerAddressSpace();
  unsigned DstAS = DstAddr->getType()->getPointerAddressSpace();
  if (SrcAS != DstAS) {
    if (!TTI.addrspacesMayAlias(SrcAS, DstAS)) {
      // We may not be able to emit a pointer comparison, but we don't have
      // to. Expand as memcpy.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(CopyLen)) {
        createMemCpyLoopKnownSize(/*InsertBefore=*/Memmove, SrcAddr, DstAddr,
                                  CI, SrcAlign, DstAlign, SrcIsVolatile,
                                  DstIsVolatile,
                                  /*CanOverlap=*/false, TTI);
      } else {
        createMemCpyLoopUnknownSize(/*InsertBefore=*/Memmove, SrcAddr, DstAddr,
                                    CopyLen, SrcAlign, DstAlign, SrcIsVolatile,
                                    DstIsVolatile,
                                    /*CanOverlap=*/false, TTI);
      }

      return true;
    }

    if (TTI.isValidAddrSpaceCast(DstAS, SrcAS))
      DstAddr = CastBuilder.CreateAddrSpaceCast(DstAddr, SrcAddr->getType());
    else if (TTI.isValidAddrSpaceCast(SrcAS, DstAS))
      SrcAddr = CastBuilder.CreateAddrSpaceCast(SrcAddr, DstAddr->getType());
    else {
      // We don't know generically if it's legal to introduce an
      // addrspacecast. We need to know either if it's legal to insert an
      // addrspacecast, or if the address spaces cannot alias.
      LLVM_DEBUG(
          dbgs() << "Do not know how to expand memmove between different "
                    "address spaces\n");
      return false;
    }
  }

  createMemMoveLoop(
      /*InsertBefore=*/Memmove, SrcAddr, DstAddr, CopyLen, SrcAlign, DstAlign,
      SrcIsVolatile, DstIsVolatile, TTI);
  return true;
}

void llvm::expandMemSetAsLoop(MemSetInst *Memset) {
  createMemSetLoop(/* InsertBefore */ Memset,
                   /* DstAddr */ Memset->getRawDest(),
                   /* CopyLen */ Memset->getLength(),
                   /* SetValue */ Memset->getValue(),
                   /* Alignment */ Memset->getDestAlign().valueOrOne(),
                   Memset->isVolatile());
}

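// llvm.memcpy.element.unordered.atomic lowers exactly like a plain memcpy,
// except that every emitted load and store is unordered-atomic at the element
// size and the intrinsic itself guarantees the regions do not overlap.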
void llvm::expandAtomicMemCpyAsLoop(AtomicMemCpyInst *AtomicMemcpy,
                                    const TargetTransformInfo &TTI,
                                    ScalarEvolution *SE) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(AtomicMemcpy->getLength())) {
    createMemCpyLoopKnownSize(
        /* InsertBefore */ AtomicMemcpy,
        /* SrcAddr */ AtomicMemcpy->getRawSource(),
        /* DstAddr */ AtomicMemcpy->getRawDest(),
        /* CopyLen */ CI,
        /* SrcAlign */ AtomicMemcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ AtomicMemcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ AtomicMemcpy->isVolatile(),
        /* DstIsVolatile */ AtomicMemcpy->isVolatile(),
        /* CanOverlap */ false, // SrcAddr & DstAddr may not overlap by spec.
        /* TargetTransformInfo */ TTI,
        /* AtomicCpySize */ AtomicMemcpy->getElementSizeInBytes());
  } else {
    createMemCpyLoopUnknownSize(
        /* InsertBefore */ AtomicMemcpy,
        /* SrcAddr */ AtomicMemcpy->getRawSource(),
        /* DstAddr */ AtomicMemcpy->getRawDest(),
        /* CopyLen */ AtomicMemcpy->getLength(),
        /* SrcAlign */ AtomicMemcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ AtomicMemcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ AtomicMemcpy->isVolatile(),
        /* DstIsVolatile */ AtomicMemcpy->isVolatile(),
        /* CanOverlap */ false, // SrcAddr & DstAddr may not overlap by spec.
        /* TargetTransformInfo */ TTI,
        /* AtomicCpySize */ AtomicMemcpy->getElementSizeInBytes());
  }
}