[MemCpyOpt] Forward memcpy based on the actual copy memory location. #87190

Merged: 12 commits, merged Jul 12, 2024
99 changes: 74 additions & 25 deletions llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -14,6 +14,7 @@
 #include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/iterator_range.h"
@@ -1124,28 +1125,79 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
 bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                   MemCpyInst *MDep,
                                                   BatchAAResults &BAA) {
-  // We can only transforms memcpy's where the dest of one is the source of the
-  // other.
-  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
-    return false;
-
   // If dep instruction is reading from our current input, then it is a noop
-  // transfer and substituting the input won't change this instruction.  Just
-  // ignore the input and let someone else zap MDep.  This handles cases like:
+  // transfer and substituting the input won't change this instruction. Just
+  // ignore the input and let someone else zap MDep. This handles cases like:
   //    memcpy(a <- a)
   //    memcpy(b <- a)
   if (M->getSource() == MDep->getSource())
     return false;
 
-  // Second, the length of the memcpy's must be the same, or the preceding one
+  // We can only optimize non-volatile memcpy's.
+  if (MDep->isVolatile())
+    return false;
+
+  int64_t MForwardOffset = 0;
+  const DataLayout &DL = M->getModule()->getDataLayout();
+  // We can only transforms memcpy's where the dest of one is the source of the
+  // other, or they have an offset in a range.
+  if (M->getSource() != MDep->getDest()) {
+    std::optional<int64_t> Offset =
+        M->getSource()->getPointerOffsetFrom(MDep->getDest(), DL);
+    if (!Offset || *Offset < 0)
+      return false;
+    MForwardOffset = *Offset;
+  }
+
+  // The length of the memcpy's must be the same, or the preceding one
   // must be larger than the following one.
-  if (MDep->getLength() != M->getLength()) {
+  if (MForwardOffset != 0 || MDep->getLength() != M->getLength()) {
     auto *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
     auto *MLen = dyn_cast<ConstantInt>(M->getLength());
-    if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
+    if (!MDepLen || !MLen ||
+        MDepLen->getZExtValue() < MLen->getZExtValue() + MForwardOffset)
       return false;
   }
 
+  IRBuilder<> Builder(M);
+  auto *CopySource = MDep->getSource();
+  Instruction *NewCopySource = nullptr;
+  auto CleanupOnRet = llvm::make_scope_exit([&NewCopySource] {
+    if (NewCopySource && NewCopySource->use_empty())
+      // Safety: It's safe here because we will only allocate more instructions
+      // after finishing all BatchAA queries, but we have to be careful if we
+      // want to do something like this in another place. Then we'd probably
+      // have to delay instruction removal until all transforms on an
+      // instruction finished.
+      NewCopySource->eraseFromParent();
+  });
+  MaybeAlign CopySourceAlign = MDep->getSourceAlign();
+  // We just need to calculate the actual size of the copy.
+  auto MCopyLoc = MemoryLocation::getForSource(MDep).getWithNewSize(
+      MemoryLocation::getForSource(M).Size);
+
+  // When the forwarding offset is greater than 0, we transform
+  //    memcpy(d1 <- s1)
+  //    memcpy(d2 <- d1+o)
+  // to
+  //    memcpy(d2 <- s1+o)
+  if (MForwardOffset > 0) {
+    // The copy destination of `M` maybe can serve as the source of copying.
+    std::optional<int64_t> MDestOffset =
+        M->getRawDest()->getPointerOffsetFrom(MDep->getRawSource(), DL);
+    if (MDestOffset == MForwardOffset)
+      CopySource = M->getDest();
+    else {
+      NewCopySource = cast<Instruction>(Builder.CreateInBoundsPtrAdd(
+          CopySource, Builder.getInt64(MForwardOffset)));
+      CopySource = NewCopySource;
+    }
+    // We need to update `MCopyLoc` if an offset exists.
+    MCopyLoc = MCopyLoc.getWithNewPtr(CopySource);
+    if (CopySourceAlign)
+      CopySourceAlign = commonAlignment(*CopySourceAlign, MForwardOffset);
+  }
+
   // Verify that the copied-from memory doesn't change in between the two
   // transfers. For example, in:
   //    memcpy(a <- b)
@@ -1155,14 +1207,12 @@ bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
   //
   // TODO: If the code between M and MDep is transparent to the destination "c",
   // then we could still perform the xform by moving M up to the first memcpy.
-  // TODO: It would be sufficient to check the MDep source up to the memcpy
-  // size of M, rather than MDep.
-  if (writtenBetween(MSSA, BAA, MemoryLocation::getForSource(MDep),
-                     MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(M)))
+  if (writtenBetween(MSSA, BAA, MCopyLoc, MSSA->getMemoryAccess(MDep),
+                     MSSA->getMemoryAccess(M)))
     return false;
 
   // No need to create `memcpy(a <- a)`.
-  if (BAA.isMustAlias(M->getDest(), MDep->getSource())) {
+  if (BAA.isMustAlias(M->getDest(), CopySource)) {
     // Remove the instruction we're replacing.
     eraseInstruction(M);
     ++NumMemCpyInstr;
@@ -1191,23 +1241,22 @@ bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
 
   // TODO: Is this worth it if we're creating a less aligned memcpy? For
   // example we could be moving from movaps -> movq on x86.
-  IRBuilder<> Builder(M);
   Instruction *NewM;
   if (UseMemMove)
-    NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
-                                 MDep->getRawSource(), MDep->getSourceAlign(),
-                                 M->getLength(), M->isVolatile());
+    NewM =
+        Builder.CreateMemMove(M->getDest(), M->getDestAlign(), CopySource,
+                              CopySourceAlign, M->getLength(), M->isVolatile());
   else if (isa<MemCpyInlineInst>(M)) {
     // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is
     // never allowed since that would allow the latter to be lowered as a call
     // to an external function.
-    NewM = Builder.CreateMemCpyInline(
-        M->getRawDest(), M->getDestAlign(), MDep->getRawSource(),
-        MDep->getSourceAlign(), M->getLength(), M->isVolatile());
+    NewM = Builder.CreateMemCpyInline(M->getDest(), M->getDestAlign(),
+                                      CopySource, CopySourceAlign,
+                                      M->getLength(), M->isVolatile());
   } else
-    NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
-                                MDep->getRawSource(), MDep->getSourceAlign(),
-                                M->getLength(), M->isVolatile());
+    NewM =
+        Builder.CreateMemCpy(M->getDest(), M->getDestAlign(), CopySource,
+                             CopySourceAlign, M->getLength(), M->isVolatile());
   NewM->copyMetadata(*M, LLVMContext::MD_DIAssignID);
 
   assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)));
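
At the source level, the pattern this change targets looks roughly like the following. This is a minimal C++ sketch for illustration only; it is not part of the PR, and the function and buffer names are made up. The new IR tests below (e.g. @forward_offset) exercise the same shape.

#include <cstring>

// memcpy(d1 <- s1) followed by memcpy(d2 <- d1+o): the second copy only reads
// bytes that the first copy wrote, so it can read from the original source at
// the same offset instead.
void forward_with_offset(char *dest, const char *src) {
  char tmp[9];
  std::memcpy(tmp, src, 7);      // memcpy(d1 <- s1), 7 bytes
  std::memcpy(dest, tmp + 1, 6); // memcpy(d2 <- d1+1), reads 6 of those 7 bytes
  // With this patch, MemCpyOpt can rewrite the second call to read from
  // src + 1 directly (as memcpy or memmove), which usually lets `tmp` die.
}
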
2 changes: 1 addition & 1 deletion llvm/test/Transforms/MemCpyOpt/lifetime.ll
@@ -124,7 +124,7 @@ define void @call_slot_lifetime_bitcast(ptr %ptr) {
 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 4 [[PTR:%.*]], i64 4, i1 false)
 ; CHECK-NEXT: [[TMP1_CAST:%.*]] = bitcast ptr [[TMP1]] to ptr
 ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[TMP1_CAST]])
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP1_CAST]], ptr align 4 [[PTR]], i64 4, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP1]], ptr align 4 [[PTR]], i64 4, i1 false)
 ; CHECK-NEXT: ret void
 ;
   %tmp1 = alloca i32
203 changes: 203 additions & 0 deletions llvm/test/Transforms/MemCpyOpt/memcpy-memcpy-offset.ll
Review comment (Contributor):
Please make the bulk of the coverage not copy back into the same allocation, but into a separate (noalias) destination. The same-allocation case is a confusing edge-case where we should only verify that we switch to memmove in that case.

Reply (Member Author):
Updated.

@@ -0,0 +1,203 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=memcpyopt -S -verify-memoryssa | FileCheck %s

%buf = type [9 x i8]

; We can forward `memcpy` because the copied range lies entirely within the dependent copy.
define void @forward_offset(ptr %src, ptr %dest) {
; CHECK-LABEL: define void @forward_offset(
; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DEST:%.*]]) {
; CHECK-NEXT: [[DEP_DEST:%.*]] = alloca [9 x i8], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[SRC]], i64 7, i1 false)
; CHECK-NEXT: [[SRC_OFFSET:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 1
; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[TMP1]], i64 6, i1 false)
; CHECK-NEXT: ret void
;
%cpy_tmp = alloca %buf, align 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %cpy_tmp, ptr align 1 %src, i64 7, i1 false)
%cpy_tmp_offset = getelementptr inbounds i8, ptr %cpy_tmp, i64 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %cpy_tmp_offset, i64 6, i1 false)
ret void
}

; We need to update the align value of the source of `memcpy` when forwarding.
define void @forward_offset_align(ptr %src, ptr %dest) {
; CHECK-LABEL: define void @forward_offset_align(
; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DEST:%.*]]) {
; CHECK-NEXT: [[DEP_DEST:%.*]] = alloca [9 x i8], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 4 [[SRC]], i64 9, i1 false)
; CHECK-NEXT: [[TMP_OFFSET:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 3
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 3
; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[TMP1]], i64 5, i1 false)
; CHECK-NEXT: ret void
;
%cpy_tmp = alloca %buf, align 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %cpy_tmp, ptr align 4 %src, i64 9, i1 false)
%cpy_tmp_offset = getelementptr inbounds i8, ptr %cpy_tmp, i64 3
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %cpy_tmp_offset, i64 5, i1 false)
ret void
}

; We can change the align value to 2 when forwarding.
define void @forward_offset_align_2(ptr %src, ptr %dest) {
; CHECK-LABEL: define void @forward_offset_align_2(
; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DEST:%.*]]) {
; CHECK-NEXT: [[DEP_DEST:%.*]] = alloca [9 x i8], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 4 [[SRC]], i64 9, i1 false)
; CHECK-NEXT: [[TMP_OFFSET:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 2
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 2
; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr align 1 [[DEST]], ptr align 2 [[TMP1]], i64 6, i1 false)
; CHECK-NEXT: ret void
;
%cpy_tmp = alloca %buf, align 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %cpy_tmp, ptr align 4 %src, i64 9, i1 false)
%cpy_tmp_offset = getelementptr inbounds i8, ptr %cpy_tmp, i64 2
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %cpy_tmp_offset, i64 6, i1 false)
ret void
}
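
The alignments in the two tests above follow from the `commonAlignment(*CopySourceAlign, MForwardOffset)` call in the C++ change: the forwarded source only keeps the alignment that both the original source alignment and the byte offset guarantee. A rough stand-alone sketch of that rule, under the assumption that it mirrors LLVM's MinAlign/commonAlignment semantics (the helper name is made up):

#include <cstdint>

// Largest power of two dividing both the source alignment and the offset.
uint64_t forwardedSourceAlign(uint64_t srcAlign, uint64_t offset) {
  uint64_t combined = srcAlign | offset;
  return combined & (~combined + 1); // lowest set bit
}
// forwardedSourceAlign(4, 3) == 1, matching @forward_offset_align above.
// forwardedSourceAlign(4, 2) == 2, matching @forward_offset_align_2 above.
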

; If the copy destination can be used as the copy source, we don't need to create a GEP instruction.
define void @forward_offset_without_gep(ptr %src) {
; CHECK-LABEL: define void @forward_offset_without_gep(
; CHECK-SAME: ptr [[SRC:%.*]]) {
; CHECK-NEXT: [[TMP:%.*]] = alloca [9 x i8], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP]], ptr align 1 [[SRC]], i64 7, i1 false)
; CHECK-NEXT: [[TMP_OFFSET:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 1
; CHECK-NEXT: [[DEST:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 1
; CHECK-NEXT: ret void
;
%cpy_tmp = alloca %buf, align 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %cpy_tmp, ptr align 1 %src, i64 7, i1 false)
%cpy_tmp_offset = getelementptr inbounds i8, ptr %cpy_tmp, i64 1
%dest = getelementptr inbounds i8, ptr %src, i64 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %cpy_tmp_offset, i64 6, i1 false)
ret void
}

; We need to create a GEP instruction when forwarding.
define void @forward_offset_with_gep(ptr %src) {
; CHECK-LABEL: define void @forward_offset_with_gep(
; CHECK-SAME: ptr [[SRC:%.*]]) {
; CHECK-NEXT: [[DEP_DEST:%.*]] = alloca [9 x i8], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[SRC]], i64 7, i1 false)
; CHECK-NEXT: [[TMP_OFFSET:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
; CHECK-NEXT: [[DEST:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 2
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 1
; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[TMP1]], i64 6, i1 false)
; CHECK-NEXT: ret void
;
%cpy_tmp = alloca %buf, align 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %cpy_tmp, ptr align 1 %src, i64 7, i1 false)
%cpy_tmp_offset = getelementptr inbounds i8, ptr %cpy_tmp, i64 1
%dest = getelementptr inbounds i8, ptr %src, i64 2
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %cpy_tmp_offset, i64 6, i1 false)
ret void
}

; Make sure we pass the right parameters when calling `memcpy`.
define void @forward_offset_memcpy(ptr %src, ptr %dest) {
; CHECK-LABEL: define void @forward_offset_memcpy(
; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DEST:%.*]]) {
; CHECK-NEXT: [[DEP_DEST:%.*]] = alloca [9 x i8], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[SRC]], i64 7, i1 false)
; CHECK-NEXT: [[TMP_OFFSET:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 1
; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[TMP1]], i64 6, i1 false)
; CHECK-NEXT: call void @use(ptr [[DEST]])
; CHECK-NEXT: ret void
;
%cpy_tmp = alloca %buf, align 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %cpy_tmp, ptr align 1 %src, i64 7, i1 false)
%cpy_tmp_offset = getelementptr inbounds i8, ptr %cpy_tmp, i64 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %cpy_tmp_offset, i64 6, i1 false)
call void @use(ptr %dest)
ret void
}

; Make sure we pass the right parameters when calling `memcpy.inline`.
define void @forward_offset_memcpy_inline(ptr %src, ptr %dest) {
; CHECK-LABEL: define void @forward_offset_memcpy_inline(
; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DEST:%.*]]) {
; CHECK-NEXT: [[DEP_DEST:%.*]] = alloca [9 x i8], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[SRC]], i64 7, i1 false)
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
; CHECK-NEXT: call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[TMP1]], i64 6, i1 false)
; CHECK-NEXT: call void @use(ptr [[DEST]])
; CHECK-NEXT: ret void
;
%cpy_tmp = alloca %buf, align 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %cpy_tmp, ptr align 1 %src, i64 7, i1 false)
%cpy_tmp_offset = getelementptr inbounds i8, ptr %cpy_tmp, i64 1
call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dest, ptr align 1 %cpy_tmp_offset, i64 6, i1 false)
call void @use(ptr %dest)
ret void
}

; We cannot forward `memcpy` because the copy (6 bytes starting at offset 1) would read past the 6 bytes written by the `memcpy` it depends on.
define void @do_not_forward_oversize_offset(ptr %src, ptr %dest) {
; CHECK-LABEL: define void @do_not_forward_oversize_offset(
; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DEST:%.*]]) {
; CHECK-NEXT: [[DEP_DEST:%.*]] = alloca [9 x i8], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[SRC]], i64 6, i1 false)
; CHECK-NEXT: [[TMP_OFFSET:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[TMP_OFFSET]], i64 6, i1 false)
; CHECK-NEXT: ret void
;
%cpy_tmp = alloca %buf, align 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %cpy_tmp, ptr align 1 %src, i64 6, i1 false)
%cpy_tmp_offset = getelementptr inbounds i8, ptr %cpy_tmp, i64 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %cpy_tmp_offset, i64 6, i1 false)
ret void
}

; We can forward `memcpy` because the stores (to src[0] and src[6]) do not touch the bytes being forwarded (src[1..5]).
define void @forward_offset_and_store(ptr %src, ptr %dest) {
; CHECK-LABEL: define void @forward_offset_and_store(
; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DEST:%.*]]) {
; CHECK-NEXT: [[DEP_DEST:%.*]] = alloca [9 x i8], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[SRC]], i64 7, i1 false)
; CHECK-NEXT: store i8 1, ptr [[SRC]], align 1
; CHECK-NEXT: [[DEP_SRC_END:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 6
; CHECK-NEXT: store i8 1, ptr [[DEP_SRC_END]], align 1
; CHECK-NEXT: [[TMP_OFFSET:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 1
; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[TMP1]], i64 5, i1 false)
; CHECK-NEXT: ret void
;
%cpy_tmp = alloca %buf, align 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %cpy_tmp, ptr align 1 %src, i64 7, i1 false)
store i8 1, ptr %src, align 1
%src_end = getelementptr inbounds i8, ptr %src, i64 6
store i8 1, ptr %src_end, align 1
%cpy_tmp_offset = getelementptr inbounds i8, ptr %cpy_tmp, i64 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %cpy_tmp_offset, i64 5, i1 false)
ret void
}

; We cannot forward `memcpy` because the store to src[1] alters bytes the forwarded copy would read (src[1..5]).
; Also, make sure we have removed the GEP instruction that was created temporarily.
define void @do_not_forward_offset_and_store(ptr %src, ptr %dest) {
; CHECK-LABEL: define void @do_not_forward_offset_and_store(
; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DEST:%.*]]) {
; CHECK-NEXT: [[DEP_DEST:%.*]] = alloca [9 x i8], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEP_DEST]], ptr align 1 [[SRC]], i64 7, i1 false)
; CHECK-NEXT: [[DEP:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 1
; CHECK-NEXT: store i8 1, ptr [[DEP]], align 1
; CHECK-NEXT: [[TMP_OFFSET:%.*]] = getelementptr inbounds i8, ptr [[DEP_DEST]], i64 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DEST]], ptr align 1 [[TMP_OFFSET]], i64 5, i1 false)
; CHECK-NEXT: ret void
;
%cpy_tmp = alloca %buf, align 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %cpy_tmp, ptr align 1 %src, i64 7, i1 false)
%src_offset = getelementptr inbounds i8, ptr %src, i64 1
store i8 1, ptr %src_offset, align 1
%cpy_tmp_offset = getelementptr inbounds i8, ptr %cpy_tmp, i64 1
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dest, ptr align 1 %cpy_tmp_offset, i64 5, i1 false)
ret void
}
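
The temporary-GEP cleanup this test checks for comes from the `llvm::make_scope_exit` guard added in MemCpyOptimizer.cpp. A stripped-down sketch of that pattern (the function name is illustrative, not the pass's code):

#include "llvm/ADT/ScopeExit.h"
#include "llvm/IR/Instruction.h"

// If a transform speculatively creates an instruction (the offset GEP here)
// and then bails out, the scope_exit erases it again as long as it has no users.
bool transformWithSpeculativeInst(llvm::Instruction *&NewInst) {
  auto Cleanup = llvm::make_scope_exit([&NewInst] {
    if (NewInst && NewInst->use_empty())
      NewInst->eraseFromParent();
  });
  // ... create NewInst, then run the remaining legality checks; any early
  // `return false` still runs Cleanup and removes the dead instruction.
  return false;
}
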

declare void @use(ptr)

declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)
declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)
28 changes: 28 additions & 0 deletions llvm/test/Transforms/PhaseOrdering/memcpy-offset.ll
@@ -0,0 +1,28 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=memcpyopt,instcombine -S -verify-memoryssa | FileCheck --check-prefix=CUSTOM %s
; RUN: opt < %s -O2 -S | FileCheck --check-prefix=O2 %s

; Check that we eliminate all `memcpy` calls in this function.
define void @memcpy_forward_back_with_offset(ptr %arg) {
; CUSTOM-LABEL: define void @memcpy_forward_back_with_offset(
; CUSTOM-SAME: ptr [[ARG:%.*]]) {
; CUSTOM-NEXT: store i8 1, ptr [[ARG]], align 1
; CUSTOM-NEXT: ret void
;
; O2-LABEL: define void @memcpy_forward_back_with_offset(
; O2-SAME: ptr nocapture writeonly [[ARG:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; O2-NEXT: store i8 1, ptr [[ARG]], align 1
; O2-NEXT: ret void
;
%i = alloca [753 x i8], align 1
%i1 = alloca [754 x i8], align 1
call void @llvm.memcpy.p0.p0.i64(ptr %i1, ptr %arg, i64 754, i1 false)
%i2 = getelementptr inbounds i8, ptr %i1, i64 1
call void @llvm.memcpy.p0.p0.i64(ptr %i, ptr %i2, i64 753, i1 false)
store i8 1, ptr %arg, align 1
%i3 = getelementptr inbounds i8, ptr %arg, i64 1
call void @llvm.memcpy.p0.p0.i64(ptr %i3, ptr %i, i64 753, i1 false)
ret void
}
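
Why everything except the final store can be removed: the last memcpy writes back exactly the bytes that were originally read from %arg (routed through the two temporaries), so once the copies are forwarded it becomes a copy of a range onto itself. A rough C++ analogue of the IR above (my reconstruction, not part of the PR):

#include <cstring>

void memcpy_forward_back_with_offset(char *arg) {
  char i1[754], i[753];
  std::memcpy(i1, arg, 754);    // i1 <- arg[0..753]
  std::memcpy(i, i1 + 1, 753);  // i  <- arg[1..753] once forwarded
  arg[0] = 1;                   // only touches arg[0]
  std::memcpy(arg + 1, i, 753); // writes arg[1..753] back unchanged: a no-op
  // memcpyopt + instcombine reduce the whole function to `arg[0] = 1;`.
}
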

declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)