[IR] Remove the AtomicMem*Inst helper classes #138710
Conversation
Migrate their usage to the AnyMem*Inst family, and add an isAtomic() query on the base class for that hierarchy. This matches the idioms we use for, e.g., isAtomic on load, store, etc. instructions and the existing isVolatile idioms on mem* routines, and it allows us to more easily share code between atomic and non-atomic variants. As with llvm#138568, the goal here is to simplify the class hierarchy and make it easier to reason about. I'm moving from easiest to hardest, and will stop at some point when I hit "good enough".
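For illustration, here is a minimal caller-side sketch of the pattern this enables. It is hypothetical pass code written against the post-patch API (the helper name is made up), not code from the patch itself:

#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// Previously this kind of check needed the dedicated AtomicMemTransferInst
// class, i.e. isa<AtomicMemTransferInst>(I). With this patch, callers match
// the wider AnyMem* class and then query atomicity, mirroring isAtomic() on
// load/store instructions.
static bool isElementwiseAtomicTransfer(const Instruction *I) {
  if (const auto *MTI = dyn_cast<AnyMemTransferInst>(I))
    return MTI->isAtomic();
  return false;
}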
@llvm/pr-subscribers-llvm-transforms @llvm/pr-subscribers-llvm-ir

Author: Philip Reames (preames)

Changes

Migrate their usage to the AnyMem*Inst family, and add an isAtomic() query on the base class for that hierarchy. This matches the idioms we use for, e.g., isAtomic on load, store, etc. instructions and the existing isVolatile idioms on mem* routines, and it allows us to more easily share code between atomic and non-atomic variants. As with #138568, the goal here is to simplify the class hierarchy and make it easier to reason about. I'm moving from easiest to hardest, and will stop at some point when I hit "good enough". Longer term, I'd sorta like to merge or reverse the naming on the plain Mem*Inst and the AnyMem*Inst classes, but that's a much larger and more risky change. Not sure I'm going to actually do that.

Patch is 20.50 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/138710.diff

12 Files Affected:
diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h
index c046e0e380a36..2de56018b49b5 100644
--- a/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -30,8 +30,6 @@ class StoreInst;
class MemTransferInst;
class MemIntrinsic;
class AtomicCmpXchgInst;
-class AtomicMemTransferInst;
-class AtomicMemIntrinsic;
class AtomicRMWInst;
class AnyMemTransferInst;
class AnyMemIntrinsic;
@@ -253,13 +251,11 @@ class MemoryLocation {
/// Return a location representing the source of a memory transfer.
static MemoryLocation getForSource(const MemTransferInst *MTI);
- static MemoryLocation getForSource(const AtomicMemTransferInst *MTI);
static MemoryLocation getForSource(const AnyMemTransferInst *MTI);
/// Return a location representing the destination of a memory set or
/// transfer.
static MemoryLocation getForDest(const MemIntrinsic *MI);
- static MemoryLocation getForDest(const AtomicMemIntrinsic *MI);
static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
static std::optional<MemoryLocation> getForDest(const CallBase *CI,
const TargetLibraryInfo &TLI);
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index 48b3067266125..ea9257bd0d95b 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -1107,100 +1107,6 @@ template <class BaseCL> class MemSetBase : public BaseCL {
}
};
-// The common base class for the atomic memset/memmove/memcpy intrinsics
-// i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
-class AtomicMemIntrinsic : public MemIntrinsicBase<AtomicMemIntrinsic> {
-private:
- enum { ARG_ELEMENTSIZE = 3 };
-
-public:
- Value *getRawElementSizeInBytes() const {
- return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
- }
-
- ConstantInt *getElementSizeInBytesCst() const {
- return cast<ConstantInt>(getRawElementSizeInBytes());
- }
-
- uint32_t getElementSizeInBytes() const {
- return getElementSizeInBytesCst()->getZExtValue();
- }
-
- void setElementSizeInBytes(Constant *V) {
- assert(V->getType() == Type::getInt8Ty(getContext()) &&
- "setElementSizeInBytes called with value of wrong type!");
- setArgOperand(ARG_ELEMENTSIZE, V);
- }
-
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::memcpy_element_unordered_atomic:
- case Intrinsic::memmove_element_unordered_atomic:
- case Intrinsic::memset_element_unordered_atomic:
- return true;
- default:
- return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-};
-
-/// This class represents atomic memset intrinsic
-// i.e. llvm.element.unordered.atomic.memset
-class AtomicMemSetInst : public MemSetBase<AtomicMemIntrinsic> {
-public:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memset_element_unordered_atomic;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-};
-
-// This class wraps the atomic memcpy/memmove intrinsics
-// i.e. llvm.element.unordered.atomic.memcpy/memmove
-class AtomicMemTransferInst : public MemTransferBase<AtomicMemIntrinsic> {
-public:
- static bool classof(const IntrinsicInst *I) {
- switch (I->getIntrinsicID()) {
- case Intrinsic::memcpy_element_unordered_atomic:
- case Intrinsic::memmove_element_unordered_atomic:
- return true;
- default:
- return false;
- }
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-};
-
-/// This class represents the atomic memcpy intrinsic
-/// i.e. llvm.element.unordered.atomic.memcpy
-class AtomicMemCpyInst : public AtomicMemTransferInst {
-public:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memcpy_element_unordered_atomic;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-};
-
-/// This class represents the atomic memmove intrinsic
-/// i.e. llvm.element.unordered.atomic.memmove
-class AtomicMemMoveInst : public AtomicMemTransferInst {
-public:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memmove_element_unordered_atomic;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-};
-
/// This is the common base class for memset/memcpy/memmove.
class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
private:
@@ -1345,6 +1251,9 @@ class MemMoveInst : public MemTransferInst {
// i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
// and llvm.memset/memcpy/memmove
class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
+private:
+ enum { ARG_ELEMENTSIZE = 3 };
+
public:
bool isVolatile() const {
// Only the non-atomic intrinsics can be volatile
@@ -1353,6 +1262,17 @@ class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
return false;
}
+ bool isAtomic() const {
+ switch (getIntrinsicID()) {
+ case Intrinsic::memcpy_element_unordered_atomic:
+ case Intrinsic::memmove_element_unordered_atomic:
+ case Intrinsic::memset_element_unordered_atomic:
+ return true;
+ default:
+ return false;
+ }
+ }
+
static bool classof(const IntrinsicInst *I) {
switch (I->getIntrinsicID()) {
case Intrinsic::memcpy:
@@ -1371,6 +1291,16 @@ class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
static bool classof(const Value *V) {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
+
+ Value *getRawElementSizeInBytes() const {
+ assert(isAtomic());
+ return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
+ }
+
+ uint32_t getElementSizeInBytes() const {
+ assert(isAtomic());
+ return cast<ConstantInt>(getRawElementSizeInBytes())->getZExtValue();
+ }
};
/// This class represents any memset intrinsic
diff --git a/llvm/include/llvm/Transforms/Utils/LowerMemIntrinsics.h b/llvm/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
index 1007d282b2ac5..e144198d05ec7 100644
--- a/llvm/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
+++ b/llvm/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
@@ -19,7 +19,7 @@
namespace llvm {
-class AtomicMemCpyInst;
+class AnyMemCpyInst;
class ConstantInt;
class Instruction;
class MemCpyInst;
@@ -61,11 +61,11 @@ void expandMemSetAsLoop(MemSetInst *MemSet);
/// Expand \p MemSetPattern as a loop. \p MemSet is not deleted.
void expandMemSetPatternAsLoop(MemSetPatternInst *MemSet);
-/// Expand \p AtomicMemCpy as a loop. \p AtomicMemCpy is not deleted.
-void expandAtomicMemCpyAsLoop(AtomicMemCpyInst *AtomicMemCpy,
+/// Expand an atomic \p MemCpy as a loop. \p AtomicMemCpy is not deleted.
+void expandAtomicMemCpyAsLoop(AnyMemCpyInst *AtomicMemCpy,
const TargetTransformInfo &TTI,
ScalarEvolution *SE);
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/llvm/lib/Analysis/MemoryLocation.cpp b/llvm/lib/Analysis/MemoryLocation.cpp
index 6e3232772706a..3b42bb412b9ba 100644
--- a/llvm/lib/Analysis/MemoryLocation.cpp
+++ b/llvm/lib/Analysis/MemoryLocation.cpp
@@ -95,10 +95,6 @@ MemoryLocation MemoryLocation::getForSource(const MemTransferInst *MTI) {
return getForSource(cast<AnyMemTransferInst>(MTI));
}
-MemoryLocation MemoryLocation::getForSource(const AtomicMemTransferInst *MTI) {
- return getForSource(cast<AnyMemTransferInst>(MTI));
-}
-
MemoryLocation MemoryLocation::getForSource(const AnyMemTransferInst *MTI) {
assert(MTI->getRawSource() == MTI->getArgOperand(1));
return getForArgument(MTI, 1, nullptr);
@@ -108,10 +104,6 @@ MemoryLocation MemoryLocation::getForDest(const MemIntrinsic *MI) {
return getForDest(cast<AnyMemIntrinsic>(MI));
}
-MemoryLocation MemoryLocation::getForDest(const AtomicMemIntrinsic *MI) {
- return getForDest(cast<AnyMemIntrinsic>(MI));
-}
-
MemoryLocation MemoryLocation::getForDest(const AnyMemIntrinsic *MI) {
assert(MI->getRawDest() == MI->getArgOperand(0));
return getForArgument(MI, 0, nullptr);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 744a0fa572b0c..3e6afa9b32ba7 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6543,7 +6543,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
return;
}
case Intrinsic::memcpy_element_unordered_atomic: {
- const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
+ auto &MI = cast<AnyMemCpyInst>(I);
SDValue Dst = getValue(MI.getRawDest());
SDValue Src = getValue(MI.getRawSource());
SDValue Length = getValue(MI.getLength());
@@ -6559,7 +6559,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
return;
}
case Intrinsic::memmove_element_unordered_atomic: {
- auto &MI = cast<AtomicMemMoveInst>(I);
+ auto &MI = cast<AnyMemMoveInst>(I);
SDValue Dst = getValue(MI.getRawDest());
SDValue Src = getValue(MI.getRawSource());
SDValue Length = getValue(MI.getLength());
@@ -6575,7 +6575,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
return;
}
case Intrinsic::memset_element_unordered_atomic: {
- auto &MI = cast<AtomicMemSetInst>(I);
+ auto &MI = cast<AnyMemSetInst>(I);
SDValue Dst = getValue(MI.getRawDest());
SDValue Val = getValue(MI.getValue());
SDValue Length = getValue(MI.getLength());
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index 089bd997bc058..8adb85ec6091a 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -228,7 +228,7 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
CallInst *CI =
CreateIntrinsic(Intrinsic::memset_element_unordered_atomic, Tys, Ops);
- cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);
+ cast<AnyMemSetInst>(CI)->setDestAlignment(Alignment);
// Set the TBAA info if present.
if (TBAATag)
@@ -293,7 +293,7 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
CreateIntrinsic(Intrinsic::memcpy_element_unordered_atomic, Tys, Ops);
// Set the alignment of the pointer args.
- auto *AMCI = cast<AtomicMemCpyInst>(CI);
+ auto *AMCI = cast<AnyMemCpyInst>(CI);
AMCI->setDestAlignment(DstAlign);
AMCI->setSourceAlignment(SrcAlign);
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index a798808d79656..83c1264aef12b 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -5617,7 +5617,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
case Intrinsic::memcpy_element_unordered_atomic:
case Intrinsic::memmove_element_unordered_atomic:
case Intrinsic::memset_element_unordered_atomic: {
- const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
+ const auto *AMI = cast<AnyMemIntrinsic>(&Call);
ConstantInt *ElementSizeCI =
cast<ConstantInt>(AMI->getRawElementSizeInBytes());
@@ -5632,7 +5632,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
};
Check(IsValidAlignment(AMI->getDestAlign()),
"incorrect alignment of the destination argument", Call);
- if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
+ if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
Check(IsValidAlignment(AMT->getSourceAlign()),
"incorrect alignment of the source argument", Call);
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 844e18dd7d8c5..61494b70df257 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -164,7 +164,7 @@ Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
// introduce the unaligned memory access which will be later transformed
// into libcall in CodeGen. This is not evident performance gain so disable
// it now.
- if (isa<AtomicMemTransferInst>(MI))
+ if (MI->isAtomic())
if (*CopyDstAlign < Size || *CopySrcAlign < Size)
return nullptr;
@@ -204,7 +204,7 @@ Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
L->setVolatile(MT->isVolatile());
S->setVolatile(MT->isVolatile());
}
- if (isa<AtomicMemTransferInst>(MI)) {
+ if (MI->isAtomic()) {
// atomics have to be unordered
L->setOrdering(AtomicOrdering::Unordered);
S->setOrdering(AtomicOrdering::Unordered);
@@ -255,9 +255,8 @@ Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
// introduce the unaligned memory access which will be later transformed
// into libcall in CodeGen. This is not evident performance gain so disable
// it now.
- if (isa<AtomicMemSetInst>(MI))
- if (Alignment < Len)
- return nullptr;
+ if (MI->isAtomic() && Alignment < Len)
+ return nullptr;
// memset(s,c,n) -> store s, c (for n=1,2,4,8)
if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
@@ -276,7 +275,7 @@ Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
for_each(at::getDVRAssignmentMarkers(S), replaceOpForAssignmentMarkers);
S->setAlignment(Alignment);
- if (isa<AtomicMemSetInst>(MI))
+ if (MI->isAtomic())
S->setOrdering(AtomicOrdering::Unordered);
// Set the size of the copy to 0, it will be deleted on the next iteration.
@@ -1654,29 +1653,29 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
}
IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
- if (!II) return visitCallBase(CI);
-
- // For atomic unordered mem intrinsics if len is not a positive or
- // not a multiple of element size then behavior is undefined.
- if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
- if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
- if (NumBytes->isNegative() ||
- (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
- CreateNonTerminatorUnreachable(AMI);
- assert(AMI->getType()->isVoidTy() &&
- "non void atomic unordered mem intrinsic");
- return eraseInstFromFunction(*AMI);
- }
+ if (!II)
+ return visitCallBase(CI);
// Intrinsics cannot occur in an invoke or a callbr, so handle them here
// instead of in visitCallBase.
if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
bool Changed = false;
- // memmove/cpy/set of zero bytes is a noop.
- if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
+ if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(MI->getLength())) {
+ // memmove/cpy/set of zero bytes is a noop.
if (NumBytes->isNullValue())
return eraseInstFromFunction(CI);
+
+ // For atomic unordered mem intrinsics if len is not a positive or
+ // not a multiple of element size then behavior is undefined.
+ if (MI->isAtomic() &&
+ (NumBytes->isNegative() ||
+ (NumBytes->getZExtValue() % MI->getElementSizeInBytes() != 0))) {
+ CreateNonTerminatorUnreachable(MI);
+ assert(MI->getType()->isVoidTy() &&
+ "non void atomic unordered mem intrinsic");
+ return eraseInstFromFunction(*MI);
+ }
}
// No other transformations apply to volatile transfers.
@@ -1692,12 +1691,11 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
if (GVSrc->isConstant()) {
Module *M = CI.getModule();
Intrinsic::ID MemCpyID =
- isa<AtomicMemMoveInst>(MMI)
- ? Intrinsic::memcpy_element_unordered_atomic
- : Intrinsic::memcpy;
- Type *Tys[3] = { CI.getArgOperand(0)->getType(),
- CI.getArgOperand(1)->getType(),
- CI.getArgOperand(2)->getType() };
+ MMI->isAtomic() ? Intrinsic::memcpy_element_unordered_atomic
+ : Intrinsic::memcpy;
+ Type *Tys[3] = {CI.getArgOperand(0)->getType(),
+ CI.getArgOperand(1)->getType(),
+ CI.getArgOperand(2)->getType()};
CI.setCalledFunction(
Intrinsic::getOrInsertDeclaration(M, MemCpyID, Tys));
Changed = true;
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 66168a92f460e..dd5870d949c33 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -670,7 +670,7 @@ static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart,
assert(DeadSize > ToRemoveSize && "Can't remove more than original size");
uint64_t NewSize = DeadSize - ToRemoveSize;
- if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(DeadI)) {
+ if (auto *AMI = dyn_cast<AnyMemIntrinsic>(DeadI); AMI && AMI->isAtomic()) {
// When shortening an atomic memory intrinsic, the newly shortened
// length must remain an integer multiple of the element size.
const uint32_t ElementSize = AMI->getElementSizeInBytes();
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index 8005be8d4ca05..d9805d8b3ea59 100644
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -3054,7 +3054,8 @@ bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT,
// non-leaf memcpy/memmove without deopt state just treat it as a leaf
// copy and don't produce a statepoint.
if (!AllowStatepointWithNoDeoptInfo && !Call->hasDeoptState()) {
- assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) &&
+ assert(isa<AnyMemTransferInst>(Call) &&
+ cast<AnyMemTransferInst>(Call)->isAtomic() &&
"Don't expect any other calls here!");
return false;
}
diff --git a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
index dbab56a6996ce..18b0f617ca232 100644
--- a/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ b/llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -982,9 +982,10 @@ void llvm::expandMemSetPatternAsLoop(MemSetPatternInst *Memset) {
Memset->isVolatile());
}
-void llvm::expandAtomicMemCpyAsLoop(AtomicMemCpyInst *AtomicMemcpy,
+void llvm::expandAtomicMemCpyAsLoop(AnyMemCpyInst *AtomicMemcpy,
const TargetTransformInfo &TTI,
ScalarEvolution *SE) {
+ assert(AtomicMemcpy->isAtomic());
if (ConstantInt *CI = dyn_cast<ConstantInt>(AtomicMemcpy->getLength())) {
createMemCpyLoopKnownSize(
/* InsertBefore */ AtomicMemcpy,
diff --git a/llvm/unittests/Transforms/Utils/MemTransferLowering.cpp b/llvm/unittests/Transforms/Utils/MemTransferLowering.cpp
index 68c6364c1e585..b97bc311f4655 100644
--- a/llvm/unittests/Transforms/Utils/MemTransferLowering.cpp
+++ b/llvm/unittests/Transforms/Utils/MemTransferLowering.cpp
@@ -198,9 +198,9 @@ TEST_F(MemTransferLowerTest, AtomicMemCpyKnownLength) {
TargetTransformInfo TTI(M->getDataLayout());
auto *MemCpyBB = getBasicBlockByName(F, "memcpy");
Instruction *Inst = &MemCpyBB->front();
- assert(isa<AtomicMemCpyInst>(Inst) &&
+ assert(isa<AnyMemCpyInst>(Inst) &&
"Expecting llvm.memcpy.p0i8.i64 instructon");
- AtomicMemCpyInst *MemCpyI = cast<AtomicMemCpyInst>(Inst);
+ AnyMemCpyInst *MemCpyI = cast<AnyMemCpyInst>(Inst);
auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
expandAtomicMemCpyAsLoop(MemCpyI, TTI, &SE);
auto *CopyLoopBB = getBasicBlockByName(F, "load-store-loop");
@@ -243,9 +243,9 @@ TEST_F(MemTransferLowerTest, AtomicMemCpyUnKnownLength) {
TargetTransformInfo TTI(M->getDataLayout());
auto *MemCpyBB = getBasicBlockByName(F, "memcpy");
...
[truncated]
@llvm/pr-subscribers-llvm-selectiondag
LGTM
@@ -61,11 +61,11 @@ void expandMemSetAsLoop(MemSetInst *MemSet);
/// Expand \p MemSetPattern as a loop. \p MemSet is not deleted.
void expandMemSetPatternAsLoop(MemSetPatternInst *MemSet);
-/// Expand \p AtomicMemCpy as a loop. \p AtomicMemCpy is not deleted.
-void expandAtomicMemCpyAsLoop(AtomicMemCpyInst *AtomicMemCpy,
+/// Expand an atomic \p MemCpy as a loop. \p AtomicMemCpy is not deleted.
\p refers to parameter names, so this doc comment shouldn't change.
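To make the convention concrete, here is a small hypothetical declaration (not from this patch) where each \p reference names an actual parameter, which is what Doxygen expects:

/// Expand \p MemCpy as a loop. \p MemCpy is not deleted.
void expandSomethingAsLoop(AnyMemCpyInst *MemCpy);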
@@ -670,7 +670,7 @@ static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart,
assert(DeadSize > ToRemoveSize && "Can't remove more than original size");
uint64_t NewSize = DeadSize - ToRemoveSize;
- if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(DeadI)) {
+ if (auto *AMI = dyn_cast<AnyMemIntrinsic>(DeadI); AMI && AMI->isAtomic()) {
DeadI is already AnyMemIntrinsic.
It's not actually, but there's a DeadIntrinsic cast which is. Adjusting.
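As a rough sketch of the adjusted shape being described, assuming the intrinsic is already in hand as an AnyMemIntrinsic pointer (the helper and variable names below are illustrative, not the exact code that landed):

#include "llvm/IR/IntrinsicInst.h"
#include <cstdint>

using namespace llvm;

// Once the caller already holds an AnyMemIntrinsic * (the existing cast the
// author refers to), no additional dyn_cast is needed; only the atomicity
// check and the element-size constraint remain.
static bool shortenedSizeIsLegal(const AnyMemIntrinsic *DeadIntrinsic,
                                 uint64_t NewSize) {
  if (!DeadIntrinsic->isAtomic())
    return true;
  // A shortened atomic mem intrinsic must keep its length a multiple of the
  // element size.
  return NewSize % DeadIntrinsic->getElementSizeInBytes() == 0;
}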
You can test this locally with the following command:

git-clang-format --diff HEAD~1 HEAD --extensions cpp,h -- llvm/include/llvm/Analysis/MemoryLocation.h llvm/include/llvm/IR/IntrinsicInst.h llvm/include/llvm/Transforms/Utils/LowerMemIntrinsics.h llvm/lib/Analysis/MemoryLocation.cpp llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp llvm/lib/IR/IRBuilder.cpp llvm/lib/IR/Verifier.cpp llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp llvm/lib/Transforms/Utils/LowerMemIntrinsics.cpp llvm/unittests/Transforms/Utils/MemTransferLowering.cpp

View the diff from clang-format here.

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 3e78b20e4..4d69fa232 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1718,9 +1718,8 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
if (GVSrc->isConstant()) {
Module *M = CI.getModule();
Intrinsic::ID MemCpyID =
- MMI->isAtomic()
- ? Intrinsic::memcpy_element_unordered_atomic
- : Intrinsic::memcpy;
+ MMI->isAtomic() ? Intrinsic::memcpy_element_unordered_atomic
+ : Intrinsic::memcpy;
Type *Tys[3] = { CI.getArgOperand(0)->getType(),
CI.getArgOperand(1)->getType(),
CI.getArgOperand(2)->getType() };
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/3/builds/15554

Here is the relevant piece of the build log for the reference:
Upstream change: llvm/llvm-project#138710
Upstream change: llvm/llvm-project#138710
Migrate their usage to the `AnyMem*Inst` family, and add an isAtomic() query on the base class for that hierarchy. This matches the idioms we use for, e.g., isAtomic on load, store, etc. instructions and the existing isVolatile idioms on mem* routines, and it allows us to more easily share code between atomic and non-atomic variants. As with llvm#138568, the goal here is to simplify the class hierarchy and make it easier to reason about. I'm moving from easiest to hardest, and will stop at some point when I hit "good enough". Longer term, I'd sorta like to merge or reverse the naming on the plain Mem*Inst and the AnyMem*Inst, but that's a much larger and more risky change. Not sure I'm going to actually do that.