Replace Operation::isa with llvm::isa.
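
The Operation::isa<OpClass>() member template is removed from
mlir/include/mlir/IR/Operation.h; call sites switch to the free function
llvm::isa, and null-check-then-isa chains collapse to
llvm::isa_and_nonnull. A minimal sketch of the pattern applied throughout
(MyOp is a hypothetical op class, shown only for illustration):

    // Before: member template on mlir::Operation.
    if (op->isa<MyOp>())
      return matchSuccess();
    // After: free function from llvm/Support/Casting.h. Files that name
    // LLVM helpers individually gain a using llvm::isa; declaration
    // (see Linalg2/lib/Transforms.cpp below).
    if (isa<MyOp>(op))
      return matchSuccess();
    // A null check followed by isa folds into one call:
    if (llvm::isa_and_nonnull<MyOp>(value->getDefiningOp()))
      return matchSuccess();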
--
PiperOrigin-RevId: 247789235
diff --git a/mlir/examples/Linalg/Linalg1/lib/ConvertToLLVMDialect.cpp b/mlir/examples/Linalg/Linalg1/lib/ConvertToLLVMDialect.cpp
index 48884b1..f1fc4ed 100644
--- a/mlir/examples/Linalg/Linalg1/lib/ConvertToLLVMDialect.cpp
+++ b/mlir/examples/Linalg/Linalg1/lib/ConvertToLLVMDialect.cpp
@@ -148,7 +148,7 @@
: DialectOpConversion(linalg::RangeOp::getOperationName(), 1, context) {}
PatternMatchResult match(Operation *op) const override {
- if (op->isa<linalg::RangeOp>())
+ if (isa<linalg::RangeOp>(op))
return matchSuccess();
return matchFailure();
}
@@ -180,7 +180,7 @@
: DialectOpConversion(linalg::ViewOp::getOperationName(), 1, context) {}
PatternMatchResult match(Operation *op) const override {
- if (op->isa<linalg::ViewOp>())
+ if (isa<linalg::ViewOp>(op))
return matchSuccess();
return matchFailure();
}
@@ -312,7 +312,7 @@
: DialectOpConversion(linalg::SliceOp::getOperationName(), 1, context) {}
PatternMatchResult match(Operation *op) const override {
- if (op->isa<linalg::SliceOp>())
+ if (isa<linalg::SliceOp>(op))
return matchSuccess();
return matchFailure();
}
diff --git a/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp b/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp
index 9df0af8..d78d6aa 100644
--- a/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp
+++ b/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp
@@ -29,6 +29,7 @@
using llvm::ArrayRef;
using llvm::cast;
+using llvm::isa;
using llvm::SmallVector;
using mlir::FuncBuilder;
using mlir::MemRefType;
@@ -44,7 +45,7 @@
// analyses. This builds the chain.
static SmallVector<Value *, 8> getViewChain(mlir::Value *v) {
assert(v->getType().isa<ViewType>() && "ViewType expected");
- if (v->getDefiningOp()->isa<ViewOp>()) {
+ if (isa<ViewOp>(v->getDefiningOp())) {
return SmallVector<mlir::Value *, 8>{v};
}
@@ -54,7 +55,7 @@
tmp.push_back(v);
v = sliceOp.getParentView();
} while (!v->getType().isa<ViewType>());
- assert(v->getDefiningOp()->isa<ViewOp>() && "must be a ViewOp");
+ assert(isa<ViewOp>(v->getDefiningOp()) && "must be a ViewOp");
tmp.push_back(v);
return SmallVector<mlir::Value *, 8>(tmp.rbegin(), tmp.rend());
}
diff --git a/mlir/examples/Linalg/Linalg3/lib/ConvertToLLVMDialect.cpp b/mlir/examples/Linalg/Linalg3/lib/ConvertToLLVMDialect.cpp
index 22feb66..a2b39de 100644
--- a/mlir/examples/Linalg/Linalg3/lib/ConvertToLLVMDialect.cpp
+++ b/mlir/examples/Linalg/Linalg3/lib/ConvertToLLVMDialect.cpp
@@ -60,7 +60,7 @@
// Match the Op specified as template argument.
PatternMatchResult match(Operation *op) const override {
- if (op->isa<Op>())
+ if (isa<Op>(op))
return matchSuccess();
return matchFailure();
}
diff --git a/mlir/examples/Linalg/Linalg3/lib/TensorOps.cpp b/mlir/examples/Linalg/Linalg3/lib/TensorOps.cpp
index a5b094c..2209e9d 100644
--- a/mlir/examples/Linalg/Linalg3/lib/TensorOps.cpp
+++ b/mlir/examples/Linalg/Linalg3/lib/TensorOps.cpp
@@ -103,8 +103,8 @@
auto *op = getOperation();
auto *vA(getInputView(0)), *vB(getInputView(1)), *vC(getOutputView(0));
auto indexingPosPair = getViewRootIndexing(vA, 0);
- assert(indexingPosPair.first->getDefiningOp() &&
- indexingPosPair.first->getDefiningOp()->isa<RangeOp>());
+ assert(
+ llvm::isa_and_nonnull<RangeOp>(indexingPosPair.first->getDefiningOp()));
// clang-format off
ScopedContext scope(FuncBuilder(op), op->getLoc());
IndexHandle i;
@@ -177,8 +177,8 @@
auto *op = getOperation();
auto *vA(getInputView(0)), *vB(getInputView(1)), *vC(getOutputView(0));
auto indexingPosPair = getViewRootIndexing(vB, 1);
- assert(indexingPosPair.first->getDefiningOp() &&
- indexingPosPair.first->getDefiningOp()->isa<RangeOp>());
+ assert(
+ llvm::isa_and_nonnull<RangeOp>(indexingPosPair.first->getDefiningOp()));
using linalg::common::LoopNestRangeBuilder;
// clang-format off
ScopedContext scope(FuncBuilder(op), op->getLoc());
diff --git a/mlir/examples/toy/Ch5/mlir/ToyCombine.cpp b/mlir/examples/toy/Ch5/mlir/ToyCombine.cpp
index 260f6a6..2330447 100644
--- a/mlir/examples/toy/Ch5/mlir/ToyCombine.cpp
+++ b/mlir/examples/toy/Ch5/mlir/ToyCombine.cpp
@@ -188,7 +188,7 @@
TypeCastOp typeCast = llvm::cast<TypeCastOp>(op);
auto resTy = typeCast.getResult()->getType();
auto *candidateOp = op;
- while (candidateOp && candidateOp->isa<TypeCastOp>()) {
+ while (llvm::isa_and_nonnull<TypeCastOp>(candidateOp)) {
if (resTy == candidateOp->getOperand(0)->getType()) {
rewriter.replaceOp(typeCast, {candidateOp->getOperand(0)});
return matchSuccess();
diff --git a/mlir/include/mlir/IR/Matchers.h b/mlir/include/mlir/IR/Matchers.h
index fd139c6..3e337b2 100644
--- a/mlir/include/mlir/IR/Matchers.h
+++ b/mlir/include/mlir/IR/Matchers.h
@@ -121,7 +121,7 @@
/// The matcher that matches a certain kind of op.
template <typename OpClass> struct op_matcher {
- bool match(Operation *op) { return op->isa<OpClass>(); }
+ bool match(Operation *op) { return isa<OpClass>(op); }
};
} // end namespace detail
diff --git a/mlir/include/mlir/IR/Operation.h b/mlir/include/mlir/IR/Operation.h
index 088a4e4..e71e8ed 100644
--- a/mlir/include/mlir/IR/Operation.h
+++ b/mlir/include/mlir/IR/Operation.h
@@ -386,14 +386,6 @@
LogicalResult fold(SmallVectorImpl<Value *> &results);
//===--------------------------------------------------------------------===//
- // Conversions to declared operations like DimOp
- //===--------------------------------------------------------------------===//
-
- /// The is methods return true if the operation is a typed op (like DimOp) of
- /// of the given class.
- template <typename OpClass> bool isa() { return OpClass::classof(this); }
-
- //===--------------------------------------------------------------------===//
// Operation Walkers
//===--------------------------------------------------------------------===//
diff --git a/mlir/lib/AffineOps/AffineOps.cpp b/mlir/lib/AffineOps/AffineOps.cpp
index f551afb..40069f6 100644
--- a/mlir/lib/AffineOps/AffineOps.cpp
+++ b/mlir/lib/AffineOps/AffineOps.cpp
@@ -58,7 +58,7 @@
if (auto *op = value->getDefiningOp()) {
// Top level operation or constant operation is ok.
- if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
+ if (op->getParentOp() == nullptr || isa<ConstantOp>(op))
return true;
// Affine apply operation is ok if all of its operands are ok.
if (auto applyOp = dyn_cast<AffineApplyOp>(op))
@@ -83,7 +83,7 @@
if (auto *op = value->getDefiningOp()) {
// Top level operation or constant operation is ok.
- if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
+ if (op->getParentOp() == nullptr || isa<ConstantOp>(op))
return true;
// Affine apply operation is ok if all of its operands are ok.
if (auto applyOp = dyn_cast<AffineApplyOp>(op))
@@ -688,7 +688,7 @@
// Check that if a "block" has a terminator, it is an `AffineTerminatorOp`.
static LogicalResult checkHasAffineTerminator(OpState &op, Block &block) {
- if (block.empty() || block.back().isa<AffineTerminatorOp>())
+ if (block.empty() || isa<AffineTerminatorOp>(block.back()))
return success();
op.emitOpError("expects regions to end with '" +
diff --git a/mlir/lib/Analysis/AffineAnalysis.cpp b/mlir/lib/Analysis/AffineAnalysis.cpp
index c1f455e..861c0a1 100644
--- a/mlir/lib/Analysis/AffineAnalysis.cpp
+++ b/mlir/lib/Analysis/AffineAnalysis.cpp
@@ -775,8 +775,8 @@
if (srcAccess.memref != dstAccess.memref)
return false;
// Return 'false' if one of these accesses is not a StoreOp.
- if (!allowRAR && !srcAccess.opInst->isa<StoreOp>() &&
- !dstAccess.opInst->isa<StoreOp>())
+ if (!allowRAR && !isa<StoreOp>(srcAccess.opInst) &&
+ !isa<StoreOp>(dstAccess.opInst))
return false;
// Get composed access function for 'srcAccess'.
@@ -860,7 +860,7 @@
// Collect all load and store ops in loop nest rooted at 'forOp'.
SmallVector<Operation *, 8> loadAndStoreOpInsts;
forOp.getOperation()->walk([&](Operation *opInst) {
- if (opInst->isa<LoadOp>() || opInst->isa<StoreOp>())
+ if (isa<LoadOp>(opInst) || isa<StoreOp>(opInst))
loadAndStoreOpInsts.push_back(opInst);
});
diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp
index 3d984c5..3ec4833 100644
--- a/mlir/lib/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Analysis/LoopAnalysis.cpp
@@ -279,7 +279,7 @@
}
static bool isVectorTransferReadOrWrite(Operation &op) {
- return op.isa<VectorTransferReadOp>() || op.isa<VectorTransferWriteOp>();
+ return isa<VectorTransferReadOp>(op) || isa<VectorTransferWriteOp>(op);
}
using VectorizableOpFun = std::function<bool(AffineForOp, Operation &)>;
@@ -300,7 +300,7 @@
// No vectorization across unknown regions.
auto regions = matcher::Op([](Operation &op) -> bool {
return op.getNumRegions() != 0 &&
- !(op.isa<AffineIfOp>() || op.isa<AffineForOp>());
+ !(isa<AffineIfOp>(op) || isa<AffineForOp>(op));
});
SmallVector<NestedMatch, 8> regionsMatched;
regions.match(forOp, &regionsMatched);
diff --git a/mlir/lib/Analysis/NestedMatcher.cpp b/mlir/lib/Analysis/NestedMatcher.cpp
index 95270a1..f08f66d 100644
--- a/mlir/lib/Analysis/NestedMatcher.cpp
+++ b/mlir/lib/Analysis/NestedMatcher.cpp
@@ -110,9 +110,9 @@
}
}
-static bool isAffineForOp(Operation &op) { return op.isa<AffineForOp>(); }
+static bool isAffineForOp(Operation &op) { return isa<AffineForOp>(op); }
-static bool isAffineIfOp(Operation &op) { return op.isa<AffineIfOp>(); }
+static bool isAffineIfOp(Operation &op) { return isa<AffineIfOp>(op); }
namespace mlir {
namespace matcher {
@@ -154,7 +154,7 @@
}
bool isLoadOrStore(Operation &op) {
- return op.isa<LoadOp>() || op.isa<StoreOp>();
+ return isa<LoadOp>(op) || isa<StoreOp>(op);
}
} // end namespace matcher
diff --git a/mlir/lib/Analysis/TestMemRefDependenceCheck.cpp b/mlir/lib/Analysis/TestMemRefDependenceCheck.cpp
index 4005871..2b0f1ab 100644
--- a/mlir/lib/Analysis/TestMemRefDependenceCheck.cpp
+++ b/mlir/lib/Analysis/TestMemRefDependenceCheck.cpp
@@ -114,7 +114,7 @@
// Collect the loads and stores within the function.
loadsAndStores.clear();
getFunction().walk([&](Operation *op) {
- if (op->isa<LoadOp>() || op->isa<StoreOp>())
+ if (isa<LoadOp>(op) || isa<StoreOp>(op))
loadsAndStores.push_back(op);
});
diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
index cc46d65..2a46c0e 100644
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -45,7 +45,7 @@
// Traverse up the hierarchy collecting all 'affine.for' operations while
// skipping over 'affine.if' operations.
while (currOp && ((currAffineForOp = dyn_cast<AffineForOp>(currOp)) ||
- currOp->isa<AffineIfOp>())) {
+ isa<AffineIfOp>(currOp))) {
if (currAffineForOp)
loops->push_back(currAffineForOp);
currOp = currOp->getParentOp();
@@ -172,7 +172,7 @@
LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
ComputationSliceState *sliceState,
bool addMemRefDimBounds) {
- assert((op->isa<LoadOp>() || op->isa<StoreOp>()) && "load/store op expected");
+ assert((isa<LoadOp>(op) || isa<StoreOp>(op)) && "load/store op expected");
MemRefAccess access(op);
memref = access.memref;
@@ -490,7 +490,7 @@
const MemRefAccess &srcAccess, const MemRefAccess &dstAccess,
unsigned dstLoopDepth, ComputationSliceState *sliceState) {
bool readReadAccesses =
- srcAccess.opInst->isa<LoadOp>() && dstAccess.opInst->isa<LoadOp>();
+ isa<LoadOp>(srcAccess.opInst) && isa<LoadOp>(dstAccess.opInst);
FlatAffineConstraints dependenceConstraints;
if (!checkMemrefAccessDependence(
srcAccess, dstAccess, /*loopDepth=*/1, &dependenceConstraints,
@@ -642,7 +642,7 @@
indices.push_back(index);
}
} else {
- assert(loadOrStoreOpInst->isa<StoreOp>() && "load/store op expected");
+ assert(isa<StoreOp>(loadOrStoreOpInst) && "load/store op expected");
auto storeOp = dyn_cast<StoreOp>(loadOrStoreOpInst);
opInst = loadOrStoreOpInst;
memref = storeOp.getMemRef();
@@ -658,7 +658,7 @@
return memref->getType().cast<MemRefType>().getRank();
}
-bool MemRefAccess::isStore() const { return opInst->isa<StoreOp>(); }
+bool MemRefAccess::isStore() const { return isa<StoreOp>(opInst); }
/// Returns the nesting depth of this statement, i.e., the number of loops
/// surrounding this statement.
@@ -666,7 +666,7 @@
Operation *currOp = &op;
unsigned depth = 0;
while ((currOp = currOp->getParentOp())) {
- if (currOp->isa<AffineForOp>())
+ if (isa<AffineForOp>(currOp))
depth++;
}
return depth;
@@ -698,7 +698,7 @@
// Walk this 'affine.for' operation to gather all memory regions.
bool error = false;
block.walk(start, end, [&](Operation *opInst) {
- if (!opInst->isa<LoadOp>() && !opInst->isa<StoreOp>()) {
+ if (!isa<LoadOp>(opInst) && !isa<StoreOp>(opInst)) {
// Neither load nor a store op.
return;
}
@@ -761,7 +761,7 @@
// Collect all load and store ops in loop nest rooted at 'forOp'.
SmallVector<Operation *, 8> loadAndStoreOpInsts;
forOp.getOperation()->walk([&](Operation *opInst) {
- if (opInst->isa<LoadOp>() || opInst->isa<StoreOp>())
+ if (isa<LoadOp>(opInst) || isa<StoreOp>(opInst))
loadAndStoreOpInsts.push_back(opInst);
});
diff --git a/mlir/lib/Analysis/VectorAnalysis.cpp b/mlir/lib/Analysis/VectorAnalysis.cpp
index 627ca7a..7c0176d 100644
--- a/mlir/lib/Analysis/VectorAnalysis.cpp
+++ b/mlir/lib/Analysis/VectorAnalysis.cpp
@@ -205,7 +205,7 @@
superVectorType = write.getVectorType();
mustDivide = true;
} else if (op.getNumResults() == 0) {
- if (!op.isa<ReturnOp>()) {
+ if (!isa<ReturnOp>(op)) {
op.emitError("NYI: assuming only return operations can have 0 "
" results at this point");
}
diff --git a/mlir/lib/LLVMIR/Transforms/ConvertToLLVMDialect.cpp b/mlir/lib/LLVMIR/Transforms/ConvertToLLVMDialect.cpp
index e9aee95..ad16143 100644
--- a/mlir/lib/LLVMIR/Transforms/ConvertToLLVMDialect.cpp
+++ b/mlir/lib/LLVMIR/Transforms/ConvertToLLVMDialect.cpp
@@ -209,7 +209,7 @@
// Match by type.
PatternMatchResult match(Operation *op) const override {
- if (op->isa<SourceOp>())
+ if (isa<SourceOp>(op))
return this->matchSuccess();
return this->matchFailure();
}
diff --git a/mlir/lib/Linalg/IR/LinalgOps.cpp b/mlir/lib/Linalg/IR/LinalgOps.cpp
index 8ea45df..da102b3 100644
--- a/mlir/lib/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Linalg/IR/LinalgOps.cpp
@@ -572,17 +572,17 @@
auto i = getAffineDimExpr(0, context);
auto j = getAffineDimExpr(1, context);
auto k = getAffineDimExpr(2, context);
- if (op->isa<DotOp>())
+ if (isa<DotOp>(op))
// A(r_i) * B(r_i) -> C()
return SmallVector<AffineMap, 4>{AffineMap::get(1, 0, {i}, {}),
AffineMap::get(1, 0, {i}, {}),
AffineMap()};
- if (op->isa<MatvecOp>())
+ if (isa<MatvecOp>(op))
// A(i, r_j) * B(r_j) -> C(i)
return SmallVector<AffineMap, 4>{AffineMap::get(2, 0, {i, j}, {}),
AffineMap::get(2, 0, {j}, {}),
AffineMap::get(2, 0, {i}, {})};
- if (op->isa<MatmulOp>())
+ if (isa<MatmulOp>(op))
// A(i, r_k) * B(r_k, j) -> C(i, j)
return SmallVector<AffineMap, 4>{AffineMap::get(3, 0, {i, k}, {}),
AffineMap::get(3, 0, {k, j}, {}),
diff --git a/mlir/lib/StandardOps/Ops.cpp b/mlir/lib/StandardOps/Ops.cpp
index 59c1400..d7b60a0 100644
--- a/mlir/lib/StandardOps/Ops.cpp
+++ b/mlir/lib/StandardOps/Ops.cpp
@@ -1232,7 +1232,7 @@
// Check that all of the uses of the AllocOp are other DeallocOps.
for (auto &use : memref->getUses())
- if (!use.getOwner()->isa<DeallocOp>())
+ if (!isa<DeallocOp>(use.getOwner()))
return matchFailure();
// Erase the dealloc operation.
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index 597efc3..9b1a42e 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -187,7 +187,7 @@
// Emit calls. If the called function has a result, remap the corresponding
// value. Note that LLVM IR dialect CallOp has either 0 or 1 result.
- if (opInst.isa<LLVM::CallOp>()) {
+ if (isa<LLVM::CallOp>(opInst)) {
llvm::Value *result = convertCall(opInst);
if (opInst.getNumResults() != 0) {
valueMapping[opInst.getResult(0)] = result;
@@ -258,7 +258,7 @@
static Value *getPHISourceValue(Block *current, Block *pred,
unsigned numArguments, unsigned index) {
auto &terminator = *pred->getTerminator();
- if (terminator.isa<LLVM::BrOp>()) {
+ if (isa<LLVM::BrOp>(terminator)) {
return terminator.getOperand(index);
}
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index 937399c..00ae92b 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -479,7 +479,7 @@
// Get to the first load, store, or for op.
auto curBegin =
std::find_if(block->begin(), block->end(), [&](Operation &op) {
- return op.isa<LoadOp>() || op.isa<StoreOp>() || op.isa<AffineForOp>();
+ return isa<LoadOp>(op) || isa<StoreOp>(op) || isa<AffineForOp>(op);
});
for (auto it = curBegin; it != block->end(); ++it) {
@@ -522,7 +522,7 @@
runOnBlock(/*begin=*/it, /*end=*/std::next(it));
curBegin = std::next(it);
}
- } else if (!it->isa<LoadOp>() && !it->isa<StoreOp>()) {
+ } else if (!isa<LoadOp>(&*it) && !isa<StoreOp>(&*it)) {
runOnBlock(/*begin=*/curBegin, /*end=*/it);
curBegin = std::next(it);
}
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index d430c5d..4e9e48c 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -127,13 +127,13 @@
void collect(Operation *opToWalk) {
opToWalk->walk([&](Operation *op) {
- if (op->isa<AffineForOp>())
+ if (isa<AffineForOp>(op))
forOps.push_back(cast<AffineForOp>(op));
else if (op->getNumRegions() != 0)
hasNonForRegion = true;
- else if (op->isa<LoadOp>())
+ else if (isa<LoadOp>(op))
loadOpInsts.push_back(op);
- else if (op->isa<StoreOp>())
+ else if (isa<StoreOp>(op))
storeOpInsts.push_back(op);
});
}
@@ -141,8 +141,8 @@
// TODO(b/117228571) Replace when this is modeled through side-effects/op traits
static bool isMemRefDereferencingOp(Operation &op) {
- if (op.isa<LoadOp>() || op.isa<StoreOp>() || op.isa<DmaStartOp>() ||
- op.isa<DmaWaitOp>())
+ if (isa<LoadOp>(op) || isa<StoreOp>(op) || isa<DmaStartOp>(op) ||
+ isa<DmaWaitOp>(op))
return true;
return false;
}
@@ -604,7 +604,7 @@
continue;
assert(nodes.count(edge.id) > 0);
// Skip if 'edge.id' is not a loop nest.
- if (!getNode(edge.id)->op->isa<AffineForOp>())
+ if (!isa<AffineForOp>(getNode(edge.id)->op))
continue;
// Visit current input edge 'edge'.
callback(edge);
@@ -756,7 +756,7 @@
auto *forInst = forOp.getOperation();
auto *parentInst = forOp.getOperation()->getParentOp();
if (parentInst != nullptr) {
- assert(parentInst->isa<AffineForOp>() && "Expected parent AffineForOp");
+ assert(isa<AffineForOp>(parentInst) && "Expected parent AffineForOp");
// Add mapping to 'forOp' from its parent AffineForOp.
stats->loopMap[parentInst].push_back(forOp);
}
@@ -765,7 +765,7 @@
unsigned count = 0;
stats->opCountMap[forInst] = 0;
for (auto &op : *forOp.getBody()) {
- if (!op.isa<AffineForOp>() && !op.isa<AffineIfOp>())
+ if (!isa<AffineForOp>(op) && !isa<AffineIfOp>(op))
++count;
}
stats->opCountMap[forInst] = count;
@@ -1049,7 +1049,7 @@
// This can increase the loop depth at which we can fuse a slice, since we are
// pushing loop carried dependence to a greater depth in the loop nest.
static void sinkSequentialLoops(MemRefDependenceGraph::Node *node) {
- assert(node->op->isa<AffineForOp>());
+ assert(isa<AffineForOp>(node->op));
SmallVector<AffineForOp, 4> loops;
AffineForOp curr = cast<AffineForOp>(node->op);
getPerfectlyNestedLoops(loops, curr);
@@ -1829,7 +1829,7 @@
// Get 'dstNode' into which to attempt fusion.
auto *dstNode = mdg->getNode(dstId);
// Skip if 'dstNode' is not a loop nest.
- if (!dstNode->op->isa<AffineForOp>())
+ if (!isa<AffineForOp>(dstNode->op))
continue;
// Sink sequential loops in 'dstNode' (and thus raise parallel loops)
// while preserving relative order. This can increase the maximum loop
@@ -1867,7 +1867,7 @@
// Get 'srcNode' from which to attempt fusion into 'dstNode'.
auto *srcNode = mdg->getNode(srcId);
// Skip if 'srcNode' is not a loop nest.
- if (!srcNode->op->isa<AffineForOp>())
+ if (!isa<AffineForOp>(srcNode->op))
continue;
// Skip if 'srcNode' has more than one store to any memref.
// TODO(andydavis) Support fusing multi-output src loop nests.
@@ -2012,7 +2012,7 @@
// Get 'dstNode' into which to attempt fusion.
auto *dstNode = mdg->getNode(dstId);
// Skip if 'dstNode' is not a loop nest.
- if (!dstNode->op->isa<AffineForOp>())
+ if (!isa<AffineForOp>(dstNode->op))
continue;
// Attempt to fuse 'dstNode' with its sibling nodes in the graph.
fuseWithSiblingNodes(dstNode);
@@ -2180,7 +2180,7 @@
if (outEdge.id == dstNode->id || outEdge.value != inEdge.value)
return;
auto *sibNode = mdg->getNode(sibNodeId);
- if (!sibNode->op->isa<AffineForOp>())
+ if (!isa<AffineForOp>(sibNode->op))
return;
// Check if 'sibNode/dstNode' can be input-reuse fused on 'memref'.
if (canFuseWithSibNode(sibNode, outEdge.value)) {
diff --git a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
index 2f95db9..402f7d9 100644
--- a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
+++ b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
@@ -82,7 +82,7 @@
for (auto &op : *loopBody) {
// If the operation is loop invariant, insert it into opsToMove.
- if (!op.isa<AffineForOp>() && !op.isa<AffineTerminatorOp>() &&
+ if (!isa<AffineForOp>(op) && !isa<AffineTerminatorOp>(op) &&
loopDefinedOps.count(&op) != 1) {
LLVM_DEBUG(op.print(llvm::dbgs() << "\nLICM'ing op\n"));
opsToMove.push_back(&op);
@@ -99,7 +99,7 @@
// If the for loop body has a single operation (the terminator), erase it.
if (forOp.getBody()->getOperations().size() == 1) {
- assert(forOp.getBody()->getOperations().front().isa<AffineTerminatorOp>());
+ assert(isa<AffineTerminatorOp>(forOp.getBody()->front()));
forOp.erase();
}
}
diff --git a/mlir/lib/Transforms/LoopUnroll.cpp b/mlir/lib/Transforms/LoopUnroll.cpp
index 1707f78..0595392 100644
--- a/mlir/lib/Transforms/LoopUnroll.cpp
+++ b/mlir/lib/Transforms/LoopUnroll.cpp
@@ -111,7 +111,7 @@
for (auto &region : opInst->getRegions())
for (auto &block : region)
hasInnerLoops |= walkPostOrder(block.begin(), block.end());
- if (opInst->isa<AffineForOp>()) {
+ if (isa<AffineForOp>(opInst)) {
if (!hasInnerLoops)
loops.push_back(cast<AffineForOp>(opInst));
return true;
diff --git a/mlir/lib/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
index 43e8f4a..609b424 100644
--- a/mlir/lib/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
@@ -139,12 +139,12 @@
void walk(Block &block) {
for (auto it = block.begin(), e = std::prev(block.end()); it != e;) {
auto subBlockStart = it;
- while (it != e && !it->isa<AffineForOp>())
+ while (it != e && !isa<AffineForOp>(&*it))
++it;
if (it != subBlockStart)
subBlocks.push_back({subBlockStart, std::prev(it)});
// Process all for insts that appear next.
- while (it != e && it->isa<AffineForOp>())
+ while (it != e && isa<AffineForOp>(&*it))
walk(&*it++);
}
}
diff --git a/mlir/lib/Transforms/LowerAffine.cpp b/mlir/lib/Transforms/LowerAffine.cpp
index 6f0162e..7f52e85 100644
--- a/mlir/lib/Transforms/LowerAffine.cpp
+++ b/mlir/lib/Transforms/LowerAffine.cpp
@@ -612,8 +612,7 @@
// Collect all the For operations as well as AffineIfOps and AffineApplyOps.
// We do this as a prepass to avoid invalidating the walker with our rewrite.
getFunction().walk([&](Operation *op) {
- if (op->isa<AffineApplyOp>() || op->isa<AffineForOp>() ||
- op->isa<AffineIfOp>())
+ if (isa<AffineApplyOp>(op) || isa<AffineForOp>(op) || isa<AffineIfOp>(op))
instsToRewrite.push_back(op);
});
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index 206ae53b..f81fabb 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -256,7 +256,7 @@
auto it = substitutionsMap->find(v);
if (it == substitutionsMap->end()) {
auto *opInst = v->getDefiningOp();
- if (opInst->isa<ConstantOp>()) {
+ if (isa<ConstantOp>(opInst)) {
FuncBuilder b(opInst);
auto *op = instantiate(&b, opInst, hwVectorType, substitutionsMap);
auto res = substitutionsMap->insert(std::make_pair(v, op->getResult(0)));
@@ -407,9 +407,9 @@
static Operation *instantiate(FuncBuilder *b, Operation *opInst,
VectorType hwVectorType,
DenseMap<Value *, Value *> *substitutionsMap) {
- assert(!opInst->isa<VectorTransferReadOp>() &&
+ assert(!isa<VectorTransferReadOp>(opInst) &&
"Should call the function specialized for VectorTransferReadOp");
- assert(!opInst->isa<VectorTransferWriteOp>() &&
+ assert(!isa<VectorTransferWriteOp>(opInst) &&
"Should call the function specialized for VectorTransferWriteOp");
if (opInst->getNumRegions() != 0)
return nullptr;
@@ -550,7 +550,7 @@
FuncBuilder b(op);
// AffineApplyOp are ignored: instantiating the proper vector op will take
// care of AffineApplyOps by composing them properly.
- if (op->isa<AffineApplyOp>()) {
+ if (isa<AffineApplyOp>(op)) {
return false;
}
if (op->getNumRegions() != 0)
@@ -749,7 +749,7 @@
// Capture terminators; i.e. vector.transfer_write ops involving a strict
// super-vector of subVectorType.
auto filter = [subVectorType](Operation &op) {
- if (!op.isa<VectorTransferWriteOp>()) {
+ if (!isa<VectorTransferWriteOp>(op)) {
return false;
}
return matcher::operatesOnSuperVectorsOf(op, subVectorType);
diff --git a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
index 118efe5..fcbaeab 100644
--- a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
+++ b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
@@ -237,15 +237,15 @@
for (auto *memref : memrefsToErase) {
// If the memref hasn't been alloc'ed in this function, skip.
Operation *defInst = memref->getDefiningOp();
- if (!defInst || !defInst->isa<AllocOp>())
+ if (!defInst || !isa<AllocOp>(defInst))
// TODO(mlir-team): if the memref was returned by a 'call' operation, we
// could still erase it if the call had no side-effects.
continue;
if (std::any_of(memref->use_begin(), memref->use_end(),
[&](OpOperand &use) {
auto *ownerInst = use.getOwner();
- return (!ownerInst->isa<StoreOp>() &&
- !ownerInst->isa<DeallocOp>());
+ return (!isa<StoreOp>(ownerInst) &&
+ !isa<DeallocOp>(ownerInst));
}))
continue;
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index 272972d..0d4b201 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -57,8 +57,8 @@
// Temporary utility: will be replaced when DmaStart/DmaFinish abstract op's are
// added. TODO(b/117228571)
static unsigned getTagMemRefPos(Operation &dmaInst) {
- assert(dmaInst.isa<DmaStartOp>() || dmaInst.isa<DmaWaitOp>());
- if (dmaInst.isa<DmaStartOp>()) {
+ assert(isa<DmaStartOp>(dmaInst) || isa<DmaWaitOp>(dmaInst));
+ if (isa<DmaStartOp>(dmaInst)) {
// Second to last operand.
return dmaInst.getNumOperands() - 2;
}
@@ -189,7 +189,7 @@
SmallVector<Operation *, 4> dmaStartInsts, dmaFinishInsts;
for (auto &op : *forOp.getBody()) {
// Collect DMA finish operations.
- if (op.isa<DmaWaitOp>()) {
+ if (isa<DmaWaitOp>(op)) {
dmaFinishInsts.push_back(&op);
continue;
}
@@ -218,7 +218,7 @@
bool escapingUses = false;
for (const auto &use : memref->getUses()) {
// We can double buffer regardless of dealloc's outside the loop.
- if (use.getOwner()->isa<DeallocOp>())
+ if (isa<DeallocOp>(use.getOwner()))
continue;
if (!forOp.getBody()->findAncestorInstInBlock(*use.getOwner())) {
LLVM_DEBUG(llvm::dbgs()
@@ -293,7 +293,7 @@
allocInst->erase();
} else if (oldMemRef->hasOneUse()) {
auto *singleUse = oldMemRef->use_begin()->getOwner();
- if (singleUse->isa<DeallocOp>()) {
+ if (isa<DeallocOp>(singleUse)) {
singleUse->erase();
oldMemRef->getDefiningOp()->erase();
}
@@ -325,7 +325,7 @@
DenseMap<Operation *, unsigned> instShiftMap;
for (auto &pair : startWaitPairs) {
auto *dmaStartInst = pair.first;
- assert(dmaStartInst->isa<DmaStartOp>());
+ assert(isa<DmaStartOp>(dmaStartInst));
instShiftMap[dmaStartInst] = 0;
// Set shifts for DMA start op's affine operand computation slices to 0.
SmallVector<AffineApplyOp, 4> sliceOps;
diff --git a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
index 7fe62a2..fbdee58 100644
--- a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
+++ b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
@@ -173,7 +173,7 @@
if (op->hasNoSideEffect() && op->use_empty()) {
// Be careful to update bookkeeping in ConstantHelper to keep
// consistency if this is a constant op.
- if (op->isa<ConstantOp>())
+ if (isa<ConstantOp>(op))
helper.notifyRemoval(op);
op->erase();
continue;
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 1ae75b4..d0d564a 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -209,7 +209,7 @@
operandMap.map(srcIV, loopChunkIV);
}
for (auto *op : insts) {
- if (!op->isa<AffineTerminatorOp>())
+ if (!isa<AffineTerminatorOp>(op))
bodyBuilder.clone(*op, operandMap);
}
};
@@ -511,7 +511,6 @@
/// deeper in the loop nest.
void mlir::sinkLoop(AffineForOp forOp, unsigned loopDepth) {
for (unsigned i = 0; i < loopDepth; ++i) {
- assert(forOp.getBody()->front().isa<AffineForOp>());
AffineForOp nextForOp = cast<AffineForOp>(forOp.getBody()->front());
interchangeLoops(forOp, nextForOp);
}
@@ -551,7 +550,7 @@
if (&op == newForOp.getOperation()) {
continue;
}
- if (op.isa<AffineTerminatorOp>()) {
+ if (isa<AffineTerminatorOp>(op)) {
continue;
}
auto *instClone = b.clone(op, map);
diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp
index 1ab821a..00ee955 100644
--- a/mlir/lib/Transforms/Utils/Utils.cpp
+++ b/mlir/lib/Transforms/Utils/Utils.cpp
@@ -38,8 +38,8 @@
// Temporary utility: will be replaced when this is modeled through
// side-effects/op traits. TODO(b/117228571)
static bool isMemRefDereferencingOp(Operation &op) {
- if (op.isa<LoadOp>() || op.isa<StoreOp>() || op.isa<DmaStartOp>() ||
- op.isa<DmaWaitOp>())
+ if (isa<LoadOp>(op) || isa<StoreOp>(op) || isa<DmaStartOp>(op) ||
+ isa<DmaWaitOp>(op))
return true;
return false;
}
@@ -93,7 +93,7 @@
// Skip dealloc's - no replacement is necessary, and a replacement doesn't
// hurt dealloc's.
- if (opInst->isa<DeallocOp>())
+ if (isa<DeallocOp>(opInst))
continue;
// Check if the memref was used in a non-dereferencing context. It is fine for
@@ -225,12 +225,9 @@
// Collect all operands that are results of affine apply ops.
SmallVector<Value *, 4> subOperands;
subOperands.reserve(opInst->getNumOperands());
- for (auto *operand : opInst->getOperands()) {
- auto *defInst = operand->getDefiningOp();
- if (defInst && defInst->isa<AffineApplyOp>()) {
+ for (auto *operand : opInst->getOperands())
+ if (isa_and_nonnull<AffineApplyOp>(operand->getDefiningOp()))
subOperands.push_back(operand);
- }
- }
// Gather sequence of AffineApplyOps reachable from 'subOperands'.
SmallVector<Operation *, 4> affineApplyOps;
diff --git a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
index 20138d5..7b4db1f 100644
--- a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
+++ b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
@@ -231,7 +231,7 @@
simplifyAffineMap(res).print(outs << "\nComposed map: ");
}
-static bool affineApplyOp(Operation &op) { return op.isa<AffineApplyOp>(); }
+static bool affineApplyOp(Operation &op) { return isa<AffineApplyOp>(op); }
static bool singleResultAffineApplyOpWithoutUses(Operation &op) {
auto app = dyn_cast<AffineApplyOp>(op);
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index 4a58b15..a5bb23f 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -741,14 +741,14 @@
vectorizedSet.insert(value);
vectorizationMap.insert(std::make_pair(key, value));
registerReplacement(key->getResult(0), value->getResult(0));
- if (key->isa<LoadOp>()) {
+ if (isa<LoadOp>(key)) {
assert(roots.count(key) == 0 && "root was already inserted previously");
roots.insert(key);
}
}
void VectorizationState::registerTerminal(Operation *op) {
- assert(op->isa<StoreOp>() && "terminal must be a StoreOp");
+ assert(isa<StoreOp>(op) && "terminal must be a StoreOp");
assert(terminals.count(op) == 0 &&
"terminal was already inserted previously");
terminals.insert(op);
@@ -800,7 +800,7 @@
// identity subset of AffineMap and do not change layout.
// TODO(ntv): increase the expressiveness power of vector.transfer operations
// as needed by various targets.
- if (opInst->template isa<LoadOp>()) {
+ if (isa<LoadOp>(opInst)) {
auto permutationMap =
makePermutationMap(opInst, state->strategy->loopToVectorDim);
if (!permutationMap)
@@ -1005,11 +1005,11 @@
static Operation *vectorizeOneOperation(Operation *opInst,
VectorizationState *state) {
// Sanity checks.
- assert(!opInst->isa<LoadOp>() &&
+ assert(!isa<LoadOp>(opInst) &&
"all loads must have already been fully vectorized independently");
- assert(!opInst->isa<VectorTransferReadOp>() &&
+ assert(!isa<VectorTransferReadOp>(opInst) &&
"vector.transfer_read cannot be further vectorized");
- assert(!opInst->isa<VectorTransferWriteOp>() &&
+ assert(!isa<VectorTransferWriteOp>(opInst) &&
"vector.transfer_write cannot be further vectorized");
if (auto store = dyn_cast<StoreOp>(opInst)) {
diff --git a/mlir/tools/mlir-tblgen/RewriterGen.cpp b/mlir/tools/mlir-tblgen/RewriterGen.cpp
index 9cf8507..57068e2 100644
--- a/mlir/tools/mlir-tblgen/RewriterGen.cpp
+++ b/mlir/tools/mlir-tblgen/RewriterGen.cpp
@@ -251,7 +251,7 @@
// Skip if there is no defining operation (e.g., arguments to function).
os.indent(indent) << formatv("if (!op{0}) return matchFailure();\n", depth);
os.indent(indent) << formatv(
- "if (!op{0}->isa<{1}>()) return matchFailure();\n", depth,
+ "if (!isa<{1}>(op{0})) return matchFailure();\n", depth,
op.getQualCppClassName());
}
if (tree.getNumArgs() != op.getNumArgs()) {