NFC: Rename FuncBuilder to OpBuilder and refactor it to take a top-level region instead of a function.
PiperOrigin-RevId: 251563898
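
For downstream code, the visible change is in how builders are constructed: a FuncBuilder was created from a Function, whereas an OpBuilder is created from a region (typically the function body), an operation, or a block. The sketch below illustrates the migration; it assumes an MLIR tree at this revision, and the helper name `migrateBuilders` and its `func`/`op` arguments are purely illustrative.

```c++
#include "mlir/IR/Builders.h"
#include "mlir/IR/Function.h"

using namespace mlir;

// Illustrative only: `func` and `op` are assumed to be valid and non-null.
static void migrateBuilders(Function *func, Operation *op) {
  // Before: FuncBuilder builder(func);
  // After:  anchor the builder on the function's top-level region.
  OpBuilder builder(func->getBody());

  // Constructing from an operation still places the insertion point
  // immediately before that operation, as FuncBuilder(Operation *) did.
  OpBuilder atOp(op);

  // New blocks are appended to the builder's region rather than to "the
  // current function".
  Block *block = builder.createBlock();
  (void)block;
  (void)atOp;
}
```

Anchoring the builder on a Region rather than a Function lets the same builder drive code generation inside any region-holding operation, which is what the mechanical per-file changes below rely on.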
diff --git a/mlir/bindings/python/pybind.cpp b/mlir/bindings/python/pybind.cpp
index 76cec27..6ec0860 100644
--- a/mlir/bindings/python/pybind.cpp
+++ b/mlir/bindings/python/pybind.cpp
@@ -248,7 +248,7 @@
PythonFunction enter() {
assert(function.function && "function is not set up");
auto *mlirFunc = static_cast<mlir::Function *>(function.function);
- contextBuilder.emplace(mlirFunc);
+ contextBuilder.emplace(mlirFunc->getBody());
context =
new mlir::edsc::ScopedContext(*contextBuilder, mlirFunc->getLoc());
return function;
@@ -262,7 +262,7 @@
PythonFunction function;
mlir::edsc::ScopedContext *context;
- llvm::Optional<FuncBuilder> contextBuilder;
+ llvm::Optional<OpBuilder> contextBuilder;
};
PythonFunctionContext PythonMLIRModule::makeFunctionContext(
diff --git a/mlir/examples/Linalg/Linalg1/lib/ConvertToLLVMDialect.cpp b/mlir/examples/Linalg/Linalg1/lib/ConvertToLLVMDialect.cpp
index 8cd970c..d13f7f3 100644
--- a/mlir/examples/Linalg/Linalg1/lib/ConvertToLLVMDialect.cpp
+++ b/mlir/examples/Linalg/Linalg1/lib/ConvertToLLVMDialect.cpp
@@ -121,8 +121,7 @@
// Create an array attribute containing integer attributes with values provided
// in `position`.
-static ArrayAttr makePositionAttr(FuncBuilder &builder,
- ArrayRef<int> position) {
+static ArrayAttr makePositionAttr(OpBuilder &builder, ArrayRef<int> position) {
SmallVector<Attribute, 4> attrs;
attrs.reserve(position.size());
for (auto p : position)
diff --git a/mlir/examples/Linalg/Linalg2/Example.cpp b/mlir/examples/Linalg/Linalg2/Example.cpp
index 0de8a90..10f4bf7 100644
--- a/mlir/examples/Linalg/Linalg2/Example.cpp
+++ b/mlir/examples/Linalg/Linalg2/Example.cpp
@@ -39,7 +39,7 @@
mlir::Function *f =
makeFunction(module, "linalg_ops", {indexType, indexType, indexType}, {});
- FuncBuilder builder(f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off
@@ -78,7 +78,7 @@
mlir::Function *f = makeFunction(module, "linalg_ops_folded_slices",
{indexType, indexType, indexType}, {});
- FuncBuilder builder(f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off
diff --git a/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp b/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp
index 4523830..f4d3d68 100644
--- a/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp
+++ b/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp
@@ -31,8 +31,8 @@
using llvm::cast;
using llvm::isa;
using llvm::SmallVector;
-using mlir::FuncBuilder;
using mlir::MemRefType;
+using mlir::OpBuilder;
using mlir::Value;
using mlir::edsc::ScopedContext;
using mlir::edsc::ValueHandle;
@@ -101,7 +101,7 @@
}
ViewOp linalg::emitAndReturnFullyComposedView(Value *v) {
- FuncBuilder builder(v->getDefiningOp());
+ OpBuilder builder(v->getDefiningOp());
ScopedContext scope(builder, v->getDefiningOp()->getLoc());
assert(v->getType().isa<ViewType>() && "must be a ViewType");
auto *memRef = getViewSupportingMemRef(v);
diff --git a/mlir/examples/Linalg/Linalg3/Conversion.cpp b/mlir/examples/Linalg/Linalg3/Conversion.cpp
index 0d7b22b..37d1b51 100644
--- a/mlir/examples/Linalg/Linalg3/Conversion.cpp
+++ b/mlir/examples/Linalg/Linalg3/Conversion.cpp
@@ -44,7 +44,7 @@
module, name,
{dynamic2DMemRefType, dynamic2DMemRefType, dynamic2DMemRefType}, {});
- FuncBuilder builder(f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off
ValueHandle
diff --git a/mlir/examples/Linalg/Linalg3/Example.cpp b/mlir/examples/Linalg/Linalg3/Example.cpp
index cf77785..f02aef9 100644
--- a/mlir/examples/Linalg/Linalg3/Example.cpp
+++ b/mlir/examples/Linalg/Linalg3/Example.cpp
@@ -41,7 +41,7 @@
module, name,
{dynamic2DMemRefType, dynamic2DMemRefType, dynamic2DMemRefType}, {});
- mlir::FuncBuilder builder(f);
+ mlir::OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off
ValueHandle
diff --git a/mlir/examples/Linalg/Linalg3/Execution.cpp b/mlir/examples/Linalg/Linalg3/Execution.cpp
index 902ea67..00d571c 100644
--- a/mlir/examples/Linalg/Linalg3/Execution.cpp
+++ b/mlir/examples/Linalg/Linalg3/Execution.cpp
@@ -44,7 +44,7 @@
module, name,
{dynamic2DMemRefType, dynamic2DMemRefType, dynamic2DMemRefType}, {});
- mlir::FuncBuilder builder(f);
+ mlir::OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off
ValueHandle
diff --git a/mlir/examples/Linalg/Linalg3/lib/ConvertToLLVMDialect.cpp b/mlir/examples/Linalg/Linalg3/lib/ConvertToLLVMDialect.cpp
index 60fdf60..ef0d858 100644
--- a/mlir/examples/Linalg/Linalg3/lib/ConvertToLLVMDialect.cpp
+++ b/mlir/examples/Linalg/Linalg3/lib/ConvertToLLVMDialect.cpp
@@ -41,8 +41,7 @@
// Create an array attribute containing integer attributes with values provided
// in `position`.
-static ArrayAttr makePositionAttr(FuncBuilder &builder,
- ArrayRef<int> position) {
+static ArrayAttr makePositionAttr(Builder &builder, ArrayRef<int> position) {
SmallVector<Attribute, 4> attrs;
attrs.reserve(position.size());
for (auto p : position)
@@ -64,7 +63,7 @@
// descriptor to emit IR iteratively computing the actual offset, followed by
// a getelementptr.
Value *obtainDataPtr(Operation *op, Value *viewDescriptor,
- ArrayRef<Value *> indices, FuncBuilder &rewriter) const {
+ ArrayRef<Value *> indices, Builder &rewriter) const {
auto loadOp = cast<Op>(op);
auto elementType =
loadOp.getViewType().template cast<linalg::ViewType>().getElementType();
diff --git a/mlir/examples/Linalg/Linalg3/lib/TensorOps.cpp b/mlir/examples/Linalg/Linalg3/lib/TensorOps.cpp
index f539c70..778f2ea 100644
--- a/mlir/examples/Linalg/Linalg3/lib/TensorOps.cpp
+++ b/mlir/examples/Linalg/Linalg3/lib/TensorOps.cpp
@@ -64,7 +64,7 @@
using edsc::intrinsics::select;
// Account for affine.terminator in loop.
- FuncBuilder builder(body, std::prev(body->end(), 1));
+ OpBuilder builder(body, std::prev(body->end(), 1));
ScopedContext scope(builder, innermostLoop.getLoc());
FloatType fTy = getOperand(0)
->getType()
@@ -107,7 +107,7 @@
assert(
llvm::isa_and_nonnull<RangeOp>(indexingPosPair.first->getDefiningOp()));
// clang-format off
- FuncBuilder builder(op);
+ OpBuilder builder(op);
ScopedContext scope(builder, op->getLoc());
IndexHandle i;
using linalg::common::LoopNestRangeBuilder;
@@ -132,7 +132,7 @@
using edsc::op::operator==;
using edsc::intrinsics::select;
// Account for affine.terminator in loop.
- FuncBuilder builder(body, std::prev(body->end(), 1));
+ OpBuilder builder(body, std::prev(body->end(), 1));
ScopedContext scope(builder, innermostLoop.getLoc());
FloatType fTy = getOperand(0)
->getType()
@@ -181,7 +181,7 @@
llvm::isa_and_nonnull<RangeOp>(indexingPosPair.first->getDefiningOp()));
using linalg::common::LoopNestRangeBuilder;
// clang-format off
- FuncBuilder builder(op);
+ OpBuilder builder(op);
ScopedContext scope(builder, op->getLoc());
IndexHandle j;
LoopNestRangeBuilder(&j, ValueHandle(indexingPosPair.first))(
@@ -205,7 +205,7 @@
using edsc::op::operator==;
using edsc::intrinsics::select;
// Account for affine.terminator in loop.
- FuncBuilder builder(body, std::prev(body->end(), 1));
+ OpBuilder builder(body, std::prev(body->end(), 1));
ScopedContext scope(builder, innermostLoop.getLoc());
FloatType fTy = getOperand(0)
->getType()
diff --git a/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp b/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
index 3a11c6d..5b16ce0 100644
--- a/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
+++ b/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
@@ -161,7 +161,7 @@
template <class ContractionOp>
static SmallVector<mlir::AffineForOp, 4>
writeContractionAsLoops(ContractionOp contraction) {
- FuncBuilder builder(contraction.getOperation());
+ OpBuilder builder(contraction.getOperation());
ScopedContext scope(builder, contraction.getLoc());
auto allRanges = getRanges(contraction);
auto loopRanges =
@@ -274,7 +274,7 @@
SliceOp slice = dyn_cast<SliceOp>(load.getView()->getDefiningOp());
ViewOp view = slice ? emitAndReturnFullyComposedView(slice.getResult())
: cast<ViewOp>(load.getView()->getDefiningOp());
- FuncBuilder builder(load);
+ OpBuilder builder(load);
ScopedContext scope(builder, load.getLoc());
auto *memRef = view.getSupportingMemRef();
auto operands = emitAndReturnLoadStoreOperands(load, view);
@@ -289,7 +289,7 @@
SliceOp slice = dyn_cast<SliceOp>(store.getView()->getDefiningOp());
ViewOp view = slice ? emitAndReturnFullyComposedView(slice.getResult())
: cast<ViewOp>(store.getView()->getDefiningOp());
- FuncBuilder builder(store);
+ OpBuilder builder(store);
ScopedContext scope(builder, store.getLoc());
auto *valueToStore = store.getValueToStore();
auto *memRef = view.getSupportingMemRef();
diff --git a/mlir/examples/Linalg/Linalg4/Example.cpp b/mlir/examples/Linalg/Linalg4/Example.cpp
index 73e7570..cdc05a1 100644
--- a/mlir/examples/Linalg/Linalg4/Example.cpp
+++ b/mlir/examples/Linalg/Linalg4/Example.cpp
@@ -41,7 +41,7 @@
module, name,
{dynamic2DMemRefType, dynamic2DMemRefType, dynamic2DMemRefType}, {});
- FuncBuilder builder(f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off
@@ -97,7 +97,7 @@
MLIRContext context;
Module module(&context);
mlir::Function *f = makeFunctionWithAMatmulOp(module, "matmul_tiled_views");
- FuncBuilder b(f);
+ OpBuilder b(f->getBody());
lowerToTiledViews(f, {b.create<ConstantIndexOp>(f->getLoc(), 8),
b.create<ConstantIndexOp>(f->getLoc(), 9)});
composeSliceOps(f);
@@ -127,7 +127,7 @@
Module module(&context);
mlir::Function *f =
makeFunctionWithAMatmulOp(module, "matmul_tiled_views_as_loops");
- FuncBuilder b(f);
+ OpBuilder b(f->getBody());
lowerToTiledViews(f, {b.create<ConstantIndexOp>(f->getLoc(), 8),
b.create<ConstantIndexOp>(f->getLoc(), 9)});
composeSliceOps(f);
diff --git a/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp b/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp
index 3df6f4b..11cd6e5 100644
--- a/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp
+++ b/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp
@@ -148,7 +148,7 @@
contraction.getNumParallelDims() + contraction.getNumReductionDims());
auto *op = static_cast<ConcreteOp *>(&contraction);
- mlir::FuncBuilder builder(op->getOperation());
+ mlir::OpBuilder builder(op->getOperation());
ScopedContext scope(builder, op->getLoc());
SmallVector<IndexHandle, 4> ivs(tileSizes.size());
auto pivs = IndexHandle::makeIndexHandlePointers(ivs);
diff --git a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
index 5eb8cd0..df09cd0 100644
--- a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
@@ -104,7 +104,7 @@
/// convenience for emitting individual operations.
/// The builder is stateful, in particular it keeps an "insertion point":
/// this is where the next operations will be introduced.
- std::unique_ptr<mlir::FuncBuilder> builder;
+ std::unique_ptr<mlir::OpBuilder> builder;
/// The symbol table maps a variable name to a value in the current scope.
/// Entering a function creates a new scope, and the function arguments are
@@ -174,7 +174,7 @@
// Create a builder for the function, it will be used throughout the codegen
// to create operations in this function.
- builder = llvm::make_unique<mlir::FuncBuilder>(function.get());
+ builder = llvm::make_unique<mlir::OpBuilder>(function->getBody());
// Emit the body of the function.
if (!mlirGen(*funcAST.getBody()))
diff --git a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
index 7c580d2..4001b30 100644
--- a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
@@ -105,7 +105,7 @@
/// convenience for emitting individual operations.
/// The builder is stateful, in particular it keeps an "insertion point":
/// this is where the next operations will be introduced.
- std::unique_ptr<mlir::FuncBuilder> builder;
+ std::unique_ptr<mlir::OpBuilder> builder;
/// The symbol table maps a variable name to a value in the current scope.
/// Entering a function creates a new scope, and the function arguments are
@@ -175,7 +175,7 @@
// Create a builder for the function, it will be used throughout the codegen
// to create operations in this function.
- builder = llvm::make_unique<mlir::FuncBuilder>(function.get());
+ builder = llvm::make_unique<mlir::OpBuilder>(function->getBody());
// Emit the body of the function.
if (!mlirGen(*funcAST.getBody()))
diff --git a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
index e2001fb..e091cbd 100644
--- a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
@@ -105,7 +105,7 @@
/// convenience for emitting individual operations.
/// The builder is stateful, in particular it keeps an "insertion point":
/// this is where the next operations will be introduced.
- std::unique_ptr<mlir::FuncBuilder> builder;
+ std::unique_ptr<mlir::OpBuilder> builder;
/// The symbol table maps a variable name to a value in the current scope.
/// Entering a function creates a new scope, and the function arguments are
@@ -175,7 +175,7 @@
// Create a builder for the function, it will be used throughout the codegen
// to create operations in this function.
- builder = llvm::make_unique<mlir::FuncBuilder>(function.get());
+ builder = llvm::make_unique<mlir::OpBuilder>(function->getBody());
// Emit the body of the function.
if (!mlirGen(*funcAST.getBody()))
diff --git a/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp
index 2c06526..440e3d8 100644
--- a/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp
+++ b/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp
@@ -315,7 +315,7 @@
// Found a specialized callee! Let's turn this into a normal call
// operation.
SmallVector<mlir::Value *, 8> operands(op->getOperands());
- mlir::FuncBuilder builder(op);
+ mlir::OpBuilder builder(op);
auto newCall =
builder.create<mlir::CallOp>(op->getLoc(), mangledCallee, operands);
if (newCall.getNumResults()) {
diff --git a/mlir/examples/toy/Ch5/mlir/EarlyLowering.cpp b/mlir/examples/toy/Ch5/mlir/EarlyLowering.cpp
index 45d608d..189add0 100644
--- a/mlir/examples/toy/Ch5/mlir/EarlyLowering.cpp
+++ b/mlir/examples/toy/Ch5/mlir/EarlyLowering.cpp
@@ -57,7 +57,7 @@
/// time both sides of the cast (producer and consumer) will be lowered to a
/// dialect like LLVM and end up with the same LLVM representation, at which
/// point this becomes a no-op and is eliminated.
-Value *typeCast(FuncBuilder &builder, Value *val, Type destTy) {
+Value *typeCast(PatternRewriter &builder, Value *val, Type destTy) {
if (val->getType() == destTy)
return val;
return builder.create<toy::TypeCastOp>(val->getLoc(), val, destTy)
@@ -67,7 +67,7 @@
/// Create a type cast to turn a toy.array into a memref. The Toy Array will be
/// lowered to a memref during buffer allocation, at which point the type cast
/// becomes useless.
-Value *memRefTypeCast(FuncBuilder &builder, Value *val) {
+Value *memRefTypeCast(PatternRewriter &builder, Value *val) {
if (val->getType().isa<MemRefType>())
return val;
auto toyArrayTy = val->getType().dyn_cast<toy::ToyArrayType>();
diff --git a/mlir/examples/toy/Ch5/mlir/LateLowering.cpp b/mlir/examples/toy/Ch5/mlir/LateLowering.cpp
index d682d12..ecf6c9d 100644
--- a/mlir/examples/toy/Ch5/mlir/LateLowering.cpp
+++ b/mlir/examples/toy/Ch5/mlir/LateLowering.cpp
@@ -57,7 +57,7 @@
/// time both sides of the cast (producer and consumer) will be lowered to a
/// dialect like LLVM and end up with the same LLVM representation, at which
/// point this becomes a no-op and is eliminated.
-Value *typeCast(FuncBuilder &builder, Value *val, Type destTy) {
+Value *typeCast(PatternRewriter &builder, Value *val, Type destTy) {
if (val->getType() == destTy)
return val;
return builder.create<toy::TypeCastOp>(val->getLoc(), val, destTy)
@@ -67,7 +67,7 @@
/// Create a type cast to turn a toy.array into a memref. The Toy Array will be
/// lowered to a memref during buffer allocation, at which point the type cast
/// becomes useless.
-Value *memRefTypeCast(FuncBuilder &builder, Value *val) {
+Value *memRefTypeCast(PatternRewriter &builder, Value *val) {
if (val->getType().isa<MemRefType>())
return val;
auto toyArrayTy = val->getType().dyn_cast<toy::ToyArrayType>();
@@ -183,7 +183,7 @@
private:
// Turn a string into a toy.alloc (malloc/free abstraction) and a sequence
// of stores into the buffer, and return a MemRef into the buffer.
- Value *getConstantCharBuffer(FuncBuilder &builder, Location loc,
+ Value *getConstantCharBuffer(PatternRewriter &builder, Location loc,
StringRef data) const {
auto retTy =
builder.getMemRefType(data.size() + 1, builder.getIntegerType(8));
@@ -405,7 +405,7 @@
/// operating in a brand new function: we don't have the return to hook the
/// dealloc operations.
Value *allocTensor(toy::AllocOp alloc) {
- FuncBuilder builder(alloc);
+ OpBuilder builder(alloc);
auto retTy = alloc.getResult()->getType();
auto memRefTy = retTy.dyn_cast<MemRefType>();
@@ -420,7 +420,7 @@
// Insert a `dealloc` operation right before the `return` operations, unless
// it is returned itself in which case the caller is responsible for it.
- builder.getFunction()->walk([&](Operation *op) {
+ builder.getRegion()->walk([&](Operation *op) {
auto returnOp = dyn_cast<ReturnOp>(op);
if (!returnOp)
return;
diff --git a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
index e2001fb..e091cbd 100644
--- a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
@@ -105,7 +105,7 @@
/// convenience for emitting individual operations.
/// The builder is stateful, in particular it keeps an "insertion point":
/// this is where the next operations will be introduced.
- std::unique_ptr<mlir::FuncBuilder> builder;
+ std::unique_ptr<mlir::OpBuilder> builder;
/// The symbol table maps a variable name to a value in the current scope.
/// Entering a function creates a new scope, and the function arguments are
@@ -175,7 +175,7 @@
// Create a builder for the function, it will be used throughout the codegen
// to create operations in this function.
- builder = llvm::make_unique<mlir::FuncBuilder>(function.get());
+ builder = llvm::make_unique<mlir::OpBuilder>(function->getBody());
// Emit the body of the function.
if (!mlirGen(*funcAST.getBody()))
diff --git a/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp
index c9da85f..4294f7b 100644
--- a/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp
+++ b/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp
@@ -319,7 +319,7 @@
// Found a specialized callee! Let's turn this into a normal call
// operation.
SmallVector<mlir::Value *, 8> operands(op->getOperands());
- mlir::FuncBuilder builder(f);
+ mlir::OpBuilder builder(f->getBody());
builder.setInsertionPoint(op);
auto newCall =
builder.create<mlir::CallOp>(op->getLoc(), mangledCallee, operands);
diff --git a/mlir/g3doc/Tutorials/Linalg/LLVMConversion.md b/mlir/g3doc/Tutorials/Linalg/LLVMConversion.md
index 83a2a31..af34c9c 100644
--- a/mlir/g3doc/Tutorials/Linalg/LLVMConversion.md
+++ b/mlir/g3doc/Tutorials/Linalg/LLVMConversion.md
@@ -233,7 +233,7 @@
// needs to define as many value as the original operation, but their types
// may be different.
SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
- FuncBuilder &rewriter) const override;
+ OpBuilder &rewriter) const override;
}
```
@@ -296,7 +296,7 @@
```c++
SmallVector<Value *, 4> ViewOpConversion::rewrite(
Operation *op, ArrayRef<Value *> operands,
- FuncBuilder &rewriter) const override {
+ OpBuilder &rewriter) const override {
// Obtain the typed operation (we know we matched only one type).
auto viewOp = op->cast<linalg::ViewOp>();
@@ -437,7 +437,7 @@
}
// The builder into which we emit code.
- FuncBuilder &builder;
+ OpBuilder &builder;
// The actual descriptor.
Value *d;
@@ -450,7 +450,7 @@
```c++
SmallVector<Value *, 4> SliceOpConversion::rewrite(
Operation *op, ArrayRef<Value *> operands,
- FuncBuilder &rewriter) const override {
+ OpBuilder &rewriter) const override {
// Obtain the typed operation (we know we matched only one type).
auto sliceOp = op->cast<linalg::SliceOp>();
@@ -528,7 +528,7 @@
```c++
Value *obtainDataPtr(Location loc, int rank, Value *viewDescriptorVal,
- ArrayRef<Value *> indices, FuncBuilder &rewriter) {
+ ArrayRef<Value *> indices, OpBuilder &rewriter) {
// Create the context object (RAII) in which we can use declarative builders.
// Bring all the builders into the namespace.
using namespace intrinsics;
@@ -560,7 +560,7 @@
// Load Operation Conversion.
SmallVector<Value *, 4> LoadOpConversion::rewrite(
Operation *op, ArrayRef<Value *> operands,
- FuncBuilder &rewriter) const override {
+ OpBuilder &rewriter) const override {
// Obtain the typed operation (we know we matched only one type).
auto loadOp = op->cast<linalg::LoadOp>();
@@ -582,7 +582,7 @@
// Store Operation Conversion
SmallVector<Value *, 4> StoreOpConversion::rewrite(
Operation *op, ArrayRef<Value *> operands,
- FuncBuilder &rewriter) const override {
+ OpBuilder &rewriter) const override {
// Obtain the typed operation (we know we matched only one type).
auto loadOp = op->cast<linalg::StoreOp>();
diff --git a/mlir/g3doc/Tutorials/Toy/Ch-2.md b/mlir/g3doc/Tutorials/Toy/Ch-2.md
index 4a8b8dc..9b07385 100644
--- a/mlir/g3doc/Tutorials/Toy/Ch-2.md
+++ b/mlir/g3doc/Tutorials/Toy/Ch-2.md
@@ -123,7 +123,7 @@
how we create a `toy.transpose` operation:
```
-mlir::Operation *createTransposeOp(FuncBuilder *builder,
+mlir::Operation *createTransposeOp(OpBuilder *builder,
mlir::Value *input_array) {
// We bundle our custom type in a `toy` dialect.
auto toyDialect = mlir::Identifier::get("toy", builder->getContext());
diff --git a/mlir/g3doc/Tutorials/Toy/Ch-3.md b/mlir/g3doc/Tutorials/Toy/Ch-3.md
index 498438a..9ff6c40 100644
--- a/mlir/g3doc/Tutorials/Toy/Ch-3.md
+++ b/mlir/g3doc/Tutorials/Toy/Ch-3.md
@@ -202,11 +202,11 @@
bool verify();
/// Interface to the builder to allow:
- /// mlir::FuncBuilder::create<GenericCallOp>(...)
+ /// mlir::OpBuilder::create<GenericCallOp>(...)
/// This method populates the `state` that MLIR uses to create operations.
/// The `toy.generic_call` operation accepts a callee name and a list of
/// arguments for the call.
- static void build(mlir::FuncBuilder *builder, mlir::OperationState *state,
+ static void build(mlir::OpBuilder *builder, mlir::OperationState *state,
llvm::StringRef callee,
llvm::ArrayRef<mlir::Value *> arguments);
diff --git a/mlir/g3doc/Tutorials/Toy/Ch-5.md b/mlir/g3doc/Tutorials/Toy/Ch-5.md
index 2461275..2681720 100644
--- a/mlir/g3doc/Tutorials/Toy/Ch-5.md
+++ b/mlir/g3doc/Tutorials/Toy/Ch-5.md
@@ -80,7 +80,7 @@
/// The results created by the new IR with the builder are returned, and their
/// number must match the number of result of `op`.
SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
- FuncBuilder &rewriter) const override {
+ OpBuilder &rewriter) const override {
...
// Return the newly allocated buffer, it will be used as an operand when
diff --git a/mlir/include/mlir/AffineOps/AffineOps.h b/mlir/include/mlir/AffineOps/AffineOps.h
index a3749a3..8fcd0ab 100644
--- a/mlir/include/mlir/AffineOps/AffineOps.h
+++ b/mlir/include/mlir/AffineOps/AffineOps.h
@@ -32,7 +32,7 @@
class AffineBound;
class AffineValueMap;
class FlatAffineConstraints;
-class FuncBuilder;
+class OpBuilder;
/// A utility function to check if a value is defined at the top level of a
/// function. A value defined at the top level is always a valid symbol.
@@ -143,7 +143,7 @@
/// Return a Builder set up to insert operations immediately before the
/// terminator.
- FuncBuilder getBodyBuilder();
+ OpBuilder getBodyBuilder();
/// Get the body of the AffineForOp.
Block *getBody() { return &getRegion().front(); }
@@ -361,8 +361,7 @@
/// Returns a composed AffineApplyOp by composing `map` and `operands` with
/// other AffineApplyOps supplying those operands. The operands of the resulting
/// AffineApplyOp do not change the length of AffineApplyOp chains.
-AffineApplyOp makeComposedAffineApply(FuncBuilder *b, Location loc,
- AffineMap map,
+AffineApplyOp makeComposedAffineApply(OpBuilder *b, Location loc, AffineMap map,
llvm::ArrayRef<Value *> operands);
/// Given an affine map `map` and its input `operands`, this method composes
diff --git a/mlir/include/mlir/Analysis/VectorAnalysis.h b/mlir/include/mlir/Analysis/VectorAnalysis.h
index bf070e8..1f4e50c 100644
--- a/mlir/include/mlir/Analysis/VectorAnalysis.h
+++ b/mlir/include/mlir/Analysis/VectorAnalysis.h
@@ -27,9 +27,9 @@
class AffineApplyOp;
class AffineForOp;
class AffineMap;
-class FuncBuilder;
class Location;
class MemRefType;
+class OpBuilder;
class Operation;
class Value;
class VectorType;
diff --git a/mlir/include/mlir/EDSC/Builders.h b/mlir/include/mlir/EDSC/Builders.h
index c925e0a..aa5c321 100644
--- a/mlir/include/mlir/EDSC/Builders.h
+++ b/mlir/include/mlir/EDSC/Builders.h
@@ -50,17 +50,17 @@
/// setting and restoring of insertion points.
class ScopedContext {
public:
- ScopedContext(FuncBuilder &builder, Location location);
+ ScopedContext(OpBuilder &builder, Location location);
/// Sets the insertion point of the builder to 'newInsertPt' for the duration
/// of the scope. The existing insertion point of the builder is restored on
/// destruction.
- ScopedContext(FuncBuilder &builder, FuncBuilder::InsertPoint newInsertPt,
+ ScopedContext(OpBuilder &builder, OpBuilder::InsertPoint newInsertPt,
Location location);
~ScopedContext();
static MLIRContext *getContext();
- static FuncBuilder *getBuilder();
+ static OpBuilder *getBuilder();
static Location getLocation();
private:
@@ -74,10 +74,10 @@
static ScopedContext *&getCurrentScopedContext();
- /// Top level FuncBuilder.
- FuncBuilder &builder;
+ /// Top level OpBuilder.
+ OpBuilder &builder;
/// The previous insertion point of the builder.
- llvm::Optional<FuncBuilder::InsertPoint> prevBuilderInsertPoint;
+ llvm::Optional<OpBuilder::InsertPoint> prevBuilderInsertPoint;
/// Current location.
Location location;
/// Parent context we return into.
@@ -116,20 +116,20 @@
/// Enter an mlir::Block and setup a ScopedContext to insert operations at
/// the end of it. Since we cannot use c++ language-level scoping to implement
/// scoping itself, we use enter/exit pairs of operations.
- /// As a consequence we must allocate a new FuncBuilder + ScopedContext and
+ /// As a consequence we must allocate a new OpBuilder + ScopedContext and
/// let them escape.
/// Step back "prev" times from the end of the block to set up the insertion
/// point, which is useful for non-empty blocks.
void enter(mlir::Block *block, int prev = 0) {
bodyScope = new ScopedContext(
*ScopedContext::getBuilder(),
- FuncBuilder::InsertPoint(block, std::prev(block->end(), prev)),
+ OpBuilder::InsertPoint(block, std::prev(block->end(), prev)),
ScopedContext::getLocation());
bodyScope->nestedBuilder = this;
}
/// Exit the current mlir::Block by explicitly deleting the dynamically
- /// allocated FuncBuilder and ScopedContext.
+ /// allocated OpBuilder and ScopedContext.
void exit() {
// Reclaim now to exit the scope.
bodyScope->nestedBuilder = nullptr;
diff --git a/mlir/include/mlir/IR/Block.h b/mlir/include/mlir/IR/Block.h
index 3c627b4..381a790 100644
--- a/mlir/include/mlir/IR/Block.h
+++ b/mlir/include/mlir/IR/Block.h
@@ -344,6 +344,14 @@
explicit Region(Operation *container);
~Region();
+ /// Return the context this region is inserted in. The region must have a
+ /// valid parent container.
+ MLIRContext *getContext();
+
+ /// Return a location for this region. This is the location attached to the
+ /// parent container. The region must have a valid parent container.
+ Location getLoc();
+
using RegionType = llvm::iplist<Block>;
RegionType &getBlocks() { return blocks; }
@@ -409,6 +417,13 @@
/// the operation with an offending use.
bool isIsolatedAbove(llvm::Optional<Location> noteLoc = llvm::None);
+ /// Walk the operations in this block in postorder, calling the callback for
+ /// each operation.
+ void walk(const std::function<void(Operation *)> &callback) {
+ for (auto &block : *this)
+ block.walk(callback);
+ }
+
private:
RegionType blocks;
diff --git a/mlir/include/mlir/IR/Builders.h b/mlir/include/mlir/IR/Builders.h
index b869dcd..09eaf56 100644
--- a/mlir/include/mlir/IR/Builders.h
+++ b/mlir/include/mlir/IR/Builders.h
@@ -181,40 +181,37 @@
MLIRContext *context;
};
-/// This class helps build a Function. Operations that are created are
-/// automatically inserted at an insertion point. The builder is copyable.
-class FuncBuilder : public Builder {
+/// This class helps build Operations. Operations that are created are
+/// automatically inserted at an insertion point. The builder is copyable.
+class OpBuilder : public Builder {
public:
- /// Create a function builder and set the insertion point to the start of
- /// the function.
- explicit FuncBuilder(Function *func)
- : Builder(func->getContext()), function(func) {
- if (!func->empty())
- setInsertionPoint(&func->front(), func->front().begin());
+ /// Create a builder and set the insertion point to the start of the region.
+ explicit OpBuilder(Region *region)
+ : Builder(region->getContext()), region(region) {
+ if (!region->empty())
+ setInsertionPoint(&region->front(), region->front().begin());
else
clearInsertionPoint();
}
+ explicit OpBuilder(Region &region) : OpBuilder(&region) {}
- explicit FuncBuilder(Function &func) : FuncBuilder(&func) {}
- virtual ~FuncBuilder();
+ virtual ~OpBuilder();
- /// Create a function builder and set insertion point to the given
- /// operation, which will cause subsequent insertions to go right before it.
- FuncBuilder(Operation *op) : FuncBuilder(op->getFunction()) {
+ /// Create a builder and set insertion point to the given operation, which
+ /// will cause subsequent insertions to go right before it.
+ OpBuilder(Operation *op) : OpBuilder(op->getContainingRegion()) {
setInsertionPoint(op);
}
- FuncBuilder(Block *block) : FuncBuilder(block->getFunction()) {
- setInsertionPoint(block, block->end());
- }
+ OpBuilder(Block *block) : OpBuilder(block, block->end()) {}
- FuncBuilder(Block *block, Block::iterator insertPoint)
- : FuncBuilder(block->getFunction()) {
+ OpBuilder(Block *block, Block::iterator insertPoint)
+ : OpBuilder(block->getParent()) {
setInsertionPoint(block, insertPoint);
}
- /// Return the function this builder is referring to.
- Function *getFunction() const { return function; }
+ /// Return the region this builder is referring to.
+ Region *getRegion() const { return region; }
/// This class represents a saved insertion point.
class InsertPoint {
@@ -291,7 +288,7 @@
/// Add new block and set the insertion point to the end of it. If an
/// 'insertBefore' block is passed, the block will be placed before the
/// specified block. If not, the block will be appended to the end of the
- /// current function.
+ /// current region.
Block *createBlock(Block *insertBefore = nullptr);
/// Returns the current block of the builder.
@@ -342,7 +339,7 @@
}
private:
- Function *function;
+ Region *region;
Block *block = nullptr;
Block::iterator insertPoint;
};
diff --git a/mlir/include/mlir/IR/PatternMatch.h b/mlir/include/mlir/IR/PatternMatch.h
index bbca58b..08fb490 100644
--- a/mlir/include/mlir/IR/PatternMatch.h
+++ b/mlir/include/mlir/IR/PatternMatch.h
@@ -266,7 +266,7 @@
/// to apply patterns and observe their effects (e.g. to keep worklists or
/// other data structures up to date).
///
-class PatternRewriter : public FuncBuilder {
+class PatternRewriter : public OpBuilder {
public:
/// Create operation of specific op type at the current insertion point
/// without verifying to see if it is valid.
@@ -342,7 +342,7 @@
ArrayRef<Value *> valuesToRemoveIfDead = {});
protected:
- PatternRewriter(Function *fn) : FuncBuilder(fn) {}
+ PatternRewriter(Region &region) : OpBuilder(region) {}
virtual ~PatternRewriter();
// These are the callback methods that subclasses can choose to implement if
diff --git a/mlir/include/mlir/Linalg/IR/LinalgOps.h b/mlir/include/mlir/Linalg/IR/LinalgOps.h
index 92f2630..9b02344 100644
--- a/mlir/include/mlir/Linalg/IR/LinalgOps.h
+++ b/mlir/include/mlir/Linalg/IR/LinalgOps.h
@@ -113,9 +113,9 @@
/// Return a Builder set up to insert operations immediately before the
/// terminator.
- FuncBuilder getBodyBuilder() {
+ OpBuilder getBodyBuilder() {
Block *body = getBody();
- return FuncBuilder(body, std::prev(body->end()));
+ return OpBuilder(body, std::prev(body->end()));
}
/// Get the body of the ForOp.
@@ -408,7 +408,7 @@
unsigned getNumInputsAndOutputs() {
return impl->getNumInputsAndOutputs(getOperation());
}
- Operation *create(FuncBuilder &builder, Location loc,
+ Operation *create(OpBuilder &builder, Location loc,
ArrayRef<Value *> operands) {
return impl->create(builder, loc, operands);
}
@@ -425,7 +425,7 @@
virtual unsigned getNumReductionLoops(Operation *op) = 0;
virtual unsigned getNumWindowLoops(Operation *op) = 0;
virtual unsigned getNumLoops(Operation *op) = 0;
- virtual Operation *create(FuncBuilder &builder, Location loc,
+ virtual Operation *create(OpBuilder &builder, Location loc,
ArrayRef<Value *> operands) = 0;
};
@@ -458,7 +458,7 @@
unsigned getNumLoops(Operation *op) override {
return cast<ConcreteOp>(op).getNumLoops();
}
- Operation *create(FuncBuilder &builder, Location loc,
+ Operation *create(OpBuilder &builder, Location loc,
ArrayRef<Value *> operands) override {
return builder.create<ConcreteOp>(loc, operands);
}
diff --git a/mlir/include/mlir/Linalg/Utils/Utils.h b/mlir/include/mlir/Linalg/Utils/Utils.h
index 31963b2..594a9d1 100644
--- a/mlir/include/mlir/Linalg/Utils/Utils.h
+++ b/mlir/include/mlir/Linalg/Utils/Utils.h
@@ -88,7 +88,7 @@
/// Returns the values obtained by applying `map` to the list of values.
/// Performs simplifications and foldings where possible.
-SmallVector<Value *, 4> applyMapToValues(FuncBuilder *b, Location loc,
+SmallVector<Value *, 4> applyMapToValues(OpBuilder *b, Location loc,
AffineMap map,
ArrayRef<Value *> values,
FunctionConstants &state);
diff --git a/mlir/include/mlir/Transforms/DialectConversion.h b/mlir/include/mlir/Transforms/DialectConversion.h
index 3886f0c..8b476c0 100644
--- a/mlir/include/mlir/Transforms/DialectConversion.h
+++ b/mlir/include/mlir/Transforms/DialectConversion.h
@@ -31,7 +31,6 @@
// Forward declarations.
class Block;
-class FuncBuilder;
class MLIRContext;
class Operation;
class Type;
diff --git a/mlir/include/mlir/Transforms/LoopUtils.h b/mlir/include/mlir/Transforms/LoopUtils.h
index 1105688..8a25522 100644
--- a/mlir/include/mlir/Transforms/LoopUtils.h
+++ b/mlir/include/mlir/Transforms/LoopUtils.h
@@ -31,7 +31,7 @@
class AffineMap;
class AffineForOp;
class Function;
-class FuncBuilder;
+class OpBuilder;
class Value;
/// Unrolls this for operation completely if the trip count is known to be
@@ -80,7 +80,7 @@
void getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
AffineMap *map,
SmallVectorImpl<Value *> *operands,
- FuncBuilder *builder);
+ OpBuilder *builder);
/// Skew the operations in the body of a 'affine.for' operation with the
/// specified operation-wise shifts. The shifts are with respect to the
diff --git a/mlir/include/mlir/Transforms/Utils.h b/mlir/include/mlir/Transforms/Utils.h
index 75407ad..1b32a98 100644
--- a/mlir/include/mlir/Transforms/Utils.h
+++ b/mlir/include/mlir/Transforms/Utils.h
@@ -34,11 +34,9 @@
class AffineApplyOp;
class AffineForOp;
-class FuncBuilder;
class Location;
class Module;
-
-class Function;
+class OpBuilder;
/// Replaces all "deferencing" uses of oldMemRef with newMemRef while optionally
/// remapping the old memref's indices using the supplied affine map,
@@ -83,7 +81,7 @@
/// these will also be collected into a single (multi-result) affine apply op.
/// The final results of the composed AffineApplyOp are returned in output
/// parameter 'results'. Returns the affine apply op created.
-Operation *createComposedAffineApplyOp(FuncBuilder *builder, Location loc,
+Operation *createComposedAffineApplyOp(OpBuilder *builder, Location loc,
ArrayRef<Value *> operands,
ArrayRef<Operation *> affineApplyOps,
SmallVectorImpl<Value *> *results);
diff --git a/mlir/lib/AffineOps/AffineOps.cpp b/mlir/lib/AffineOps/AffineOps.cpp
index 28594a3..9189acf 100644
--- a/mlir/lib/AffineOps/AffineOps.cpp
+++ b/mlir/lib/AffineOps/AffineOps.cpp
@@ -544,7 +544,7 @@
}
}
-AffineApplyOp mlir::makeComposedAffineApply(FuncBuilder *b, Location loc,
+AffineApplyOp mlir::makeComposedAffineApply(OpBuilder *b, Location loc,
AffineMap map,
ArrayRef<Value *> operands) {
AffineMap normalizedMap = map;
@@ -1069,9 +1069,9 @@
results.push_back(llvm::make_unique<AffineForLoopBoundFolder>(context));
}
-FuncBuilder AffineForOp::getBodyBuilder() {
+OpBuilder AffineForOp::getBodyBuilder() {
Block *body = getBody();
- return FuncBuilder(body, std::prev(body->end()));
+ return OpBuilder(body, std::prev(body->end()));
}
AffineBound AffineForOp::getLowerBound() {
diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp
index 117cf6e..16e092b 100644
--- a/mlir/lib/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Analysis/LoopAnalysis.cpp
@@ -54,7 +54,7 @@
int64_t loopSpan;
int64_t step = forOp.getStep();
- FuncBuilder b(forOp.getOperation());
+ OpBuilder b(forOp.getOperation());
if (forOp.hasConstantBounds()) {
int64_t lb = forOp.getConstantLowerBound();
diff --git a/mlir/lib/Analysis/TestParallelismDetection.cpp b/mlir/lib/Analysis/TestParallelismDetection.cpp
index ae5551d..cbda6d4 100644
--- a/mlir/lib/Analysis/TestParallelismDetection.cpp
+++ b/mlir/lib/Analysis/TestParallelismDetection.cpp
@@ -44,7 +44,7 @@
// parallel.
void TestParallelismDetection::runOnFunction() {
Function &f = getFunction();
- FuncBuilder b(f);
+ OpBuilder b(f.getBody());
f.walk<AffineForOp>([&](AffineForOp forOp) {
if (isLoopParallel(forOp))
forOp.emitRemark("parallel loop");
diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
index 476c7c8..aa84236 100644
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -749,7 +749,7 @@
// Clone src loop nest and insert it at the beginning of the operation block
// of the loop at 'dstLoopDepth' in 'dstLoopIVs'.
auto dstAffineForOp = dstLoopIVs[dstLoopDepth - 1];
- FuncBuilder b(dstAffineForOp.getBody(), dstAffineForOp.getBody()->begin());
+ OpBuilder b(dstAffineForOp.getBody(), dstAffineForOp.getBody()->begin());
auto sliceLoopNest =
cast<AffineForOp>(b.clone(*srcLoopIVs[0].getOperation()));
diff --git a/mlir/lib/EDSC/Builders.cpp b/mlir/lib/EDSC/Builders.cpp
index 22f91399..6f6363f 100644
--- a/mlir/lib/EDSC/Builders.cpp
+++ b/mlir/lib/EDSC/Builders.cpp
@@ -24,8 +24,7 @@
using namespace mlir;
using namespace mlir::edsc;
-mlir::edsc::ScopedContext::ScopedContext(FuncBuilder &builder,
- Location location)
+mlir::edsc::ScopedContext::ScopedContext(OpBuilder &builder, Location location)
: builder(builder), location(location),
enclosingScopedContext(ScopedContext::getCurrentScopedContext()),
nestedBuilder(nullptr) {
@@ -35,8 +34,8 @@
/// Sets the insertion point of the builder to 'newInsertPt' for the duration
/// of the scope. The existing insertion point of the builder is restored on
/// destruction.
-mlir::edsc::ScopedContext::ScopedContext(FuncBuilder &builder,
- FuncBuilder::InsertPoint newInsertPt,
+mlir::edsc::ScopedContext::ScopedContext(OpBuilder &builder,
+ OpBuilder::InsertPoint newInsertPt,
Location location)
: builder(builder), prevBuilderInsertPoint(builder.saveInsertionPoint()),
location(location),
@@ -59,7 +58,7 @@
return context;
}
-FuncBuilder *mlir::edsc::ScopedContext::getBuilder() {
+OpBuilder *mlir::edsc::ScopedContext::getBuilder() {
assert(ScopedContext::getCurrentScopedContext() &&
"Unexpected Null ScopedContext");
return &ScopedContext::getCurrentScopedContext()->builder;
diff --git a/mlir/lib/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/GPU/Transforms/KernelOutlining.cpp
index 163a7cf..86fab1a 100644
--- a/mlir/lib/GPU/Transforms/KernelOutlining.cpp
+++ b/mlir/lib/GPU/Transforms/KernelOutlining.cpp
@@ -30,7 +30,7 @@
namespace {
template <typename OpTy>
-void createForAllDimensions(FuncBuilder &builder, Location loc,
+void createForAllDimensions(OpBuilder &builder, Location loc,
SmallVectorImpl<Value *> &values) {
for (StringRef dim : {"x", "y", "z"}) {
Value *v = builder.create<OpTy>(loc, builder.getIndexType(),
@@ -42,12 +42,12 @@
// Add operations generating block/thread ids and grid/block dimensions at the
// beginning of `kernelFunc` and replace uses of the respective function args.
void injectGpuIndexOperations(Location loc, Function &kernelFunc) {
- FuncBuilder funcBuilder(kernelFunc);
+ OpBuilder builder(kernelFunc.getBody());
SmallVector<Value *, 12> indexOps;
- createForAllDimensions<gpu::BlockId>(funcBuilder, loc, indexOps);
- createForAllDimensions<gpu::ThreadId>(funcBuilder, loc, indexOps);
- createForAllDimensions<gpu::GridDim>(funcBuilder, loc, indexOps);
- createForAllDimensions<gpu::BlockDim>(funcBuilder, loc, indexOps);
+ createForAllDimensions<gpu::BlockId>(builder, loc, indexOps);
+ createForAllDimensions<gpu::ThreadId>(builder, loc, indexOps);
+ createForAllDimensions<gpu::GridDim>(builder, loc, indexOps);
+ createForAllDimensions<gpu::BlockDim>(builder, loc, indexOps);
// Replace the leading 12 function args with the respective thread/block index
// operations. Iterate backwards since args are erased and indices change.
for (int i = 11; i >= 0; --i) {
@@ -78,10 +78,10 @@
// Replace `gpu.launch` operations with an `gpu.launch_func` operation launching
// `kernelFunc`.
void convertToLaunchFuncOp(gpu::LaunchOp &launchOp, Function &kernelFunc) {
- FuncBuilder funcBuilder(launchOp);
+ OpBuilder builder(launchOp);
SmallVector<Value *, 4> kernelOperandValues(
launchOp.getKernelOperandValues());
- funcBuilder.create<gpu::LaunchFuncOp>(
+ builder.create<gpu::LaunchFuncOp>(
launchOp.getLoc(), &kernelFunc, launchOp.getGridSizeOperandValues(),
launchOp.getBlockSizeOperandValues(), kernelOperandValues);
launchOp.erase();
diff --git a/mlir/lib/IR/Block.cpp b/mlir/lib/IR/Block.cpp
index cf85cc8..9595a72 100644
--- a/mlir/lib/IR/Block.cpp
+++ b/mlir/lib/IR/Block.cpp
@@ -282,6 +282,24 @@
bb.dropAllReferences();
}
+/// Return the context this region is inserted in. The region must have a valid
+/// parent container.
+MLIRContext *Region::getContext() {
+ assert(!container.isNull() && "region is not attached to a container");
+ if (auto *inst = getContainingOp())
+ return inst->getContext();
+ return getContainingFunction()->getContext();
+}
+
+/// Return a location for this region. This is the location attached to the
+/// parent container. The region must have a valid parent container.
+Location Region::getLoc() {
+ assert(!container.isNull() && "region is not attached to a container");
+ if (auto *inst = getContainingOp())
+ return inst->getLoc();
+ return getContainingFunction()->getLoc();
+}
+
Region *Region::getContainingRegion() {
if (auto *inst = getContainingOp())
return inst->getContainingRegion();
diff --git a/mlir/lib/IR/Builders.cpp b/mlir/lib/IR/Builders.cpp
index 4accfb5..d32e705 100644
--- a/mlir/lib/IR/Builders.cpp
+++ b/mlir/lib/IR/Builders.cpp
@@ -332,31 +332,31 @@
}
//===----------------------------------------------------------------------===//
-// Operations.
+// OpBuilder.
//===----------------------------------------------------------------------===//
-FuncBuilder::~FuncBuilder() {}
+OpBuilder::~OpBuilder() {}
/// Add new block and set the insertion point to the end of it. If an
/// 'insertBefore' block is passed, the block will be placed before the
/// specified block. If not, the block will be appended to the end of the
/// current function.
-Block *FuncBuilder::createBlock(Block *insertBefore) {
+Block *OpBuilder::createBlock(Block *insertBefore) {
Block *b = new Block();
// If we are supposed to insert before a specific block, do so, otherwise add
// the block to the end of the function.
if (insertBefore)
- function->getBlocks().insert(Function::iterator(insertBefore), b);
+ region->getBlocks().insert(Function::iterator(insertBefore), b);
else
- function->push_back(b);
+ region->push_back(b);
setInsertionPointToEnd(b);
return b;
}
/// Create an operation given the fields represented as an OperationState.
-Operation *FuncBuilder::createOperation(const OperationState &state) {
+Operation *OpBuilder::createOperation(const OperationState &state) {
assert(block && "createOperation() called without setting builder's block");
auto *op = Operation::create(state);
block->getOperations().insert(insertPoint, op);
diff --git a/mlir/lib/IR/Function.cpp b/mlir/lib/IR/Function.cpp
index f53c715..6ab5a6f 100644
--- a/mlir/lib/IR/Function.cpp
+++ b/mlir/lib/IR/Function.cpp
@@ -214,9 +214,7 @@
}
void Function::walk(const std::function<void(Operation *)> &callback) {
- // Walk each of the blocks within the function.
- for (auto &block : getBlocks())
- block.walk(callback);
+ getBody().walk(callback);
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/IR/Operation.cpp b/mlir/lib/IR/Operation.cpp
index 64fb8bc..5804770 100644
--- a/mlir/lib/IR/Operation.cpp
+++ b/mlir/lib/IR/Operation.cpp
@@ -312,8 +312,7 @@
void Operation::walk(const std::function<void(Operation *)> &callback) {
// Visit any internal operations.
for (auto &region : getRegions())
- for (auto &block : region)
- block.walk(callback);
+ region.walk(callback);
// Visit the current operation.
callback(this);
diff --git a/mlir/lib/LLVMIR/Transforms/ConvertToLLVMDialect.cpp b/mlir/lib/LLVMIR/Transforms/ConvertToLLVMDialect.cpp
index 0e30a8e..1b50320 100644
--- a/mlir/lib/LLVMIR/Transforms/ConvertToLLVMDialect.cpp
+++ b/mlir/lib/LLVMIR/Transforms/ConvertToLLVMDialect.cpp
@@ -889,7 +889,7 @@
position != end; ++position) {
auto *dummyBlock = new Block();
bb.getParent()->push_back(dummyBlock);
- auto builder = FuncBuilder(dummyBlock);
+ auto builder = OpBuilder(dummyBlock);
SmallVector<Value *, 8> operands(
terminator->getSuccessorOperands(*position));
builder.create<BranchOp>(terminator->getLoc(), successor.first, operands);
diff --git a/mlir/lib/Linalg/IR/LinalgOps.cpp b/mlir/lib/Linalg/IR/LinalgOps.cpp
index 55a791a..3b3a040 100644
--- a/mlir/lib/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Linalg/IR/LinalgOps.cpp
@@ -773,7 +773,7 @@
using edsc::intrinsics::select;
// account for affine.terminator in loop.
- FuncBuilder b(body, std::prev(body->end(), 1));
+ OpBuilder b(body, std::prev(body->end(), 1));
ScopedContext scope(b, innermostLoop.getLoc());
auto *op = linalgOp.getOperation();
if (isa<DotOp>(op)) {
diff --git a/mlir/lib/Linalg/Transforms/LowerToLLVMDialect.cpp b/mlir/lib/Linalg/Transforms/LowerToLLVMDialect.cpp
index 60c0daf..b3857ac 100644
--- a/mlir/lib/Linalg/Transforms/LowerToLLVMDialect.cpp
+++ b/mlir/lib/Linalg/Transforms/LowerToLLVMDialect.cpp
@@ -621,7 +621,7 @@
auto *op = forOp.getOperation();
auto loc = op->getLoc();
using namespace edsc::op;
- FuncBuilder builder(op);
+ OpBuilder builder(op);
ScopedContext scope(builder, loc);
ValueHandle lb(forOp.getLowerBound()), ub(forOp.getUpperBound()),
step(forOp.getStep());
diff --git a/mlir/lib/Linalg/Transforms/LowerToLoops.cpp b/mlir/lib/Linalg/Transforms/LowerToLoops.cpp
index b2f59c4..5e22f86 100644
--- a/mlir/lib/Linalg/Transforms/LowerToLoops.cpp
+++ b/mlir/lib/Linalg/Transforms/LowerToLoops.cpp
@@ -35,7 +35,7 @@
// Creates a number of ranges equal to the number of results in `map`.
// The returned ranges correspond to the loop ranges, in the proper order, for
// which new loops will be created.
-static SmallVector<Value *, 4> emitLoopRanges(FuncBuilder *b, Location loc,
+static SmallVector<Value *, 4> emitLoopRanges(OpBuilder *b, Location loc,
AffineMap map,
ArrayRef<Value *> allViewSizes,
FunctionConstants &state) {
@@ -51,7 +51,7 @@
}
static void emitLinalgOpAsLoops(LinalgOp &linalgOp, FunctionConstants &state) {
- FuncBuilder b(linalgOp.getOperation());
+ OpBuilder b(linalgOp.getOperation());
ScopedContext scope(b, linalgOp.getOperation()->getLoc());
auto loopRanges = emitLoopRanges(
scope.getBuilder(), scope.getLocation(),
diff --git a/mlir/lib/Linalg/Transforms/Tiling.cpp b/mlir/lib/Linalg/Transforms/Tiling.cpp
index 22090ca..bc2ed2b 100644
--- a/mlir/lib/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Linalg/Transforms/Tiling.cpp
@@ -58,7 +58,7 @@
// The returned ranges correspond to the loop ranges, in the proper order, that
// are tiled and for which new loops will be created.
static SmallVector<Value *, 4>
-makeTiledLoopRanges(FuncBuilder *b, Location loc, AffineMap map,
+makeTiledLoopRanges(OpBuilder *b, Location loc, AffineMap map,
ArrayRef<Value *> allViewSizes,
ArrayRef<Value *> allTileSizes, FunctionConstants &state) {
assert(allTileSizes.size() == map.getNumResults());
@@ -127,7 +127,7 @@
return nullptr;
}
-static SmallVector<Value *, 4> makeTiledViews(FuncBuilder *b, Location loc,
+static SmallVector<Value *, 4> makeTiledViews(OpBuilder *b, Location loc,
LinalgOp &linalgOp,
ArrayRef<Value *> ivs,
ArrayRef<Value *> tileSizes,
@@ -210,7 +210,7 @@
tileSizes.size() &&
"expected matching number of tile sizes and loops");
- FuncBuilder builder(op.getOperation());
+ OpBuilder builder(op.getOperation());
ScopedContext scope(builder, op.getLoc());
auto loopRanges = makeTiledLoopRanges(
scope.getBuilder(), scope.getLocation(),
diff --git a/mlir/lib/Linalg/Utils/Utils.cpp b/mlir/lib/Linalg/Utils/Utils.cpp
index f19e61c..81fad1c 100644
--- a/mlir/lib/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Linalg/Utils/Utils.cpp
@@ -109,7 +109,7 @@
return nullptr;
}
-static Value *emitOrFoldComposedAffineApply(FuncBuilder *b, Location loc,
+static Value *emitOrFoldComposedAffineApply(OpBuilder *b, Location loc,
AffineMap map,
ArrayRef<Value *> operandsRef,
FunctionConstants &state) {
@@ -121,7 +121,7 @@
}
SmallVector<Value *, 4>
-mlir::linalg::applyMapToValues(FuncBuilder *b, Location loc, AffineMap map,
+mlir::linalg::applyMapToValues(OpBuilder *b, Location loc, AffineMap map,
ArrayRef<Value *> values,
FunctionConstants &state) {
SmallVector<Value *, 4> res;
@@ -141,7 +141,7 @@
auto it = map.find(v);
if (it != map.end())
return it->second;
- FuncBuilder builder(f);
+ OpBuilder builder(f.getBody());
edsc::ScopedContext s(builder, f.getLoc());
return map.insert(std::make_pair(v, edsc::intrinsics::constant_index(v)))
.first->getSecond();
diff --git a/mlir/lib/Parser/Parser.cpp b/mlir/lib/Parser/Parser.cpp
index 6cc933a..a3d44f9 100644
--- a/mlir/lib/Parser/Parser.cpp
+++ b/mlir/lib/Parser/Parser.cpp
@@ -2302,11 +2302,11 @@
/// more specific builder type.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wshadow-field"
- FuncBuilder builder;
+ OpBuilder builder;
#pragma clang diagnostic pop
FunctionParser(ParserState &state, Function *function)
- : Parser(state), builder(function), function(function) {}
+ : Parser(state), builder(function->getBody()), function(function) {}
~FunctionParser();
diff --git a/mlir/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp b/mlir/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp
index 75c082f..375a64d8 100644
--- a/mlir/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp
+++ b/mlir/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp
@@ -77,7 +77,7 @@
for (auto *arg : func.getArguments()) {
if (!config.isHandledType(arg->getType()))
continue;
- FuncBuilder b(func);
+ OpBuilder b(func.getBody());
APFloat minValue(-1.0f);
APFloat maxValue(1.0f);
ElementsAttr layerStats = DenseFPElementsAttr::get(
@@ -102,7 +102,7 @@
if (!config.isHandledType(originalResult->getType()))
return;
- FuncBuilder b(op->getBlock(), ++op->getIterator());
+ OpBuilder b(op->getBlock(), ++op->getIterator());
APFloat minValue(-1.0f);
APFloat maxValue(1.0f);
diff --git a/mlir/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp b/mlir/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp
index 94bac98..c443354 100644
--- a/mlir/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp
+++ b/mlir/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp
@@ -184,7 +184,7 @@
Type newType) {
Value *inputValue = anchor->getValue();
Operation *op = anchor->getOp();
- FuncBuilder b(op->getBlock(), Block::iterator(op));
+ OpBuilder b(op->getBlock(), Block::iterator(op));
SmallVector<Value *, 1> removeValuesIfDead;
@@ -240,7 +240,7 @@
Type newType) {
Value *origResultValue = anchor->getValue();
Operation *op = origResultValue->getDefiningOp();
- FuncBuilder b(op->getBlock(), ++Block::iterator(op));
+ OpBuilder b(op->getBlock(), ++Block::iterator(op));
Value *replacedResultValue = nullptr;
Value *newResultValue = nullptr;
diff --git a/mlir/lib/Transforms/DialectConversion.cpp b/mlir/lib/Transforms/DialectConversion.cpp
index 6002cad..1deedc1 100644
--- a/mlir/lib/Transforms/DialectConversion.cpp
+++ b/mlir/lib/Transforms/DialectConversion.cpp
@@ -108,8 +108,8 @@
SmallVector<Value *, 2> newValues;
};
- DialectConversionRewriter(Function *fn)
- : PatternRewriter(fn), argConverter(fn->getContext()) {}
+ DialectConversionRewriter(Region &region)
+ : PatternRewriter(region), argConverter(region.getContext()) {}
~DialectConversionRewriter() = default;
/// Cleanup and destroy any generated rewrite operations. This method is
@@ -151,7 +151,7 @@
/// PatternRewriter hook for creating a new operation.
Operation *createOperation(const OperationState &state) override {
- auto *result = FuncBuilder::createOperation(state);
+ auto *result = OpBuilder::createOperation(state);
createdOps.push_back(result);
return result;
}
@@ -572,7 +572,7 @@
return success();
// Rewrite the function body.
- DialectConversionRewriter rewriter(f);
+ DialectConversionRewriter rewriter(f->getBody());
if (failed(convertRegion(rewriter, f->getBody(), f->getLoc()))) {
// Reset any of the generated rewrites.
rewriter.discardRewrites();
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index 1ead2e5..7c745aa 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -240,14 +240,14 @@
return true;
// DMAs for read regions are going to be inserted just before the for loop.
- FuncBuilder prologue(block, begin);
+ OpBuilder prologue(block, begin);
// DMAs for write regions are going to be inserted just after the for loop.
- FuncBuilder epilogue(block, end);
- FuncBuilder *b = region.isWrite() ? &epilogue : &prologue;
+ OpBuilder epilogue(block, end);
+ OpBuilder *b = region.isWrite() ? &epilogue : &prologue;
// Builder to create constants at the top level.
auto *func = block->getFunction();
- FuncBuilder top(func);
+ OpBuilder top(func->getBody());
auto loc = region.loc;
auto *memref = region.memref;
@@ -759,7 +759,7 @@
void DmaGeneration::runOnFunction() {
Function &f = getFunction();
- FuncBuilder topBuilder(f);
+ OpBuilder topBuilder(f.getBody());
zeroIndex = topBuilder.create<ConstantIndexOp>(f.getLoc(), 0);
// Override default if a command line option is provided.
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index b7b69fa..0f39e52 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -1006,9 +1006,9 @@
auto *forInst = forOp.getOperation();
// Create builder to insert alloc op just before 'forOp'.
- FuncBuilder b(forInst);
+ OpBuilder b(forInst);
// Builder to create constants at the top level.
- FuncBuilder top(forInst->getFunction());
+ OpBuilder top(forInst->getFunction()->getBody());
// Create new memref type based on slice bounds.
auto *oldMemRef = cast<StoreOp>(srcStoreOpInst).getMemRef();
auto oldMemRefType = oldMemRef->getType().cast<MemRefType>();
diff --git a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
index 3187566..c4c1184 100644
--- a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
+++ b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
@@ -203,7 +203,7 @@
SmallPtrSet<Operation *, 8> definedOps;
// This is the place where hoisted instructions would reside.
- FuncBuilder b(forOp.getOperation());
+ OpBuilder b(forOp.getOperation());
SmallPtrSet<Operation *, 8> opsToHoist;
SmallVector<Operation *, 8> opsToMove;
diff --git a/mlir/lib/Transforms/LoopTiling.cpp b/mlir/lib/Transforms/LoopTiling.cpp
index 5233081..c1be6e8 100644
--- a/mlir/lib/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Transforms/LoopTiling.cpp
@@ -112,7 +112,7 @@
assert(!origLoops.empty());
assert(origLoops.size() == tileSizes.size());
- FuncBuilder b(origLoops[0].getOperation());
+ OpBuilder b(origLoops[0].getOperation());
unsigned width = origLoops.size();
// Bounds for tile space loops.
@@ -207,7 +207,7 @@
// Add intra-tile (or point) loops.
for (unsigned i = 0; i < width; i++) {
- FuncBuilder b(topLoop);
+ OpBuilder b(topLoop);
// Loop bounds will be set later.
auto pointLoop = b.create<AffineForOp>(loc, 0, 0);
pointLoop.getBody()->getOperations().splice(
@@ -221,7 +221,7 @@
// Add tile space loops.
for (unsigned i = width; i < 2 * width; i++) {
- FuncBuilder b(topLoop);
+ OpBuilder b(topLoop);
// Loop bounds will be set later.
auto tileSpaceLoop = b.create<AffineForOp>(loc, 0, 0);
tileSpaceLoop.getBody()->getOperations().splice(
diff --git a/mlir/lib/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
index 731464b..409eb39 100644
--- a/mlir/lib/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
@@ -185,8 +185,7 @@
// unrollJamFactor.
if (getLargestDivisorOfTripCount(forOp) % unrollJamFactor != 0) {
// Insert the cleanup loop right after 'forOp'.
- FuncBuilder builder(forInst->getBlock(),
- std::next(Block::iterator(forInst)));
+ OpBuilder builder(forInst->getBlock(), std::next(Block::iterator(forInst)));
auto cleanupAffineForOp = cast<AffineForOp>(builder.clone(*forInst));
// Adjust the lower bound of the cleanup loop; its upper bound is the same
// as the original loop's upper bound.
@@ -212,7 +211,7 @@
for (auto &subBlock : subBlocks) {
// Builder to insert unroll-jammed bodies. Insert right at the end of
// sub-block.
- FuncBuilder builder(subBlock.first->getBlock(), std::next(subBlock.second));
+ OpBuilder builder(subBlock.first->getBlock(), std::next(subBlock.second));
// Unroll and jam (appends unrollJamFactor-1 additional copies).
for (unsigned i = 1; i < unrollJamFactor; i++) {
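
Both cleanup-loop hunks use the "insert immediately after an operation" idiom, which is unchanged apart from the class name; a small sketch (builderAfter is a hypothetical helper, not code from this patch):

  #include <iterator>
  #include "mlir/IR/Builders.h"
  using namespace mlir;

  // Builder whose insertion point sits right after 'op' in its parent block,
  // e.g. for cloning a cleanup loop next to the original one.
  static OpBuilder builderAfter(Operation *op) {
    return OpBuilder(op->getBlock(), std::next(Block::iterator(op)));
  }
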
diff --git a/mlir/lib/Transforms/LowerAffine.cpp b/mlir/lib/Transforms/LowerAffine.cpp
index 4dcc82f..b890b43 100644
--- a/mlir/lib/Transforms/LowerAffine.cpp
+++ b/mlir/lib/Transforms/LowerAffine.cpp
@@ -41,7 +41,7 @@
public:
// This internal class expects arguments to be non-null; checks must be
// performed at the call site.
- AffineApplyExpander(FuncBuilder *builder, ArrayRef<Value *> dimValues,
+ AffineApplyExpander(OpBuilder *builder, ArrayRef<Value *> dimValues,
ArrayRef<Value *> symbolValues, Location loc)
: builder(*builder), dimValues(dimValues), symbolValues(symbolValues),
loc(loc) {}
@@ -206,7 +206,7 @@
}
private:
- FuncBuilder &builder;
+ OpBuilder &builder;
ArrayRef<Value *> dimValues;
ArrayRef<Value *> symbolValues;
@@ -216,7 +216,7 @@
// Create a sequence of operations that implement the `expr` applied to the
// given dimension and symbol values.
-static mlir::Value *expandAffineExpr(FuncBuilder *builder, Location loc,
+static mlir::Value *expandAffineExpr(OpBuilder *builder, Location loc,
AffineExpr expr,
ArrayRef<Value *> dimValues,
ArrayRef<Value *> symbolValues) {
@@ -226,7 +226,7 @@
// Create a sequence of operations that implement the `affineMap` applied to
// the given `operands` (as if it were an AffineApplyOp).
Optional<SmallVector<Value *, 8>> static expandAffineMap(
- FuncBuilder *builder, Location loc, AffineMap affineMap,
+ OpBuilder *builder, Location loc, AffineMap affineMap,
ArrayRef<Value *> operands) {
auto numDims = affineMap.getNumDims();
auto expanded = functional::map(
@@ -260,7 +260,7 @@
// recognize as a reduction by the subsequent passes.
static Value *buildMinMaxReductionSeq(Location loc, CmpIPredicate predicate,
ArrayRef<Value *> values,
- FuncBuilder &builder) {
+ OpBuilder &builder) {
assert(!llvm::empty(values) && "empty min/max chain");
auto valueIt = values.begin();
@@ -348,7 +348,7 @@
// Append the induction variable stepping logic and branch back to the exit
// condition block. Construct an affine expression f : (x -> x+step) and
// apply this expression to the induction variable.
- FuncBuilder builder(bodyBlock);
+ OpBuilder builder(bodyBlock);
auto affStep = builder.getAffineConstantExpr(forOp.getStep());
auto affDim = builder.getAffineDimExpr(0);
auto stepped = expandAffineExpr(&builder, loc, affDim + affStep, iv, {});
@@ -482,7 +482,7 @@
std::prev(oldThen->end()));
}
- FuncBuilder builder(thenBlock);
+ OpBuilder builder(thenBlock);
builder.create<BranchOp>(loc, continueBlock);
// Handle the 'else' block the same way, but we skip it if we have no else
@@ -569,7 +569,7 @@
// Convert an "affine.apply" operation into a sequence of arithmetic
// operations using the StandardOps dialect. Return true on error.
static LogicalResult lowerAffineApply(AffineApplyOp op) {
- FuncBuilder builder(op.getOperation());
+ OpBuilder builder(op.getOperation());
auto maybeExpandedMap =
expandAffineMap(&builder, op.getLoc(), op.getAffineMap(),
llvm::to_vector<8>(op.getOperands()));
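
As lowerAffineApply above shows, the expander is handed an OpBuilder anchored at the operation being lowered. A sketch of the calling convention, reusing expandAffineMap as declared earlier in this file (lowerOneApply is an illustrative wrapper, not the patch's code, and assumes the includes already present in LowerAffine.cpp):

  static LogicalResult lowerOneApply(AffineApplyOp op) {
    // Insertion point just before 'op'.
    OpBuilder builder(op.getOperation());
    auto maybeExpanded =
        expandAffineMap(&builder, op.getLoc(), op.getAffineMap(),
                        llvm::to_vector<8>(op.getOperands()));
    if (!maybeExpanded)
      return failure();
    // Single-result op: forward uses to the expanded value and drop the op.
    op.getResult()->replaceAllUsesWith((*maybeExpanded)[0]);
    op.getOperation()->erase();
    return success();
  }
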
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index 80e080f..0d8cfea 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -238,7 +238,7 @@
return res;
}
-static Operation *instantiate(FuncBuilder *b, Operation *opInst,
+static Operation *instantiate(OpBuilder *b, Operation *opInst,
VectorType hwVectorType,
DenseMap<Value *, Value *> *substitutionsMap);
@@ -257,7 +257,7 @@
if (it == substitutionsMap->end()) {
auto *opInst = v->getDefiningOp();
if (isa<ConstantOp>(opInst)) {
- FuncBuilder b(opInst);
+ OpBuilder b(opInst);
auto *op = instantiate(&b, opInst, hwVectorType, substitutionsMap);
auto res = substitutionsMap->insert(std::make_pair(v, op->getResult(0)));
assert(res.second && "Insertion failed");
@@ -331,7 +331,7 @@
/// TODO(ntv): these implementation details should be captured in a
/// vectorization trait at the op level directly.
static SmallVector<mlir::Value *, 8>
-reindexAffineIndices(FuncBuilder *b, VectorType hwVectorType,
+reindexAffineIndices(OpBuilder *b, VectorType hwVectorType,
ArrayRef<unsigned> hwVectorInstance,
ArrayRef<Value *> memrefIndices) {
auto vectorShape = hwVectorType.getShape();
@@ -404,7 +404,7 @@
/// substitutionsMap.
///
/// If the underlying substitution fails, this fails too and returns nullptr.
-static Operation *instantiate(FuncBuilder *b, Operation *opInst,
+static Operation *instantiate(OpBuilder *b, Operation *opInst,
VectorType hwVectorType,
DenseMap<Value *, Value *> *substitutionsMap) {
assert(!isa<VectorTransferReadOp>(opInst) &&
@@ -481,7 +481,7 @@
/// `hwVectorType` in the covering of the super-vector type. For a more
/// detailed description of the problem, see the description of
/// reindexAffineIndices.
-static Operation *instantiate(FuncBuilder *b, VectorTransferReadOp read,
+static Operation *instantiate(OpBuilder *b, VectorTransferReadOp read,
VectorType hwVectorType,
ArrayRef<unsigned> hwVectorInstance,
DenseMap<Value *, Value *> *substitutionsMap) {
@@ -505,7 +505,7 @@
/// `hwVectorType` in the covering of the super-vector type. For a more
/// detailed description of the problem, see the description of
/// reindexAffineIndices.
-static Operation *instantiate(FuncBuilder *b, VectorTransferWriteOp write,
+static Operation *instantiate(OpBuilder *b, VectorTransferWriteOp write,
VectorType hwVectorType,
ArrayRef<unsigned> hwVectorInstance,
DenseMap<Value *, Value *> *substitutionsMap) {
@@ -547,7 +547,7 @@
LLVM_DEBUG(dbgs() << "\ninstantiate: " << *op);
// Create a builder here for unroll-and-jam effects.
- FuncBuilder b(op);
+ OpBuilder b(op);
// AffineApplyOp are ignored: instantiating the proper vector op will take
// care of AffineApplyOps by composing them properly.
if (isa<AffineApplyOp>(op)) {
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index de8038c..d0e0d18 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -73,7 +73,7 @@
/// modulo 2. Returns false if such a replacement cannot be performed.
static bool doubleBuffer(Value *oldMemRef, AffineForOp forOp) {
auto *forBody = forOp.getBody();
- FuncBuilder bInner(forBody, forBody->begin());
+ OpBuilder bInner(forBody, forBody->begin());
bInner.setInsertionPoint(forBody, forBody->begin());
// Doubles the shape with a leading dimension extent of 2.
@@ -94,7 +94,7 @@
// The double buffer is allocated right before 'forInst'.
auto *forInst = forOp.getOperation();
- FuncBuilder bOuter(forInst);
+ OpBuilder bOuter(forInst);
// Put together alloc operands for any dynamic dimensions of the memref.
SmallVector<Value *, 4> allocOperands;
unsigned dynamicDimCount = 0;
@@ -360,7 +360,7 @@
// Tagging operations with shifts for debugging purposes.
LLVM_DEBUG({
- FuncBuilder b(&op);
+ OpBuilder b(&op);
op.setAttr("shift", b.getI64IntegerAttr(shifts[s - 1]));
});
}
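
The double-buffering hunks pair an inner builder at the start of the loop body with an outer builder just before the loop. A condensed sketch (placeBuffers is illustrative and assumes the includes already present in PipelineDataTransfer.cpp):

  static void placeBuffers(AffineForOp forOp) {
    Block *forBody = forOp.getBody();
    OpBuilder bInner(forBody, forBody->begin()); // ops used inside the body
    OpBuilder bOuter(forOp.getOperation());      // the buffer alloc, right
                                                 // before the loop
    (void)bInner;
    (void)bOuter;
  }
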
diff --git a/mlir/lib/Transforms/Utils/FoldUtils.cpp b/mlir/lib/Transforms/Utils/FoldUtils.cpp
index fbf1a2a..3983dda 100644
--- a/mlir/lib/Transforms/Utils/FoldUtils.cpp
+++ b/mlir/lib/Transforms/Utils/FoldUtils.cpp
@@ -110,7 +110,7 @@
assert(foldResults.size() == op->getNumResults());
// Create the result constants and replace the results.
- FuncBuilder builder(op);
+ OpBuilder builder(op);
for (unsigned i = 0, e = op->getNumResults(); i != e; ++i) {
assert(!foldResults[i].isNull() && "expected valid OpFoldResult");
diff --git a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
index a2e6427..0cd3225 100644
--- a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
+++ b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
@@ -46,7 +46,7 @@
public:
explicit GreedyPatternRewriteDriver(Function &fn,
OwningRewritePatternList &&patterns)
- : PatternRewriter(&fn), matcher(std::move(patterns)) {
+ : PatternRewriter(fn.getBody()), matcher(std::move(patterns)) {
worklist.reserve(64);
}
@@ -88,7 +88,7 @@
// Implement the hook for creating operations, and make sure that newly
// created ops are added to the worklist for processing.
Operation *createOperation(const OperationState &state) override {
- auto *result = FuncBuilder::createOperation(state);
+ auto *result = OpBuilder::createOperation(state);
addToWorklist(result);
return result;
}
@@ -142,14 +142,16 @@
/// Perform the rewrites.
bool GreedyPatternRewriteDriver::simplifyFunction(int maxIterations) {
- Function *fn = getFunction();
- OperationFolder helper(fn);
+ Region *region = getRegion();
+
+ // TODO(riverriddle) OperationFolder should take a region to insert into.
+ OperationFolder helper(region->getContainingFunction());
bool changed = false;
int i = 0;
do {
// Add all operations to the worklist.
- fn->walk([&](Operation *op) { addToWorklist(op); });
+ region->walk([&](Operation *op) { addToWorklist(op); });
// These are scratch vectors used in the folding loop below.
SmallVector<Value *, 8> originalOperands, resultValues;
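
simplifyFunction now drives the worklist off a Region rather than a Function, and the folder keeps taking the containing function until OperationFolder itself is migrated (per the TODO above). A minimal sketch of the region-based seeding, assuming Region::walk as used above (seedWorklist is illustrative):

  #include <vector>
  #include "mlir/IR/Builders.h"
  using namespace mlir;

  static void seedWorklist(Region *region, std::vector<Operation *> &worklist) {
    // Visit every operation nested anywhere under 'region'.
    region->walk([&](Operation *op) { worklist.push_back(op); });
  }
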
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index d5bdcea..23375e7 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -46,7 +46,7 @@
void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
AffineMap *map,
SmallVectorImpl<Value *> *operands,
- FuncBuilder *b) {
+ OpBuilder *b) {
auto lbMap = forOp.getLowerBoundMap();
// Single result lower bound map only.
@@ -125,15 +125,14 @@
Operation *op = forOp.getOperation();
if (!iv->use_empty()) {
if (forOp.hasConstantLowerBound()) {
- auto *mlFunc = op->getFunction();
- FuncBuilder topBuilder(mlFunc);
+ OpBuilder topBuilder(op->getFunction()->getBody());
auto constOp = topBuilder.create<ConstantIndexOp>(
forOp.getLoc(), forOp.getConstantLowerBound());
iv->replaceAllUsesWith(constOp);
} else {
AffineBound lb = forOp.getLowerBound();
SmallVector<Value *, 4> lbOperands(lb.operand_begin(), lb.operand_end());
- FuncBuilder builder(op->getBlock(), Block::iterator(op));
+ OpBuilder builder(op->getBlock(), Block::iterator(op));
if (lb.getMap() == builder.getDimIdentityMap()) {
// No need of generating an affine.apply.
iv->replaceAllUsesWith(lbOperands[0]);
@@ -173,7 +172,7 @@
generateLoop(AffineMap lbMap, AffineMap ubMap,
const std::vector<std::pair<uint64_t, ArrayRef<Operation *>>>
&instGroupQueue,
- unsigned offset, AffineForOp srcForInst, FuncBuilder *b) {
+ unsigned offset, AffineForOp srcForInst, OpBuilder *b) {
SmallVector<Value *, 4> lbOperands(srcForInst.getLowerBoundOperands());
SmallVector<Value *, 4> ubOperands(srcForInst.getUpperBoundOperands());
@@ -188,7 +187,7 @@
BlockAndValueMapping operandMap;
- FuncBuilder bodyBuilder = loopChunk.getBodyBuilder();
+ OpBuilder bodyBuilder = loopChunk.getBodyBuilder();
for (auto it = instGroupQueue.begin() + offset, e = instGroupQueue.end();
it != e; ++it) {
uint64_t shift = it->first;
@@ -291,7 +290,7 @@
auto origLbMap = forOp.getLowerBoundMap();
uint64_t lbShift = 0;
- FuncBuilder b(forOp.getOperation());
+ OpBuilder b(forOp.getOperation());
for (uint64_t d = 0, e = sortedInstGroups.size(); d < e; ++d) {
// If nothing is shifted by d, continue.
if (sortedInstGroups[d].empty())
@@ -424,7 +423,7 @@
// Generate the cleanup loop if trip count isn't a multiple of unrollFactor.
Operation *op = forOp.getOperation();
if (getLargestDivisorOfTripCount(forOp) % unrollFactor != 0) {
- FuncBuilder builder(op->getBlock(), ++Block::iterator(op));
+ OpBuilder builder(op->getBlock(), ++Block::iterator(op));
auto cleanupForInst = cast<AffineForOp>(builder.clone(*op));
AffineMap cleanupMap;
SmallVector<Value *, 4> cleanupOperands;
@@ -448,7 +447,7 @@
// Builder to insert unrolled bodies just before the terminator of the body of
// 'forOp'.
- FuncBuilder builder = forOp.getBodyBuilder();
+ OpBuilder builder = forOp.getBodyBuilder();
// Keep a pointer to the last non-terminator operation in the original block
// so that we know what to clone (since we are doing this in-place).
@@ -647,7 +646,7 @@
// ...
// }
// ```
-static void augmentMapAndBounds(FuncBuilder *b, Value *iv, AffineMap *map,
+static void augmentMapAndBounds(OpBuilder *b, Value *iv, AffineMap *map,
SmallVector<Value *, 4> *operands,
int64_t offset = 0) {
auto bounds = llvm::to_vector<4>(map->getResults());
@@ -665,7 +664,7 @@
AffineForOp newForOp) {
BlockAndValueMapping map;
map.map(oldIv, newForOp.getInductionVar());
- FuncBuilder b = newForOp.getBodyBuilder();
+ OpBuilder b = newForOp.getBodyBuilder();
for (auto &op : *forOp.getBody()) {
// Step over newForOp in case it is nested under forOp.
if (&op == newForOp.getOperation()) {
@@ -704,7 +703,7 @@
forOp.setStep(scaledStep);
auto *op = forOp.getOperation();
- FuncBuilder b(op->getBlock(), ++Block::iterator(op));
+ OpBuilder b(op->getBlock(), ++Block::iterator(op));
// Lower-bound map creation.
auto lbMap = forOp.getLowerBoundMap();
@@ -720,7 +719,7 @@
SmallVector<AffineForOp, 8> innerLoops;
for (auto t : targets) {
// Insert newForOp before the terminator of `t`.
- FuncBuilder b = t.getBodyBuilder();
+ OpBuilder b = t.getBodyBuilder();
auto newForOp = b.create<AffineForOp>(t.getLoc(), lbOperands, lbMap,
ubOperands, ubMap, originalStep);
cloneLoopBodyInto(t, forOp.getInductionVar(), newForOp);
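
getBodyBuilder now hands back an OpBuilder positioned just before the loop body's terminator, so body insertion reads the same as it did with FuncBuilder. A short sketch (appendZeroToBody is illustrative and assumes the includes already present in LoopUtils.cpp):

  static void appendZeroToBody(AffineForOp forOp) {
    OpBuilder b = forOp.getBodyBuilder();        // before the body terminator
    b.create<ConstantIndexOp>(forOp.getLoc(), 0);
  }
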
diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp
index 13e5b2f..2e2bc08 100644
--- a/mlir/lib/Transforms/Utils/Utils.cpp
+++ b/mlir/lib/Transforms/Utils/Utils.cpp
@@ -123,7 +123,7 @@
opInst->operand_begin() + memRefOperandPos);
state.operands.push_back(newMemRef);
- FuncBuilder builder(opInst);
+ OpBuilder builder(opInst);
for (auto *extraIndex : extraIndices) {
assert(extraIndex->getDefiningOp()->getNumResults() == 1 &&
"single result op's expected to generate these indices");
@@ -249,7 +249,7 @@
if (localized)
return;
- FuncBuilder builder(opInst);
+ OpBuilder builder(opInst);
SmallVector<Value *, 4> composedOpOperands(subOperands);
auto composedMap = builder.getMultiDimIdentityMap(composedOpOperands.size());
fullyComposeAffineMapAndOperands(&composedMap, &composedOpOperands);
diff --git a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
index aeaea02..9220b7b 100644
--- a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
+++ b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
@@ -240,7 +240,7 @@
pattern.match(f, &matches);
for (auto m : matches) {
auto app = cast<AffineApplyOp>(m.getMatchedOperation());
- FuncBuilder b(m.getMatchedOperation());
+ OpBuilder b(m.getMatchedOperation());
SmallVector<Value *, 8> operands(app.getOperands());
makeComposedAffineApply(&b, app.getLoc(), app.getAffineMap(), operands);
}
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index ddaf112..a96713b 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -805,7 +805,7 @@
return LogicalResult::Failure;
LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
LLVM_DEBUG(permutationMap.print(dbgs()));
- FuncBuilder b(opInst);
+ OpBuilder b(opInst);
auto transfer = b.create<VectorTransferReadOp>(
opInst->getLoc(), vectorType, memoryOp.getMemRef(),
map(makePtrDynCaster<Value>(), memoryOp.getIndices()), permutationMap);
@@ -920,7 +920,7 @@
!VectorType::isValidElementType(constant.getType())) {
return nullptr;
}
- FuncBuilder b(op);
+ OpBuilder b(op);
Location loc = op->getLoc();
auto vectorType = type.cast<VectorType>();
auto attr = SplatElementsAttr::get(vectorType, constant.getValue());
@@ -1015,7 +1015,7 @@
auto *value = store.getValueToStore();
auto *vectorValue = vectorizeOperand(value, opInst, state);
auto indices = map(makePtrDynCaster<Value>(), store.getIndices());
- FuncBuilder b(opInst);
+ OpBuilder b(opInst);
auto permutationMap =
makePermutationMap(opInst, state->strategy->loopToVectorDim);
if (!permutationMap)
@@ -1054,7 +1054,7 @@
// name that works both in scalar mode and vector mode.
// TODO(ntv): Is it worth considering an Operation.clone operation which
// changes the type so we can promote an Operation with less boilerplate?
- FuncBuilder b(opInst);
+ OpBuilder b(opInst);
OperationState newOp(b.getContext(), opInst->getLoc(),
opInst->getName().getStringRef(), vectorOperands,
vectorTypes, opInst->getAttrs(), /*successors=*/{},
@@ -1136,7 +1136,7 @@
/// maintains a clone for handling failure and restores the proper state via
/// RAII.
auto *loopInst = loop.getOperation();
- FuncBuilder builder(loopInst);
+ OpBuilder builder(loopInst);
auto clonedLoop = cast<AffineForOp>(builder.clone(*loopInst));
struct Guard {
LogicalResult failure() {
diff --git a/mlir/test/EDSC/builder-api-test.cpp b/mlir/test/EDSC/builder-api-test.cpp
index 55c4574..6018abe 100644
--- a/mlir/test/EDSC/builder-api-test.cpp
+++ b/mlir/test/EDSC/builder-api-test.cpp
@@ -62,7 +62,7 @@
auto f =
makeFunction("builder_dynamic_for_func_args", {}, {indexType, indexType});
- FuncBuilder builder(*f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle i(indexType), j(indexType), lb(f->getArgument(0)),
ub(f->getArgument(1));
@@ -113,7 +113,7 @@
auto f = makeFunction("builder_dynamic_for", {},
{indexType, indexType, indexType, indexType});
- FuncBuilder builder(*f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle i(indexType), a(f->getArgument(0)), b(f->getArgument(1)),
c(f->getArgument(2)), d(f->getArgument(3));
@@ -136,7 +136,7 @@
auto f = makeFunction("builder_max_min_for", {},
{indexType, indexType, indexType, indexType});
- FuncBuilder builder(*f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle i(indexType), lb1(f->getArgument(0)), lb2(f->getArgument(1)),
ub1(f->getArgument(2)), ub2(f->getArgument(3));
@@ -157,7 +157,7 @@
using namespace edsc::op;
auto f = makeFunction("builder_blocks");
- FuncBuilder builder(*f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle c1(ValueHandle::create<ConstantIntOp>(42, 32)),
c2(ValueHandle::create<ConstantIntOp>(1234, 32));
@@ -201,7 +201,7 @@
using namespace edsc::op;
auto f = makeFunction("builder_blocks_eager");
- FuncBuilder builder(*f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle c1(ValueHandle::create<ConstantIntOp>(42, 32)),
c2(ValueHandle::create<ConstantIntOp>(1234, 32));
@@ -244,7 +244,7 @@
auto f = makeFunction("builder_cond_branch", {},
{IntegerType::get(1, &globalContext())});
- FuncBuilder builder(*f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle funcArg(f->getArgument(0));
ValueHandle c32(ValueHandle::create<ConstantIntOp>(32, 32)),
@@ -281,7 +281,7 @@
auto f = makeFunction("builder_cond_branch_eager", {},
{IntegerType::get(1, &globalContext())});
- FuncBuilder builder(*f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle funcArg(f->getArgument(0));
ValueHandle c32(ValueHandle::create<ConstantIntOp>(32, 32)),
@@ -321,7 +321,7 @@
auto f =
makeFunction("builder_helpers", {}, {memrefType, memrefType, memrefType});
- FuncBuilder builder(*f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off
ValueHandle f7(
@@ -373,7 +373,7 @@
auto indexType = IndexType::get(&globalContext());
auto f = makeFunction("custom_ops", {}, {indexType, indexType});
- FuncBuilder builder(*f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
CustomOperation<ValueHandle> MY_CUSTOM_OP("my_custom_op");
CustomOperation<OperationHandle> MY_CUSTOM_OP_0("my_custom_op_0");
@@ -412,7 +412,7 @@
auto indexType = IndexType::get(&globalContext());
auto f = makeFunction("insertion_in_block", {}, {indexType, indexType});
- FuncBuilder builder(*f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
BlockHandle b1;
// clang-format off
@@ -438,7 +438,7 @@
auto memrefType = MemRefType::get({-1, -1, -1}, f32Type, {}, 0);
auto f = makeFunction("select_op", {}, {memrefType});
- FuncBuilder builder(*f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off
ValueHandle zero = constant_index(0), one = constant_index(1);
@@ -474,7 +474,7 @@
MemRefType::get({-1, -1, -1}, FloatType::getF32(&globalContext()), {}, 0);
auto f = makeFunction("tile_2d", {}, {memrefType, memrefType, memrefType});
- FuncBuilder builder(*f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle zero = constant_index(0);
MemRefView vA(f->getArgument(0)), vB(f->getArgument(1)),
@@ -548,7 +548,7 @@
mlir::Module module(&globalContext());
module.getFunctions().push_back(f);
- FuncBuilder builder(f);
+ OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle zero = constant_index(0);
MemRefView vA(f->getArgument(0)), vB(f->getArgument(1)),
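
All of the EDSC test updates above follow one setup pattern: seed an OpBuilder with the test function's body and open a ScopedContext over it. A condensed sketch, assuming makeFunction and the EDSC helpers defined in builder-api-test.cpp (buildSketch and "sketch_func" are illustrative):

  static void buildSketch() {
    auto f = makeFunction("sketch_func");
    OpBuilder builder(f->getBody());             // was: FuncBuilder builder(*f);
    ScopedContext scope(builder, f->getLoc());
    // ValueHandle / intrinsic calls made inside this scope now insert through
    // 'builder' into the function's body region.
  }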