Globally change load/store/dma_start/dma_wait operations over to affine.load/store/dma_start/dma_wait.
In most places, this is just a name change (with the exception of affine.dma_start swapping the operand positions of its tag memref and num_elements operands).
Significant code changes occur here:
*) Vectorization: LoopAnalysis.cpp, Vectorize.cpp
*) Affine Transforms: Transforms/Utils/Utils.cpp

PiperOrigin-RevId: 256395088
diff --git a/mlir/include/mlir/AffineOps/AffineOps.h b/mlir/include/mlir/AffineOps/AffineOps.h
index 91b0d0e..b8bf368 100644
--- a/mlir/include/mlir/AffineOps/AffineOps.h
+++ b/mlir/include/mlir/AffineOps/AffineOps.h
@@ -284,6 +284,8 @@
   static ParseResult parse(OpAsmParser *parser, OperationState *result);
   void print(OpAsmPrinter *p);
   LogicalResult verify();
+  static void getCanonicalizationPatterns(OwningRewritePatternList &results,
+                                          MLIRContext *context);
 
   /// Returns true if this DMA operation is strided, returns false otherwise.
   bool isStrided() {
@@ -367,6 +369,8 @@
   static ParseResult parse(OpAsmParser *parser, OperationState *result);
   void print(OpAsmPrinter *p);
   LogicalResult verify();
+  static void getCanonicalizationPatterns(OwningRewritePatternList &results,
+                                          MLIRContext *context);
 };
 
 /// The "affine.for" operation represents an affine loop nest, defining an SSA
@@ -649,10 +653,16 @@
   /// Builds an affine load op with the specified map and operands.
   static void build(Builder *builder, OperationState *result, AffineMap map,
                     ArrayRef<Value *> operands);
+  /// Builds an affine load op an identify map and operands.
+  static void build(Builder *builder, OperationState *result, Value *memref,
+                    ArrayRef<Value *> indices = {});
+
+  /// Returns the operand index of the memref.
+  unsigned getMemRefOperandIndex() { return 0; }
 
   /// Get memref operand.
-  Value *getMemRef() { return getOperand(0); }
-  void setMemRef(Value *value) { setOperand(0, value); }
+  Value *getMemRef() { return getOperand(getMemRefOperandIndex()); }
+  void setMemRef(Value *value) { setOperand(getMemRefOperandIndex(), value); }
   MemRefType getMemRefType() {
     return getMemRef()->getType().cast<MemRefType>();
   }
@@ -680,6 +690,8 @@
   static ParseResult parse(OpAsmParser *parser, OperationState *result);
   void print(OpAsmPrinter *p);
   LogicalResult verify();
+  static void getCanonicalizationPatterns(OwningRewritePatternList &results,
+                                          MLIRContext *context);
 };
 
 /// The "affine.store" op writes an element to a memref, where the index
@@ -707,13 +719,20 @@
   static void build(Builder *builder, OperationState *result,
                     Value *valueToStore, AffineMap map,
                     ArrayRef<Value *> operands);
+  /// Builds an affine store operation with an identity map and operands.
+  static void build(Builder *builder, OperationState *result,
+                    Value *valueToStore, Value *memref,
+                    ArrayRef<Value *> operands);
 
   /// Get value to be stored by store operation.
   Value *getValueToStore() { return getOperand(0); }
 
+  /// Returns the operand index of the memref.
+  unsigned getMemRefOperandIndex() { return 1; }
+
   /// Get memref operand.
-  Value *getMemRef() { return getOperand(1); }
-  void setMemRef(Value *value) { setOperand(1, value); }
+  Value *getMemRef() { return getOperand(getMemRefOperandIndex()); }
+  void setMemRef(Value *value) { setOperand(getMemRefOperandIndex(), value); }
 
   MemRefType getMemRefType() {
     return getMemRef()->getType().cast<MemRefType>();
@@ -742,6 +761,8 @@
   static ParseResult parse(OpAsmParser *parser, OperationState *result);
   void print(OpAsmPrinter *p);
   LogicalResult verify();
+  static void getCanonicalizationPatterns(OwningRewritePatternList &results,
+                                          MLIRContext *context);
 };
 
 /// Returns true if the given Value can be used as a dimension id.
diff --git a/mlir/include/mlir/Analysis/Utils.h b/mlir/include/mlir/Analysis/Utils.h
index 5c1f47a..b012cc1 100644
--- a/mlir/include/mlir/Analysis/Utils.h
+++ b/mlir/include/mlir/Analysis/Utils.h
@@ -103,29 +103,29 @@
 //  Backward slice example:
 //
 //    affine.for %i0 = 0 to 10 {
-//      store %cst, %0[%i0] : memref<100xf32>  // 'depSourceAccess'
+//      affine.store %cst, %0[%i0] : memref<100xf32>  // 'depSourceAccess'
 //    }
 //    affine.for %i1 = 0 to 10 {
-//      %v = load %0[%i1] : memref<100xf32>    // 'depSinkAccess'
+//      %v = affine.load %0[%i1] : memref<100xf32>    // 'depSinkAccess'
 //    }
 //
 //    // Backward computation slice of loop nest '%i0'.
 //    affine.for %i0 = (d0) -> (d0)(%i1) to (d0) -> (d0 + 1)(%i1) {
-//      store %cst, %0[%i0] : memref<100xf32>  // 'depSourceAccess'
+//      affine.store %cst, %0[%i0] : memref<100xf32>  // 'depSourceAccess'
 //    }
 //
 //  Forward slice example:
 //
 //    affine.for %i0 = 0 to 10 {
-//      store %cst, %0[%i0] : memref<100xf32>  // 'depSourceAccess'
+//      affine.store %cst, %0[%i0] : memref<100xf32>  // 'depSourceAccess'
 //    }
 //    affine.for %i1 = 0 to 10 {
-//      %v = load %0[%i1] : memref<100xf32>    // 'depSinkAccess'
+//      %v = affine.load %0[%i1] : memref<100xf32>    // 'depSinkAccess'
 //    }
 //
 //    // Forward computation slice of loop nest '%i1'.
 //    affine.for %i1 = (d0) -> (d0)(%i0) to (d0) -> (d0 + 1)(%i0) {
-//      %v = load %0[%i1] : memref<100xf32>    // 'depSinkAccess'
+//      %v = affine.load %0[%i1] : memref<100xf32>    // 'depSinkAccess'
 //    }
 //
 void getComputationSliceState(Operation *depSourceOp, Operation *depSinkOp,
@@ -172,7 +172,7 @@
 //
 //    affine.for %i = 0 to 32 {
 //      affine.for %ii = %i to (d0) -> (d0 + 8) (%i) {
-//        load %A[%ii]
+//        affine.load %A[%ii]
 //      }
 //    }
 //
diff --git a/mlir/include/mlir/Analysis/VectorAnalysis.h b/mlir/include/mlir/Analysis/VectorAnalysis.h
index 1f4e50c..8b9992d 100644
--- a/mlir/include/mlir/Analysis/VectorAnalysis.h
+++ b/mlir/include/mlir/Analysis/VectorAnalysis.h
@@ -122,7 +122,7 @@
 /// `%arg0[%c0, %c0]` into vector<128xf32> which needs a 1-D vector broadcast.
 ///
 AffineMap makePermutationMap(
-    Operation *op,
+    Operation *op, ArrayRef<Value *> indices,
     const llvm::DenseMap<Operation *, unsigned> &loopToVectorDim);
 
 namespace matcher {
diff --git a/mlir/include/mlir/Transforms/Passes.h b/mlir/include/mlir/Transforms/Passes.h
index 48822cd..a253871 100644
--- a/mlir/include/mlir/Transforms/Passes.h
+++ b/mlir/include/mlir/Transforms/Passes.h
@@ -103,7 +103,7 @@
 /// while generating DMAs to move data.
 FunctionPassBase *createDmaGenerationPass(
     unsigned slowMemorySpace, unsigned fastMemorySpace,
-    int minDmaTransferSize = 1024,
+    unsigned tagMemorySpace = 0, int minDmaTransferSize = 1024,
     uint64_t fastMemCapacityBytes = std::numeric_limits<uint64_t>::max());
 
 /// Creates a pass to lower VectorTransferReadOp and VectorTransferWriteOp.
diff --git a/mlir/lib/AffineOps/AffineOps.cpp b/mlir/lib/AffineOps/AffineOps.cpp
index d7650dc..04a3462 100644
--- a/mlir/lib/AffineOps/AffineOps.cpp
+++ b/mlir/lib/AffineOps/AffineOps.cpp
@@ -697,6 +697,38 @@
 }
 
 //===----------------------------------------------------------------------===//
+// Common canonicalization pattern support logic
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// This is a common class used for patterns of the form
+/// "someop(memrefcast) -> someop".  It folds the source of any memref_cast
+/// into the root operation directly.
+struct MemRefCastFolder : public RewritePattern {
+  /// The rootOpName is the name of the root operation to match against.
+  MemRefCastFolder(StringRef rootOpName, MLIRContext *context)
+      : RewritePattern(rootOpName, 1, context) {}
+
+  PatternMatchResult match(Operation *op) const override {
+    for (auto *operand : op->getOperands())
+      if (matchPattern(operand, m_Op<MemRefCastOp>()))
+        return matchSuccess();
+
+    return matchFailure();
+  }
+
+  void rewrite(Operation *op, PatternRewriter &rewriter) const override {
+    for (unsigned i = 0, e = op->getNumOperands(); i != e; ++i)
+      if (auto *memref = op->getOperand(i)->getDefiningOp())
+        if (auto cast = dyn_cast<MemRefCastOp>(memref))
+          op->setOperand(i, cast.getOperand());
+    rewriter.updatedRootInPlace(op);
+  }
+};
+
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
 // AffineDmaStartOp
 //===----------------------------------------------------------------------===//
 
@@ -770,19 +802,16 @@
   // *) src memref followed by its affine map operands (in square brackets).
   // *) tag memref followed by its affine map operands (in square brackets).
   // *) number of elements transferred by DMA operation.
-  if (parser->parseOperand(srcMemRefInfo) || parser->parseLSquare() ||
+  if (parser->parseOperand(srcMemRefInfo) ||
       parser->parseAffineMapOfSSAIds(srcMapOperands, srcMapAttr,
                                      getSrcMapAttrName(), result->attributes) ||
-      parser->parseRSquare() || parser->parseComma() ||
-      parser->parseOperand(dstMemRefInfo) || parser->parseLSquare() ||
+      parser->parseComma() || parser->parseOperand(dstMemRefInfo) ||
       parser->parseAffineMapOfSSAIds(dstMapOperands, dstMapAttr,
                                      getDstMapAttrName(), result->attributes) ||
-      parser->parseRSquare() || parser->parseComma() ||
-      parser->parseOperand(tagMemRefInfo) || parser->parseLSquare() ||
+      parser->parseComma() || parser->parseOperand(tagMemRefInfo) ||
       parser->parseAffineMapOfSSAIds(tagMapOperands, tagMapAttr,
                                      getTagMapAttrName(), result->attributes) ||
-      parser->parseRSquare() || parser->parseComma() ||
-      parser->parseOperand(numElementsInfo))
+      parser->parseComma() || parser->parseOperand(numElementsInfo))
     return failure();
 
   // Parse optional stride and elements per stride.
@@ -846,6 +875,13 @@
   return success();
 }
 
+void AffineDmaStartOp::getCanonicalizationPatterns(
+    OwningRewritePatternList &results, MLIRContext *context) {
+  /// dma_start(memrefcast) -> dma_start
+  results.push_back(
+      llvm::make_unique<MemRefCastFolder>(getOperationName(), context));
+}
+
 //===----------------------------------------------------------------------===//
 // AffineDmaWaitOp
 //===----------------------------------------------------------------------===//
@@ -884,11 +920,11 @@
   OpAsmParser::OperandType numElementsInfo;
 
   // Parse tag memref, its map operands, and dma size.
-  if (parser->parseOperand(tagMemRefInfo) || parser->parseLSquare() ||
+  if (parser->parseOperand(tagMemRefInfo) ||
       parser->parseAffineMapOfSSAIds(tagMapOperands, tagMapAttr,
                                      getTagMapAttrName(), result->attributes) ||
-      parser->parseRSquare() || parser->parseComma() ||
-      parser->parseOperand(numElementsInfo) || parser->parseColonType(type) ||
+      parser->parseComma() || parser->parseOperand(numElementsInfo) ||
+      parser->parseColonType(type) ||
       parser->resolveOperand(tagMemRefInfo, type, result->operands) ||
       parser->resolveOperands(tagMapOperands, indexType, result->operands) ||
       parser->resolveOperand(numElementsInfo, indexType, result->operands))
@@ -910,6 +946,13 @@
   return success();
 }
 
+void AffineDmaWaitOp::getCanonicalizationPatterns(
+    OwningRewritePatternList &results, MLIRContext *context) {
+  /// dma_wait(memrefcast) -> dma_wait
+  results.push_back(
+      llvm::make_unique<MemRefCastFolder>(getOperationName(), context));
+}
+
 //===----------------------------------------------------------------------===//
 // AffineForOp
 //===----------------------------------------------------------------------===//
@@ -1556,7 +1599,20 @@
                          AffineMap map, ArrayRef<Value *> operands) {
   // TODO(b/133776335) Check that map operands are loop IVs or symbols.
   result->addOperands(operands);
-  result->addAttribute("map", builder->getAffineMapAttr(map));
+  if (map)
+    result->addAttribute(getMapAttrName(), builder->getAffineMapAttr(map));
+  auto memrefType = operands[0]->getType().cast<MemRefType>();
+  result->types.push_back(memrefType.getElementType());
+}
+
+void AffineLoadOp::build(Builder *builder, OperationState *result,
+                         Value *memref, ArrayRef<Value *> indices) {
+  result->addOperands(memref);
+  result->addOperands(indices);
+  auto memrefType = memref->getType().cast<MemRefType>();
+  auto map = builder->getMultiDimIdentityMap(memrefType.getRank());
+  result->addAttribute(getMapAttrName(), builder->getAffineMapAttr(map));
+  result->types.push_back(memrefType.getElementType());
 }
 
 ParseResult AffineLoadOp::parse(OpAsmParser *parser, OperationState *result) {
@@ -1568,10 +1624,11 @@
   AffineMapAttr mapAttr;
   SmallVector<OpAsmParser::OperandType, 1> mapOperands;
   return failure(
-      parser->parseOperand(memrefInfo) || parser->parseLSquare() ||
-      parser->parseAffineMapOfSSAIds(mapOperands, mapAttr, "map",
+      parser->parseOperand(memrefInfo) ||
+      parser->parseAffineMapOfSSAIds(mapOperands, mapAttr, getMapAttrName(),
                                      result->attributes) ||
-      parser->parseRSquare() || parser->parseColonType(type) ||
+      parser->parseOptionalAttributeDict(result->attributes) ||
+      parser->parseColonType(type) ||
       parser->resolveOperand(memrefInfo, type, result->operands) ||
       parser->resolveOperands(mapOperands, affineIntTy, result->operands) ||
       parser->addTypeToList(type.getElementType(), result->types));
@@ -1579,20 +1636,27 @@
 
 void AffineLoadOp::print(OpAsmPrinter *p) {
   *p << "affine.load " << *getMemRef() << '[';
-  AffineMapAttr mapAttr = getAttrOfType<AffineMapAttr>("map");
-  SmallVector<Value *, 2> operands(getIndices());
-  p->printAffineMapOfSSAIds(mapAttr, operands);
-  *p << "] : " << getMemRefType();
+  AffineMapAttr mapAttr = getAttrOfType<AffineMapAttr>(getMapAttrName());
+  if (mapAttr) {
+    SmallVector<Value *, 2> operands(getIndices());
+    p->printAffineMapOfSSAIds(mapAttr, operands);
+  }
+  *p << ']';
+  p->printOptionalAttrDict(getAttrs(), /*elidedAttrs=*/{getMapAttrName()});
+  *p << " : " << getMemRefType();
 }
 
 LogicalResult AffineLoadOp::verify() {
   if (getType() != getMemRefType().getElementType())
     return emitOpError("result type must match element type of memref");
 
-  AffineMap map = getAttrOfType<AffineMapAttr>("map").getValue();
-  if (map.getNumResults() != getMemRefType().getRank())
-    return emitOpError("affine.load affine map num results must equal memref "
-                       "rank");
+  auto mapAttr = getAttrOfType<AffineMapAttr>(getMapAttrName());
+  if (mapAttr) {
+    AffineMap map = getAttrOfType<AffineMapAttr>(getMapAttrName()).getValue();
+    if (map.getNumResults() != getMemRefType().getRank())
+      return emitOpError("affine.load affine map num results must equal"
+                         " memref rank");
+  }
 
   for (auto *idx : getIndices())
     if (!idx->getType().isIndex())
@@ -1601,6 +1665,13 @@
   return success();
 }
 
+void AffineLoadOp::getCanonicalizationPatterns(
+    OwningRewritePatternList &results, MLIRContext *context) {
+  /// load(memrefcast) -> load
+  results.push_back(
+      llvm::make_unique<MemRefCastFolder>(getOperationName(), context));
+}
+
 //===----------------------------------------------------------------------===//
 // AffineStoreOp
 //===----------------------------------------------------------------------===//
@@ -1611,7 +1682,19 @@
   // TODO(b/133776335) Check that map operands are loop IVs or symbols.
   result->addOperands(valueToStore);
   result->addOperands(operands);
-  result->addAttribute("map", builder->getAffineMapAttr(map));
+  if (map)
+    result->addAttribute(getMapAttrName(), builder->getAffineMapAttr(map));
+}
+
+void AffineStoreOp::build(Builder *builder, OperationState *result,
+                          Value *valueToStore, Value *memref,
+                          ArrayRef<Value *> operands) {
+  result->addOperands(valueToStore);
+  result->addOperands(memref);
+  result->addOperands(operands);
+  auto memrefType = memref->getType().cast<MemRefType>();
+  auto map = builder->getMultiDimIdentityMap(memrefType.getRank());
+  result->addAttribute(getMapAttrName(), builder->getAffineMapAttr(map));
 }
 
 ParseResult AffineStoreOp::parse(OpAsmParser *parser, OperationState *result) {
@@ -1624,10 +1707,11 @@
   SmallVector<OpAsmParser::OperandType, 1> mapOperands;
   return failure(
       parser->parseOperand(storeValueInfo) || parser->parseComma() ||
-      parser->parseOperand(memrefInfo) || parser->parseLSquare() ||
-      parser->parseAffineMapOfSSAIds(mapOperands, mapAttr, "map",
+      parser->parseOperand(memrefInfo) ||
+      parser->parseAffineMapOfSSAIds(mapOperands, mapAttr, getMapAttrName(),
                                      result->attributes) ||
-      parser->parseRSquare() || parser->parseColonType(type) ||
+      parser->parseOptionalAttributeDict(result->attributes) ||
+      parser->parseColonType(type) ||
       parser->resolveOperand(storeValueInfo, type.getElementType(),
                              result->operands) ||
       parser->resolveOperand(memrefInfo, type, result->operands) ||
@@ -1637,10 +1721,14 @@
 void AffineStoreOp::print(OpAsmPrinter *p) {
   *p << "affine.store " << *getValueToStore();
   *p << ", " << *getMemRef() << '[';
-  AffineMapAttr mapAttr = getAttrOfType<AffineMapAttr>("map");
-  SmallVector<Value *, 2> operands(getIndices());
-  p->printAffineMapOfSSAIds(mapAttr, operands);
-  *p << "] : " << getMemRefType();
+  AffineMapAttr mapAttr = getAttrOfType<AffineMapAttr>(getMapAttrName());
+  if (mapAttr) {
+    SmallVector<Value *, 2> operands(getIndices());
+    p->printAffineMapOfSSAIds(mapAttr, operands);
+  }
+  *p << ']';
+  p->printOptionalAttrDict(getAttrs(), /*elidedAttrs=*/{getMapAttrName()});
+  *p << " : " << getMemRefType();
 }
 
 LogicalResult AffineStoreOp::verify() {
@@ -1648,14 +1736,23 @@
   if (getValueToStore()->getType() != getMemRefType().getElementType())
     return emitOpError("first operand must have same type memref element type");
 
-  AffineMap map = getAttrOfType<AffineMapAttr>("map").getValue();
-  if (map.getNumResults() != getMemRefType().getRank())
-    return emitOpError("affine.store affine map num results must equal memref "
-                       "rank");
-
+  auto mapAttr = getAttrOfType<AffineMapAttr>(getMapAttrName());
+  if (mapAttr) {
+    AffineMap map = mapAttr.getValue();
+    if (map.getNumResults() != getMemRefType().getRank())
+      return emitOpError("affine.store affine map num results must equal"
+                         " memref rank");
+  }
   for (auto *idx : getIndices())
     if (!idx->getType().isIndex())
       return emitOpError("index to load must have 'index' type");
   // TODO(b/133776335) Verify that map operands are loop IVs or symbols.
   return success();
 }
+
+void AffineStoreOp::getCanonicalizationPatterns(
+    OwningRewritePatternList &results, MLIRContext *context) {
+  /// load(memrefcast) -> load
+  results.push_back(
+      llvm::make_unique<MemRefCastFolder>(getOperationName(), context));
+}
diff --git a/mlir/lib/Analysis/AffineAnalysis.cpp b/mlir/lib/Analysis/AffineAnalysis.cpp
index fc8c712..28ee5d6 100644
--- a/mlir/lib/Analysis/AffineAnalysis.cpp
+++ b/mlir/lib/Analysis/AffineAnalysis.cpp
@@ -668,10 +668,12 @@
 // Populates 'accessMap' with composition of AffineApplyOps reachable from
 // indices of MemRefAccess.
 void MemRefAccess::getAccessMap(AffineValueMap *accessMap) const {
-  auto memrefType = memref->getType().cast<MemRefType>();
-  // Create identity map with same number of dimensions as 'memrefType' rank.
-  auto map = AffineMap::getMultiDimIdentityMap(memrefType.getRank(),
-                                               memref->getType().getContext());
+  // Get affine map from AffineLoad/Store.
+  AffineMap map;
+  if (auto loadOp = dyn_cast<AffineLoadOp>(opInst))
+    map = loadOp.getAffineMap();
+  else if (auto storeOp = dyn_cast<AffineStoreOp>(opInst))
+    map = storeOp.getAffineMap();
   SmallVector<Value *, 8> operands(indices.begin(), indices.end());
   fullyComposeAffineMapAndOperands(&map, &operands);
   map = simplifyAffineMap(map);
@@ -780,9 +782,9 @@
   if (srcAccess.memref != dstAccess.memref)
     return DependenceResult::NoDependence;
 
-  // Return 'NoDependence' if one of these accesses is not a StoreOp.
-  if (!allowRAR && !isa<StoreOp>(srcAccess.opInst) &&
-      !isa<StoreOp>(dstAccess.opInst))
+  // Return 'NoDependence' if one of these accesses is not an AffineStoreOp.
+  if (!allowRAR && !isa<AffineStoreOp>(srcAccess.opInst) &&
+      !isa<AffineStoreOp>(dstAccess.opInst))
     return DependenceResult::NoDependence;
 
   // Get composed access function for 'srcAccess'.
@@ -866,7 +868,7 @@
   // Collect all load and store ops in loop nest rooted at 'forOp'.
   SmallVector<Operation *, 8> loadAndStoreOpInsts;
   forOp.getOperation()->walk([&](Operation *opInst) {
-    if (isa<LoadOp>(opInst) || isa<StoreOp>(opInst))
+    if (isa<AffineLoadOp>(opInst) || isa<AffineStoreOp>(opInst))
       loadAndStoreOpInsts.push_back(opInst);
   });
 
diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp
index 16e092b..0b487ba 100644
--- a/mlir/lib/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Analysis/LoopAnalysis.cpp
@@ -232,8 +232,8 @@
 template <typename LoadOrStoreOp>
 static bool isContiguousAccess(Value *iv, LoadOrStoreOp memoryOp,
                                int *memRefDim) {
-  static_assert(std::is_same<LoadOrStoreOp, LoadOp>::value ||
-                    std::is_same<LoadOrStoreOp, StoreOp>::value,
+  static_assert(std::is_same<LoadOrStoreOp, AffineLoadOp>::value ||
+                    std::is_same<LoadOrStoreOp, AffineStoreOp>::value,
                 "Must be called on either const LoadOp & or const StoreOp &");
   assert(memRefDim && "memRefDim == nullptr");
   auto memRefType = memoryOp.getMemRefType();
@@ -250,25 +250,35 @@
   }
 
   int uniqueVaryingIndexAlongIv = -1;
-  auto indices = memoryOp.getIndices();
-  unsigned numIndices = llvm::size(indices);
-  unsigned dim = 0;
-  for (auto *index : indices) {
-    if (!isAccessInvariant(iv, index)) {
-      if (uniqueVaryingIndexAlongIv != -1) {
-        // 2+ varying indices -> do not vectorize along iv.
-        return false;
+  auto accessMap = memoryOp.getAffineMap();
+  SmallVector<Value *, 4> mapOperands(memoryOp.getIndices());
+  unsigned numDims = accessMap.getNumDims();
+  for (unsigned i = 0, e = memRefType.getRank(); i < e; ++i) {
+    // Gather map operands used result expr 'i' in 'exprOperands'.
+    SmallVector<Value *, 4> exprOperands;
+    auto resultExpr = accessMap.getResult(i);
+    resultExpr.walk([&](AffineExpr expr) {
+      if (auto dimExpr = expr.dyn_cast<AffineDimExpr>())
+        exprOperands.push_back(mapOperands[dimExpr.getPosition()]);
+      else if (auto symExpr = expr.dyn_cast<AffineSymbolExpr>())
+        exprOperands.push_back(mapOperands[numDims + symExpr.getPosition()]);
+    });
+    // Check access invariance of each operand in 'exprOperands'.
+    for (auto *exprOperand : exprOperands) {
+      if (!isAccessInvariant(iv, exprOperand)) {
+        if (uniqueVaryingIndexAlongIv != -1) {
+          // 2+ varying indices -> do not vectorize along iv.
+          return false;
+        }
+        uniqueVaryingIndexAlongIv = i;
       }
-      uniqueVaryingIndexAlongIv = dim;
     }
-    ++dim;
   }
 
   if (uniqueVaryingIndexAlongIv == -1)
     *memRefDim = -1;
   else
-    *memRefDim = numIndices - (uniqueVaryingIndexAlongIv + 1);
-
+    *memRefDim = memRefType.getRank() - (uniqueVaryingIndexAlongIv + 1);
   return true;
 }
 
@@ -320,8 +330,8 @@
   loadAndStores.match(forOp, &loadAndStoresMatched);
   for (auto ls : loadAndStoresMatched) {
     auto *op = ls.getMatchedOperation();
-    auto load = dyn_cast<LoadOp>(op);
-    auto store = dyn_cast<StoreOp>(op);
+    auto load = dyn_cast<AffineLoadOp>(op);
+    auto store = dyn_cast<AffineStoreOp>(op);
     // Only scalar types are considered vectorizable, all load/store must be
     // vectorizable for a loop to qualify as vectorizable.
     // TODO(ntv): ponder whether we want to be more general here.
@@ -338,8 +348,8 @@
 
 bool mlir::isVectorizableLoopBody(AffineForOp loop, int *memRefDim) {
   VectorizableOpFun fun([memRefDim](AffineForOp loop, Operation &op) {
-    auto load = dyn_cast<LoadOp>(op);
-    auto store = dyn_cast<StoreOp>(op);
+    auto load = dyn_cast<AffineLoadOp>(op);
+    auto store = dyn_cast<AffineStoreOp>(op);
     return load ? isContiguousAccess(loop.getInductionVar(), load, memRefDim)
                 : isContiguousAccess(loop.getInductionVar(), store, memRefDim);
   });
diff --git a/mlir/lib/Analysis/MemRefBoundCheck.cpp b/mlir/lib/Analysis/MemRefBoundCheck.cpp
index 0f5edc7..b043d47 100644
--- a/mlir/lib/Analysis/MemRefBoundCheck.cpp
+++ b/mlir/lib/Analysis/MemRefBoundCheck.cpp
@@ -20,6 +20,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "mlir/AffineOps/AffineOps.h"
 #include "mlir/Analysis/AffineAnalysis.h"
 #include "mlir/Analysis/AffineStructures.h"
 #include "mlir/Analysis/Passes.h"
@@ -48,9 +49,9 @@
 
 void MemRefBoundCheck::runOnFunction() {
   getFunction().walk([](Operation *opInst) {
-    if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
+    if (auto loadOp = dyn_cast<AffineLoadOp>(opInst)) {
       boundCheckLoadOrStoreOp(loadOp);
-    } else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
+    } else if (auto storeOp = dyn_cast<AffineStoreOp>(opInst)) {
       boundCheckLoadOrStoreOp(storeOp);
     }
     // TODO(bondhugula): do this for DMA ops as well.
diff --git a/mlir/lib/Analysis/NestedMatcher.cpp b/mlir/lib/Analysis/NestedMatcher.cpp
index dc6f939..18be6cf 100644
--- a/mlir/lib/Analysis/NestedMatcher.cpp
+++ b/mlir/lib/Analysis/NestedMatcher.cpp
@@ -154,7 +154,7 @@
 }
 
 bool isLoadOrStore(Operation &op) {
-  return isa<LoadOp>(op) || isa<StoreOp>(op);
+  return isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op);
 }
 
 } // end namespace matcher
diff --git a/mlir/lib/Analysis/TestMemRefDependenceCheck.cpp b/mlir/lib/Analysis/TestMemRefDependenceCheck.cpp
index 4456ac2..1802b73 100644
--- a/mlir/lib/Analysis/TestMemRefDependenceCheck.cpp
+++ b/mlir/lib/Analysis/TestMemRefDependenceCheck.cpp
@@ -19,6 +19,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "mlir/AffineOps/AffineOps.h"
 #include "mlir/Analysis/AffineAnalysis.h"
 #include "mlir/Analysis/AffineStructures.h"
 #include "mlir/Analysis/Passes.h"
@@ -116,7 +117,7 @@
   // Collect the loads and stores within the function.
   loadsAndStores.clear();
   getFunction().walk([&](Operation *op) {
-    if (isa<LoadOp>(op) || isa<StoreOp>(op))
+    if (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op))
       loadsAndStores.push_back(op);
   });
 
diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
index ae991f7..486c265 100644
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -173,7 +173,8 @@
 LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
                                     ComputationSliceState *sliceState,
                                     bool addMemRefDimBounds) {
-  assert((isa<LoadOp>(op) || isa<StoreOp>(op)) && "load/store op expected");
+  assert((isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op)) &&
+         "affine load/store op expected");
 
   MemRefAccess access(op);
   memref = access.memref;
@@ -381,12 +382,11 @@
 template <typename LoadOrStoreOpPointer>
 LogicalResult mlir::boundCheckLoadOrStoreOp(LoadOrStoreOpPointer loadOrStoreOp,
                                             bool emitError) {
-  static_assert(std::is_same<LoadOrStoreOpPointer, LoadOp>::value ||
-                    std::is_same<LoadOrStoreOpPointer, StoreOp>::value,
-                "argument should be either a LoadOp or a StoreOp");
+  static_assert(std::is_same<LoadOrStoreOpPointer, AffineLoadOp>::value ||
+                    std::is_same<LoadOrStoreOpPointer, AffineStoreOp>::value,
+                "argument should be either a AffineLoadOp or a AffineStoreOp");
 
   Operation *opInst = loadOrStoreOp.getOperation();
-
   MemRefRegion region(opInst->getLoc());
   if (failed(region.compute(opInst, /*loopDepth=*/0, /*sliceState=*/nullptr,
                             /*addMemRefDimBounds=*/false)))
@@ -434,9 +434,9 @@
 }
 
 // Explicitly instantiate the template so that the compiler knows we need them!
-template LogicalResult mlir::boundCheckLoadOrStoreOp(LoadOp loadOp,
+template LogicalResult mlir::boundCheckLoadOrStoreOp(AffineLoadOp loadOp,
                                                      bool emitError);
-template LogicalResult mlir::boundCheckLoadOrStoreOp(StoreOp storeOp,
+template LogicalResult mlir::boundCheckLoadOrStoreOp(AffineStoreOp storeOp,
                                                      bool emitError);
 
 // Returns in 'positions' the Block positions of 'op' in each ancestor
@@ -484,9 +484,9 @@
 
 // Returns the MemRef accessed by load or store 'op'.
 static Value *getLoadOrStoreMemRef(Operation *op) {
-  if (auto loadOp = dyn_cast<LoadOp>(op))
+  if (auto loadOp = dyn_cast<AffineLoadOp>(op))
     return loadOp.getMemRef();
-  return cast<StoreOp>(op).getMemRef();
+  return cast<AffineStoreOp>(op).getMemRef();
 }
 
 // Adds loop IV bounds to 'cst' for loop IVs not found in 'ivs'.
@@ -560,8 +560,8 @@
         return failure();
       }
 
-      bool readReadAccesses =
-          isa<LoadOp>(srcAccess.opInst) && isa<LoadOp>(dstAccess.opInst);
+      bool readReadAccesses = isa<AffineLoadOp>(srcAccess.opInst) &&
+                              isa<AffineLoadOp>(dstAccess.opInst);
       FlatAffineConstraints dependenceConstraints;
       // Check dependence between 'srcAccess' and 'dstAccess'.
       DependenceResult result = checkMemrefAccessDependence(
@@ -752,7 +752,7 @@
                       : std::prev(srcLoopIVs[loopDepth - 1].getBody()->end());
 
   llvm::SmallDenseSet<Value *, 8> sequentialLoops;
-  if (isa<LoadOp>(depSourceOp) && isa<LoadOp>(depSinkOp)) {
+  if (isa<AffineLoadOp>(depSourceOp) && isa<AffineLoadOp>(depSinkOp)) {
     // For read-read access pairs, clear any slice bounds on sequential loops.
     // Get sequential loops in loop nest rooted at 'srcLoopIVs[0]'.
     getSequentialLoops(isBackwardSlice ? srcLoopIVs[0] : dstLoopIVs[0],
@@ -849,7 +849,7 @@
 // Constructs  MemRefAccess populating it with the memref, its indices and
 // opinst from 'loadOrStoreOpInst'.
 MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) {
-  if (auto loadOp = dyn_cast<LoadOp>(loadOrStoreOpInst)) {
+  if (auto loadOp = dyn_cast<AffineLoadOp>(loadOrStoreOpInst)) {
     memref = loadOp.getMemRef();
     opInst = loadOrStoreOpInst;
     auto loadMemrefType = loadOp.getMemRefType();
@@ -858,8 +858,8 @@
       indices.push_back(index);
     }
   } else {
-    assert(isa<StoreOp>(loadOrStoreOpInst) && "load/store op expected");
-    auto storeOp = dyn_cast<StoreOp>(loadOrStoreOpInst);
+    assert(isa<AffineStoreOp>(loadOrStoreOpInst) && "load/store op expected");
+    auto storeOp = dyn_cast<AffineStoreOp>(loadOrStoreOpInst);
     opInst = loadOrStoreOpInst;
     memref = storeOp.getMemRef();
     auto storeMemrefType = storeOp.getMemRefType();
@@ -874,7 +874,7 @@
   return memref->getType().cast<MemRefType>().getRank();
 }
 
-bool MemRefAccess::isStore() const { return isa<StoreOp>(opInst); }
+bool MemRefAccess::isStore() const { return isa<AffineStoreOp>(opInst); }
 
 /// Returns the nesting depth of this statement, i.e., the number of loops
 /// surrounding this statement.
@@ -914,7 +914,7 @@
   // Walk this 'affine.for' operation to gather all memory regions.
   bool error = false;
   block.walk(start, end, [&](Operation *opInst) {
-    if (!isa<LoadOp>(opInst) && !isa<StoreOp>(opInst)) {
+    if (!isa<AffineLoadOp>(opInst) && !isa<AffineStoreOp>(opInst)) {
       // Neither load nor a store op.
       return;
     }
@@ -977,7 +977,7 @@
   // Collect all load and store ops in loop nest rooted at 'forOp'.
   SmallVector<Operation *, 8> loadAndStoreOpInsts;
   forOp.getOperation()->walk([&](Operation *opInst) {
-    if (isa<LoadOp>(opInst) || isa<StoreOp>(opInst))
+    if (isa<AffineLoadOp>(opInst) || isa<AffineStoreOp>(opInst))
       loadAndStoreOpInsts.push_back(opInst);
   });
 
diff --git a/mlir/lib/Analysis/VectorAnalysis.cpp b/mlir/lib/Analysis/VectorAnalysis.cpp
index 0d1e2c0..7bb28e9 100644
--- a/mlir/lib/Analysis/VectorAnalysis.cpp
+++ b/mlir/lib/Analysis/VectorAnalysis.cpp
@@ -19,6 +19,7 @@
 #include "mlir/AffineOps/AffineOps.h"
 #include "mlir/Analysis/AffineAnalysis.h"
 #include "mlir/Analysis/LoopAnalysis.h"
+#include "mlir/IR/Builders.h"
 #include "mlir/IR/IntegerSet.h"
 #include "mlir/IR/Operation.h"
 #include "mlir/StandardOps/Ops.h"
@@ -108,7 +109,7 @@
 /// Examples can be found in the documentation of `makePermutationMap`, in the
 /// header file.
 static AffineMap makePermutationMap(
-    Operation::operand_range operands,
+    ArrayRef<Value *> indices,
     const DenseMap<Operation *, unsigned> &enclosingLoopToVectorDim) {
   if (enclosingLoopToVectorDim.empty())
     return AffineMap();
@@ -116,7 +117,6 @@
       enclosingLoopToVectorDim.begin()->getFirst()->getContext();
   using functional::makePtrDynCaster;
   using functional::map;
-  SmallVector<Value *, 8> indices(operands);
   SmallVector<AffineExpr, 4> perm(enclosingLoopToVectorDim.size(),
                                   getAffineConstantExpr(0, context));
 
@@ -167,7 +167,8 @@
 }
 
 AffineMap mlir::makePermutationMap(
-    Operation *op, const DenseMap<Operation *, unsigned> &loopToVectorDim) {
+    Operation *op, ArrayRef<Value *> indices,
+    const DenseMap<Operation *, unsigned> &loopToVectorDim) {
   DenseMap<Operation *, unsigned> enclosingLoopToVectorDim;
   auto enclosingLoops = getEnclosingforOps(op);
   for (auto *forInst : enclosingLoops) {
@@ -176,13 +177,7 @@
       enclosingLoopToVectorDim.insert(*it);
     }
   }
-
-  if (auto load = dyn_cast<LoadOp>(op)) {
-    return ::makePermutationMap(load.getIndices(), enclosingLoopToVectorDim);
-  }
-
-  auto store = cast<StoreOp>(op);
-  return ::makePermutationMap(store.getIndices(), enclosingLoopToVectorDim);
+  return ::makePermutationMap(indices, enclosingLoopToVectorDim);
 }
 
 bool mlir::matcher::operatesOnSuperVectorsOf(Operation &op,
diff --git a/mlir/lib/Parser/Parser.cpp b/mlir/lib/Parser/Parser.cpp
index 5d8a0dd..57832aa 100644
--- a/mlir/lib/Parser/Parser.cpp
+++ b/mlir/lib/Parser/Parser.cpp
@@ -1035,7 +1035,6 @@
   case Token::string: {
     auto val = getToken().getStringValue();
     consumeToken(Token::string);
-
     // Parse the optional trailing colon type if one wasn't explicitly provided.
     if (!type && consumeIf(Token::colon) && !(type = parseType()))
       return Attribute();
@@ -2326,6 +2325,9 @@
 
 /// Parse an AffineMap where the dim and symbol identifiers are SSA ids.
 ParseResult AffineParser::parseAffineMapOfSSAIds(AffineMap &map) {
+  if (!consumeIf(Token::l_square))
+    return failure();
+
   SmallVector<AffineExpr, 4> exprs;
   auto parseElt = [&]() -> ParseResult {
     auto elt = parseAffineExpr();
@@ -2336,11 +2338,15 @@
   // Parse a multi-dimensional affine expression (a comma-separated list of
   // 1-d affine expressions); the list cannot be empty. Grammar:
   // multi-dim-affine-expr ::= `(` affine-expr (`,` affine-expr)* `)
-  if (parseCommaSeparatedList(parseElt))
+  if (parseCommaSeparatedListUntil(Token::r_square, parseElt,
+                                   /*allowEmptyList=*/true))
     return failure();
   // Parsed a valid affine map.
-  map = builder.getAffineMap(numDimOperands,
-                             dimsAndSymbols.size() - numDimOperands, exprs);
+  if (exprs.empty())
+    map = AffineMap();
+  else
+    map = builder.getAffineMap(numDimOperands,
+                               dimsAndSymbols.size() - numDimOperands, exprs);
   return success();
 }
 
@@ -3452,8 +3458,10 @@
     if (parser.parseAffineMapOfSSAIds(map, parseElement))
       return failure();
     // Add AffineMap attribute.
-    mapAttr = parser.builder.getAffineMapAttr(map);
-    attrs.push_back(parser.builder.getNamedAttr(attrName, mapAttr));
+    if (map) {
+      mapAttr = parser.builder.getAffineMapAttr(map);
+      attrs.push_back(parser.builder.getNamedAttr(attrName, mapAttr));
+    }
 
     // Add dim operands before symbol operands in 'operands'.
     operands.assign(dimOperands.begin(), dimOperands.end());
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index a3aa092..e867dc7 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -75,16 +75,17 @@
 struct DmaGeneration : public FunctionPass<DmaGeneration> {
   explicit DmaGeneration(
       unsigned slowMemorySpace = 0,
-      unsigned fastMemorySpace = clFastMemorySpace,
+      unsigned fastMemorySpace = clFastMemorySpace, unsigned tagMemorySpace = 0,
       int minDmaTransferSize = 1024,
       uint64_t fastMemCapacityBytes = std::numeric_limits<uint64_t>::max())
       : slowMemorySpace(slowMemorySpace), fastMemorySpace(fastMemorySpace),
-        minDmaTransferSize(minDmaTransferSize),
+        tagMemorySpace(tagMemorySpace), minDmaTransferSize(minDmaTransferSize),
         fastMemCapacityBytes(fastMemCapacityBytes) {}
 
   explicit DmaGeneration(const DmaGeneration &other)
       : slowMemorySpace(other.slowMemorySpace),
         fastMemorySpace(other.fastMemorySpace),
+        tagMemorySpace(other.tagMemorySpace),
         minDmaTransferSize(other.minDmaTransferSize),
         fastMemCapacityBytes(other.fastMemCapacityBytes) {}
 
@@ -111,6 +112,8 @@
   const unsigned slowMemorySpace;
   // Fast memory space associated with DMAs.
   unsigned fastMemorySpace;
+  // Tag memory space associated with DMAs.
+  unsigned tagMemorySpace;
   // Minimum DMA transfer size supported by the target in bytes.
   const int minDmaTransferSize;
   // Capacity of the faster memory space.
@@ -128,10 +131,11 @@
 /// TODO(bondhugula): extend this to store op's.
 FunctionPassBase *mlir::createDmaGenerationPass(unsigned slowMemorySpace,
                                                 unsigned fastMemorySpace,
+                                                unsigned tagMemorySpace,
                                                 int minDmaTransferSize,
                                                 uint64_t fastMemCapacityBytes) {
-  return new DmaGeneration(slowMemorySpace, fastMemorySpace, minDmaTransferSize,
-                           fastMemCapacityBytes);
+  return new DmaGeneration(slowMemorySpace, fastMemorySpace, tagMemorySpace,
+                           minDmaTransferSize, fastMemCapacityBytes);
 }
 
 // Info comprising stride and number of elements transferred every stride.
@@ -173,11 +177,11 @@
 static bool getFullMemRefAsRegion(Operation *opInst, unsigned numParamLoopIVs,
                                   MemRefRegion *region) {
   unsigned rank;
-  if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
+  if (auto loadOp = dyn_cast<AffineLoadOp>(opInst)) {
     rank = loadOp.getMemRefType().getRank();
     region->memref = loadOp.getMemRef();
     region->setWrite(false);
-  } else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
+  } else if (auto storeOp = dyn_cast<AffineStoreOp>(opInst)) {
     rank = storeOp.getMemRefType().getRank();
     region->memref = storeOp.getMemRef();
     region->setWrite(true);
@@ -363,7 +367,8 @@
     *sizeInBytes = 0;
   }
   // Create a tag (single element 1-d memref) for the DMA.
-  auto tagMemRefType = top.getMemRefType({1}, top.getIntegerType(32));
+  auto tagMemRefType =
+      top.getMemRefType({1}, top.getIntegerType(32), {}, tagMemorySpace);
   auto tagMemRef = prologue.create<AllocOp>(loc, tagMemRefType);
 
   auto numElementsSSA =
@@ -393,23 +398,34 @@
   // don't get replaced.
   auto postDomFilter = std::prev(end);
 
+  // Create fully composed affine maps for each memref.
+  auto memAffineMap = b.getMultiDimIdentityMap(memIndices.size());
+  fullyComposeAffineMapAndOperands(&memAffineMap, &memIndices);
+  auto bufAffineMap = b.getMultiDimIdentityMap(bufIndices.size());
+  fullyComposeAffineMapAndOperands(&bufAffineMap, &bufIndices);
+  SmallVector<Value *, 4> tagIndices({zeroIndex});
+  auto tagAffineMap = b.getMultiDimIdentityMap(tagIndices.size());
+  fullyComposeAffineMapAndOperands(&tagAffineMap, &tagIndices);
   if (!region.isWrite()) {
     // DMA non-blocking read from original buffer to fast buffer.
-    b.create<DmaStartOp>(loc, memref, memIndices, fastMemRef, bufIndices,
-                         numElementsSSA, tagMemRef, zeroIndex, stride,
-                         numEltPerStride);
+    b.create<AffineDmaStartOp>(loc, memref, memAffineMap, memIndices,
+                               fastMemRef, bufAffineMap, bufIndices, tagMemRef,
+                               tagAffineMap, tagIndices, numElementsSSA, stride,
+                               numEltPerStride);
   } else {
     // DMA non-blocking write from fast buffer to the original memref.
-    auto op = b.create<DmaStartOp>(loc, fastMemRef, bufIndices, memref,
-                                   memIndices, numElementsSSA, tagMemRef,
-                                   zeroIndex, stride, numEltPerStride);
+    auto op = b.create<AffineDmaStartOp>(
+        loc, fastMemRef, bufAffineMap, bufIndices, memref, memAffineMap,
+        memIndices, tagMemRef, tagAffineMap, tagIndices, numElementsSSA, stride,
+        numEltPerStride);
     // Since new ops are being appended (for outgoing DMAs), adjust the end to
     // mark end of range of the original.
     *nEnd = Block::iterator(op.getOperation());
   }
 
   // Matching DMA wait to block on completion; tag always has a 0 index.
-  b.create<DmaWaitOp>(loc, tagMemRef, zeroIndex, numElementsSSA);
+  b.create<AffineDmaWaitOp>(loc, tagMemRef, tagAffineMap, zeroIndex,
+                            numElementsSSA);
 
   // Generate dealloc for the tag.
   auto tagDeallocOp = epilogue.create<DeallocOp>(loc, tagMemRef);
@@ -479,7 +495,8 @@
   // Get to the first load, store, or for op.
   auto curBegin =
       std::find_if(block->begin(), block->end(), [&](Operation &op) {
-        return isa<LoadOp>(op) || isa<StoreOp>(op) || isa<AffineForOp>(op);
+        return isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op) ||
+               isa<AffineForOp>(op);
       });
 
   for (auto it = curBegin; it != block->end(); ++it) {
@@ -522,7 +539,7 @@
         runOnBlock(/*begin=*/it, /*end=*/std::next(it));
         curBegin = std::next(it);
       }
-    } else if (!isa<LoadOp>(&*it) && !isa<StoreOp>(&*it)) {
+    } else if (!isa<AffineLoadOp>(&*it) && !isa<AffineStoreOp>(&*it)) {
       runOnBlock(/*begin=*/curBegin, /*end=*/it);
       curBegin = std::next(it);
     }
@@ -607,10 +624,10 @@
   // Walk this range of operations  to gather all memory regions.
   block->walk(begin, end, [&](Operation *opInst) {
     // Gather regions to allocate to buffers in faster memory space.
-    if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
+    if (auto loadOp = dyn_cast<AffineLoadOp>(opInst)) {
       if (loadOp.getMemRefType().getMemorySpace() != slowMemorySpace)
         return;
-    } else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
+    } else if (auto storeOp = dyn_cast<AffineStoreOp>(opInst)) {
       if (storeOp.getMemRefType().getMemorySpace() != slowMemorySpace)
         return;
     } else {
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index 77b944f..1eee40b 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -133,9 +133,9 @@
         forOps.push_back(cast<AffineForOp>(op));
       else if (op->getNumRegions() != 0)
         hasNonForRegion = true;
-      else if (isa<LoadOp>(op))
+      else if (isa<AffineLoadOp>(op))
         loadOpInsts.push_back(op);
-      else if (isa<StoreOp>(op))
+      else if (isa<AffineStoreOp>(op))
         storeOpInsts.push_back(op);
     });
   }
@@ -143,8 +143,8 @@
 
 // TODO(b/117228571) Replace when this is modeled through side-effects/op traits
 static bool isMemRefDereferencingOp(Operation &op) {
-  if (isa<LoadOp>(op) || isa<StoreOp>(op) || isa<DmaStartOp>(op) ||
-      isa<DmaWaitOp>(op))
+  if (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op) ||
+      isa<AffineDmaStartOp>(op) || isa<AffineDmaWaitOp>(op))
     return true;
   return false;
 }
@@ -174,7 +174,7 @@
     unsigned getLoadOpCount(Value *memref) {
       unsigned loadOpCount = 0;
       for (auto *loadOpInst : loads) {
-        if (memref == cast<LoadOp>(loadOpInst).getMemRef())
+        if (memref == cast<AffineLoadOp>(loadOpInst).getMemRef())
           ++loadOpCount;
       }
       return loadOpCount;
@@ -184,7 +184,7 @@
     unsigned getStoreOpCount(Value *memref) {
       unsigned storeOpCount = 0;
       for (auto *storeOpInst : stores) {
-        if (memref == cast<StoreOp>(storeOpInst).getMemRef())
+        if (memref == cast<AffineStoreOp>(storeOpInst).getMemRef())
           ++storeOpCount;
       }
       return storeOpCount;
@@ -194,7 +194,7 @@
     void getStoreOpsForMemref(Value *memref,
                               SmallVectorImpl<Operation *> *storeOps) {
       for (auto *storeOpInst : stores) {
-        if (memref == cast<StoreOp>(storeOpInst).getMemRef())
+        if (memref == cast<AffineStoreOp>(storeOpInst).getMemRef())
           storeOps->push_back(storeOpInst);
       }
     }
@@ -203,7 +203,7 @@
     void getLoadOpsForMemref(Value *memref,
                              SmallVectorImpl<Operation *> *loadOps) {
       for (auto *loadOpInst : loads) {
-        if (memref == cast<LoadOp>(loadOpInst).getMemRef())
+        if (memref == cast<AffineLoadOp>(loadOpInst).getMemRef())
           loadOps->push_back(loadOpInst);
       }
     }
@@ -213,10 +213,10 @@
     void getLoadAndStoreMemrefSet(DenseSet<Value *> *loadAndStoreMemrefSet) {
       llvm::SmallDenseSet<Value *, 2> loadMemrefs;
       for (auto *loadOpInst : loads) {
-        loadMemrefs.insert(cast<LoadOp>(loadOpInst).getMemRef());
+        loadMemrefs.insert(cast<AffineLoadOp>(loadOpInst).getMemRef());
       }
       for (auto *storeOpInst : stores) {
-        auto *memref = cast<StoreOp>(storeOpInst).getMemRef();
+        auto *memref = cast<AffineStoreOp>(storeOpInst).getMemRef();
         if (loadMemrefs.count(memref) > 0)
           loadAndStoreMemrefSet->insert(memref);
       }
@@ -308,7 +308,7 @@
   bool writesToLiveInOrEscapingMemrefs(unsigned id) {
     Node *node = getNode(id);
     for (auto *storeOpInst : node->stores) {
-      auto *memref = cast<StoreOp>(storeOpInst).getMemRef();
+      auto *memref = cast<AffineStoreOp>(storeOpInst).getMemRef();
       auto *op = memref->getDefiningOp();
       // Return true if 'memref' is a block argument.
       if (!op)
@@ -333,7 +333,7 @@
     Node *node = getNode(id);
     for (auto *storeOpInst : node->stores) {
       // Return false if there exist out edges from 'id' on 'memref'.
-      if (getOutEdgeCount(id, cast<StoreOp>(storeOpInst).getMemRef()) > 0)
+      if (getOutEdgeCount(id, cast<AffineStoreOp>(storeOpInst).getMemRef()) > 0)
         return false;
     }
     return true;
@@ -658,28 +658,28 @@
       Node node(nextNodeId++, &op);
       for (auto *opInst : collector.loadOpInsts) {
         node.loads.push_back(opInst);
-        auto *memref = cast<LoadOp>(opInst).getMemRef();
+        auto *memref = cast<AffineLoadOp>(opInst).getMemRef();
         memrefAccesses[memref].insert(node.id);
       }
       for (auto *opInst : collector.storeOpInsts) {
         node.stores.push_back(opInst);
-        auto *memref = cast<StoreOp>(opInst).getMemRef();
+        auto *memref = cast<AffineStoreOp>(opInst).getMemRef();
         memrefAccesses[memref].insert(node.id);
       }
       forToNodeMap[&op] = node.id;
       nodes.insert({node.id, node});
-    } else if (auto loadOp = dyn_cast<LoadOp>(op)) {
+    } else if (auto loadOp = dyn_cast<AffineLoadOp>(op)) {
       // Create graph node for top-level load op.
       Node node(nextNodeId++, &op);
       node.loads.push_back(&op);
-      auto *memref = cast<LoadOp>(op).getMemRef();
+      auto *memref = cast<AffineLoadOp>(op).getMemRef();
       memrefAccesses[memref].insert(node.id);
       nodes.insert({node.id, node});
-    } else if (auto storeOp = dyn_cast<StoreOp>(op)) {
+    } else if (auto storeOp = dyn_cast<AffineStoreOp>(op)) {
       // Create graph node for top-level store op.
       Node node(nextNodeId++, &op);
       node.stores.push_back(&op);
-      auto *memref = cast<StoreOp>(op).getMemRef();
+      auto *memref = cast<AffineStoreOp>(op).getMemRef();
       memrefAccesses[memref].insert(node.id);
       nodes.insert({node.id, node});
     } else if (op.getNumRegions() != 0) {
@@ -740,7 +740,7 @@
   dstLoads->clear();
   SmallVector<Operation *, 4> srcLoadsToKeep;
   for (auto *load : *srcLoads) {
-    if (cast<LoadOp>(load).getMemRef() == memref)
+    if (cast<AffineLoadOp>(load).getMemRef() == memref)
       dstLoads->push_back(load);
     else
       srcLoadsToKeep.push_back(load);
@@ -861,7 +861,7 @@
   // Builder to create constants at the top level.
   OpBuilder top(forInst->getFunction().getBody());
   // Create new memref type based on slice bounds.
-  auto *oldMemRef = cast<StoreOp>(srcStoreOpInst).getMemRef();
+  auto *oldMemRef = cast<AffineStoreOp>(srcStoreOpInst).getMemRef();
   auto oldMemRefType = oldMemRef->getType().cast<MemRefType>();
   unsigned rank = oldMemRefType.getRank();
 
@@ -976,7 +976,7 @@
   // Gather all memrefs from 'srcNode' store ops.
   DenseSet<Value *> storeMemrefs;
   for (auto *storeOpInst : srcNode->stores) {
-    storeMemrefs.insert(cast<StoreOp>(storeOpInst).getMemRef());
+    storeMemrefs.insert(cast<AffineStoreOp>(storeOpInst).getMemRef());
   }
   // Return false if any of the following are true:
   // *) 'srcNode' writes to a live in/out memref other than 'memref'.
@@ -1461,7 +1461,7 @@
       DenseSet<Value *> visitedMemrefs;
       while (!loads.empty()) {
         // Get memref of load on top of the stack.
-        auto *memref = cast<LoadOp>(loads.back()).getMemRef();
+        auto *memref = cast<AffineLoadOp>(loads.back()).getMemRef();
         if (visitedMemrefs.count(memref) > 0)
           continue;
         visitedMemrefs.insert(memref);
@@ -1517,7 +1517,7 @@
           // Gather 'dstNode' store ops to 'memref'.
           SmallVector<Operation *, 2> dstStoreOpInsts;
           for (auto *storeOpInst : dstNode->stores)
-            if (cast<StoreOp>(storeOpInst).getMemRef() == memref)
+            if (cast<AffineStoreOp>(storeOpInst).getMemRef() == memref)
               dstStoreOpInsts.push_back(storeOpInst);
 
           unsigned bestDstLoopDepth;
@@ -1562,7 +1562,7 @@
               // Create private memref for 'memref' in 'dstAffineForOp'.
               SmallVector<Operation *, 4> storesForMemref;
               for (auto *storeOpInst : sliceCollector.storeOpInsts) {
-                if (cast<StoreOp>(storeOpInst).getMemRef() == memref)
+                if (cast<AffineStoreOp>(storeOpInst).getMemRef() == memref)
                   storesForMemref.push_back(storeOpInst);
               }
               assert(storesForMemref.size() == 1);
@@ -1584,7 +1584,7 @@
             // Add new load ops to current Node load op list 'loads' to
             // continue fusing based on new operands.
             for (auto *loadOpInst : dstLoopCollector.loadOpInsts) {
-              auto *loadMemRef = cast<LoadOp>(loadOpInst).getMemRef();
+              auto *loadMemRef = cast<AffineLoadOp>(loadOpInst).getMemRef();
               if (visitedMemrefs.count(loadMemRef) == 0)
                 loads.push_back(loadOpInst);
             }
@@ -1742,7 +1742,7 @@
       // Check that all stores are to the same memref.
       DenseSet<Value *> storeMemrefs;
       for (auto *storeOpInst : sibNode->stores) {
-        storeMemrefs.insert(cast<StoreOp>(storeOpInst).getMemRef());
+        storeMemrefs.insert(cast<AffineStoreOp>(storeOpInst).getMemRef());
       }
       if (storeMemrefs.size() != 1)
         return false;
@@ -1753,7 +1753,7 @@
     auto fn = dstNode->op->getFunction();
     for (unsigned i = 0, e = fn.getNumArguments(); i != e; ++i) {
       for (auto *user : fn.getArgument(i)->getUsers()) {
-        if (auto loadOp = dyn_cast<LoadOp>(user)) {
+        if (auto loadOp = dyn_cast<AffineLoadOp>(user)) {
           // Gather loops surrounding 'use'.
           SmallVector<AffineForOp, 4> loops;
           getLoopIVs(*user, &loops);
diff --git a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
index c4c1184..48e97f4 100644
--- a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
+++ b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
@@ -70,7 +70,7 @@
 
 static bool isMemRefDereferencingOp(Operation &op) {
   // TODO(asabne): Support DMA Ops.
-  if (isa<LoadOp>(op) || isa<StoreOp>(op)) {
+  if (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op)) {
     return true;
   }
   return false;
@@ -94,23 +94,25 @@
     // If the body of a predicated region has a for loop, we don't hoist the
     // 'affine.if'.
     return false;
-  } else if (isa<DmaStartOp>(op) || isa<DmaWaitOp>(op)) {
+  } else if (isa<AffineDmaStartOp>(op) || isa<AffineDmaWaitOp>(op)) {
     // TODO(asabne): Support DMA ops.
     return false;
   } else if (!isa<ConstantOp>(op)) {
     if (isMemRefDereferencingOp(op)) {
-      Value *memref = isa<LoadOp>(op) ? cast<LoadOp>(op).getMemRef()
-                                      : cast<StoreOp>(op).getMemRef();
+      Value *memref = isa<AffineLoadOp>(op)
+                          ? cast<AffineLoadOp>(op).getMemRef()
+                          : cast<AffineStoreOp>(op).getMemRef();
       for (auto *user : memref->getUsers()) {
         // If this memref has a user that is a DMA, give up because these
         // operations write to this memref.
-        if (isa<DmaStartOp>(op) || isa<DmaWaitOp>(op)) {
+        if (isa<AffineDmaStartOp>(op) || isa<AffineDmaWaitOp>(op)) {
           return false;
         }
         // If the memref used by the load/store is used in a store elsewhere in
         // the loop nest, we do not hoist. Similarly, if the memref used in a
         // load is also being stored too, we do not hoist the load.
-        if (isa<StoreOp>(user) || (isa<LoadOp>(user) && isa<StoreOp>(op))) {
+        if (isa<AffineStoreOp>(user) ||
+            (isa<AffineLoadOp>(user) && isa<AffineStoreOp>(op))) {
           if (&op != user) {
             SmallVector<AffineForOp, 8> userIVs;
             getLoopIVs(*user, &userIVs);
diff --git a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
index 1208e2f..13a53e3 100644
--- a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
+++ b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
@@ -22,6 +22,7 @@
 // SSA scalars live out of 'affine.for'/'affine.if' statements is available.
 //===----------------------------------------------------------------------===//
 
+#include "mlir/AffineOps/AffineOps.h"
 #include "mlir/Analysis/AffineAnalysis.h"
 #include "mlir/Analysis/Dominance.h"
 #include "mlir/Analysis/Utils.h"
@@ -72,7 +73,7 @@
 struct MemRefDataFlowOpt : public FunctionPass<MemRefDataFlowOpt> {
   void runOnFunction() override;
 
-  void forwardStoreToLoad(LoadOp loadOp);
+  void forwardStoreToLoad(AffineLoadOp loadOp);
 
   // A list of memref's that are potentially dead / could be eliminated.
   SmallPtrSet<Value *, 4> memrefsToErase;
@@ -93,7 +94,7 @@
 
 // This is a straightforward implementation not optimized for speed. Optimize
 // this in the future if needed.
-void MemRefDataFlowOpt::forwardStoreToLoad(LoadOp loadOp) {
+void MemRefDataFlowOpt::forwardStoreToLoad(AffineLoadOp loadOp) {
   Operation *lastWriteStoreOp = nullptr;
   Operation *loadOpInst = loadOp.getOperation();
 
@@ -103,7 +104,7 @@
   SmallVector<Operation *, 8> storeOps;
   unsigned minSurroundingLoops = getNestingDepth(*loadOpInst);
   for (auto *user : loadOp.getMemRef()->getUsers()) {
-    auto storeOp = dyn_cast<StoreOp>(user);
+    auto storeOp = dyn_cast<AffineStoreOp>(user);
     if (!storeOp)
       continue;
     auto *storeOpInst = storeOp.getOperation();
@@ -202,7 +203,7 @@
     return;
 
   // Perform the actual store to load forwarding.
-  Value *storeVal = cast<StoreOp>(lastWriteStoreOp).getValueToStore();
+  Value *storeVal = cast<AffineStoreOp>(lastWriteStoreOp).getValueToStore();
   loadOp.getResult()->replaceAllUsesWith(storeVal);
   // Record the memref for a later sweep to optimize away.
   memrefsToErase.insert(loadOp.getMemRef());
@@ -225,7 +226,8 @@
   memrefsToErase.clear();
 
   // Walk all load's and perform load/store forwarding.
-  f.walk<LoadOp>([&](LoadOp loadOp) { forwardStoreToLoad(loadOp); });
+  f.walk<AffineLoadOp>(
+      [&](AffineLoadOp loadOp) { forwardStoreToLoad(loadOp); });
 
   // Erase all load op's whose results were replaced with store fwd'ed ones.
   for (auto *loadOp : loadOpsToErase) {
@@ -243,7 +245,7 @@
       // could still erase it if the call had no side-effects.
       continue;
     if (llvm::any_of(memref->getUsers(), [&](Operation *ownerInst) {
-          return (!isa<StoreOp>(ownerInst) && !isa<DeallocOp>(ownerInst));
+          return (!isa<AffineStoreOp>(ownerInst) && !isa<DeallocOp>(ownerInst));
         }))
       continue;
 
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index d0e0d18..af456c3 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -57,10 +57,9 @@
 // Temporary utility: will be replaced when DmaStart/DmaFinish abstract op's are
 // added.  TODO(b/117228571)
 static unsigned getTagMemRefPos(Operation &dmaInst) {
-  assert(isa<DmaStartOp>(dmaInst) || isa<DmaWaitOp>(dmaInst));
-  if (isa<DmaStartOp>(dmaInst)) {
-    // Second to last operand.
-    return dmaInst.getNumOperands() - 2;
+  assert(isa<AffineDmaStartOp>(dmaInst) || isa<AffineDmaWaitOp>(dmaInst));
+  if (auto dmaStartOp = dyn_cast<AffineDmaStartOp>(dmaInst)) {
+    return dmaStartOp.getTagMemRefOperandIndex();
   }
   // First operand for a dma finish operation.
   return 0;
@@ -151,7 +150,7 @@
 }
 
 // Check if tags of the dma start op and dma wait op match.
-static bool checkTagMatch(DmaStartOp startOp, DmaWaitOp waitOp) {
+static bool checkTagMatch(AffineDmaStartOp startOp, AffineDmaWaitOp waitOp) {
   if (startOp.getTagMemRef() != waitOp.getTagMemRef())
     return false;
   auto startIndices = startOp.getTagIndices();
@@ -179,9 +178,9 @@
     SmallVectorImpl<std::pair<Operation *, Operation *>> &startWaitPairs) {
 
   // Collect outgoing DMA operations - needed to check for dependences below.
-  SmallVector<DmaStartOp, 4> outgoingDmaOps;
+  SmallVector<AffineDmaStartOp, 4> outgoingDmaOps;
   for (auto &op : *forOp.getBody()) {
-    auto dmaStartOp = dyn_cast<DmaStartOp>(op);
+    auto dmaStartOp = dyn_cast<AffineDmaStartOp>(op);
     if (dmaStartOp && dmaStartOp.isSrcMemorySpaceFaster())
       outgoingDmaOps.push_back(dmaStartOp);
   }
@@ -189,11 +188,11 @@
   SmallVector<Operation *, 4> dmaStartInsts, dmaFinishInsts;
   for (auto &op : *forOp.getBody()) {
     // Collect DMA finish operations.
-    if (isa<DmaWaitOp>(op)) {
+    if (isa<AffineDmaWaitOp>(op)) {
       dmaFinishInsts.push_back(&op);
       continue;
     }
-    auto dmaStartOp = dyn_cast<DmaStartOp>(op);
+    auto dmaStartOp = dyn_cast<AffineDmaStartOp>(op);
     if (!dmaStartOp)
       continue;
 
@@ -234,8 +233,8 @@
   // For each start operation, we look for a matching finish operation.
   for (auto *dmaStartInst : dmaStartInsts) {
     for (auto *dmaFinishInst : dmaFinishInsts) {
-      if (checkTagMatch(cast<DmaStartOp>(dmaStartInst),
-                        cast<DmaWaitOp>(dmaFinishInst))) {
+      if (checkTagMatch(cast<AffineDmaStartOp>(dmaStartInst),
+                        cast<AffineDmaWaitOp>(dmaFinishInst))) {
         startWaitPairs.push_back({dmaStartInst, dmaFinishInst});
         break;
       }
@@ -273,7 +272,7 @@
   for (auto &pair : startWaitPairs) {
     auto *dmaStartInst = pair.first;
     Value *oldMemRef = dmaStartInst->getOperand(
-        cast<DmaStartOp>(dmaStartInst).getFasterMemPos());
+        cast<AffineDmaStartOp>(dmaStartInst).getFasterMemPos());
     if (!doubleBuffer(oldMemRef, forOp)) {
       // Normally, double buffering should not fail because we already checked
       // that there are no uses outside.
@@ -324,7 +323,7 @@
   DenseMap<Operation *, unsigned> instShiftMap;
   for (auto &pair : startWaitPairs) {
     auto *dmaStartInst = pair.first;
-    assert(isa<DmaStartOp>(dmaStartInst));
+    assert(isa<AffineDmaStartOp>(dmaStartInst));
     instShiftMap[dmaStartInst] = 0;
     // Set shifts for DMA start op's affine operand computation slices to 0.
     SmallVector<AffineApplyOp, 4> sliceOps;
diff --git a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
index 93503d1..a87883d 100644
--- a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
@@ -46,10 +46,10 @@
 static void getLoadAndStoreMemRefAccesses(Operation *opA,
                                           DenseMap<Value *, bool> &values) {
   opA->walk([&](Operation *op) {
-    if (auto loadOp = dyn_cast<LoadOp>(op)) {
+    if (auto loadOp = dyn_cast<AffineLoadOp>(op)) {
       if (values.count(loadOp.getMemRef()) == 0)
         values[loadOp.getMemRef()] = false;
-    } else if (auto storeOp = dyn_cast<StoreOp>(op)) {
+    } else if (auto storeOp = dyn_cast<AffineStoreOp>(op)) {
       values[storeOp.getMemRef()] = true;
     }
   });
@@ -60,10 +60,10 @@
 // Returns false otherwise.
 static bool isDependentLoadOrStoreOp(Operation *op,
                                      DenseMap<Value *, bool> &values) {
-  if (auto loadOp = dyn_cast<LoadOp>(op)) {
+  if (auto loadOp = dyn_cast<AffineLoadOp>(op)) {
     return values.count(loadOp.getMemRef()) > 0 &&
            values[loadOp.getMemRef()] == true;
-  } else if (auto storeOp = dyn_cast<StoreOp>(op)) {
+  } else if (auto storeOp = dyn_cast<AffineStoreOp>(op)) {
     return values.count(storeOp.getMemRef()) > 0;
   }
   return false;
@@ -115,7 +115,7 @@
     opX->walk([&](Operation *op) {
       if (lastDepOp)
         return;
-      if (isa<LoadOp>(op) || isa<StoreOp>(op)) {
+      if (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op)) {
         if (isDependentLoadOrStoreOp(op, values))
           lastDepOp = opX;
         return;
@@ -185,7 +185,7 @@
                      SmallVectorImpl<Operation *> &loadAndStoreOps) {
   bool hasIfOp = false;
   forOp.getOperation()->walk([&](Operation *op) {
-    if (isa<LoadOp>(op) || isa<StoreOp>(op))
+    if (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op))
       loadAndStoreOps.push_back(op);
     else if (isa<AffineIfOp>(op))
       hasIfOp = true;
@@ -442,7 +442,7 @@
     unsigned storeCount = 0;
     llvm::SmallDenseSet<Value *, 4> storeMemrefs;
     srcForOp.getOperation()->walk([&](Operation *op) {
-      if (auto storeOp = dyn_cast<StoreOp>(op)) {
+      if (auto storeOp = dyn_cast<AffineStoreOp>(op)) {
         storeMemrefs.insert(storeOp.getMemRef());
         ++storeCount;
       }
@@ -454,7 +454,7 @@
     // 'insertPointParent'.
     for (auto *value : storeMemrefs) {
       for (auto *user : value->getUsers()) {
-        if (auto loadOp = dyn_cast<LoadOp>(user)) {
+        if (auto loadOp = dyn_cast<AffineLoadOp>(user)) {
           SmallVector<AffineForOp, 4> loops;
           // Check if any loop in loop nest surrounding 'user' is
           // 'insertPointParent'.
diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp
index 876f44b..16f4eff 100644
--- a/mlir/lib/Transforms/Utils/Utils.cpp
+++ b/mlir/lib/Transforms/Utils/Utils.cpp
@@ -38,12 +38,24 @@
 // Temporary utility: will be replaced when this is modeled through
 // side-effects/op traits. TODO(b/117228571)
 static bool isMemRefDereferencingOp(Operation &op) {
-  if (isa<LoadOp>(op) || isa<StoreOp>(op) || isa<DmaStartOp>(op) ||
-      isa<DmaWaitOp>(op))
+  if (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op) ||
+      isa<AffineDmaStartOp>(op) || isa<AffineDmaWaitOp>(op))
     return true;
   return false;
 }
 
+/// Return the AffineMapAttr associated with memory 'op' on 'memref'.
+static NamedAttribute getAffineMapAttrForMemRef(Operation *op, Value *memref) {
+  if (auto loadOp = dyn_cast<AffineLoadOp>(op))
+    return loadOp.getAffineMapAttrForMemRef(memref);
+  else if (auto storeOp = dyn_cast<AffineStoreOp>(op))
+    return storeOp.getAffineMapAttrForMemRef(memref);
+  else if (auto dmaStart = dyn_cast<AffineDmaStartOp>(op))
+    return dmaStart.getAffineMapAttrForMemRef(memref);
+  assert(isa<AffineDmaWaitOp>(op));
+  return cast<AffineDmaWaitOp>(op).getAffineMapAttrForMemRef(memref);
+}
+
 bool mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
                                     ArrayRef<Value *> extraIndices,
                                     AffineMap indexRemap,
@@ -111,24 +123,32 @@
       assert(i < opInst->getNumOperands() && "operand guaranteed to be found");
       return i;
     };
-    unsigned memRefOperandPos = getMemRefOperandPos();
-
-    // Construct the new operation using this memref.
-    OperationState state(opInst->getLoc(), opInst->getName());
-    state.setOperandListToResizable(opInst->hasResizableOperandsList());
-    state.operands.reserve(opInst->getNumOperands() + extraIndices.size());
-    // Insert the non-memref operands.
-    state.operands.append(opInst->operand_begin(),
-                          opInst->operand_begin() + memRefOperandPos);
-    state.operands.push_back(newMemRef);
 
     OpBuilder builder(opInst);
-    for (auto *extraIndex : extraIndices) {
-      assert(extraIndex->getDefiningOp()->getNumResults() == 1 &&
-             "single result op's expected to generate these indices");
-      assert((isValidDim(extraIndex) || isValidSymbol(extraIndex)) &&
-             "invalid memory op index");
-      state.operands.push_back(extraIndex);
+    unsigned memRefOperandPos = getMemRefOperandPos();
+    NamedAttribute oldMapAttrPair =
+        getAffineMapAttrForMemRef(opInst, oldMemRef);
+    AffineMap oldMap = oldMapAttrPair.second.cast<AffineMapAttr>().getValue();
+    unsigned oldMapNumInputs = oldMap.getNumInputs();
+    SmallVector<Value *, 4> oldMapOperands(
+        opInst->operand_begin() + memRefOperandPos + 1,
+        opInst->operand_begin() + memRefOperandPos + 1 + oldMapNumInputs);
+    SmallVector<Value *, 4> affineApplyOps;
+
+    // Apply 'oldMemRefOperands = oldMap(oldMapOperands)'.
+    SmallVector<Value *, 4> oldMemRefOperands;
+    oldMemRefOperands.reserve(oldMemRefRank);
+    if (oldMap != builder.getMultiDimIdentityMap(oldMap.getNumDims())) {
+      for (auto resultExpr : oldMap.getResults()) {
+        auto singleResMap = builder.getAffineMap(
+            oldMap.getNumDims(), oldMap.getNumSymbols(), resultExpr);
+        auto afOp = builder.create<AffineApplyOp>(opInst->getLoc(),
+                                                  singleResMap, oldMapOperands);
+        oldMemRefOperands.push_back(afOp);
+        affineApplyOps.push_back(afOp);
+      }
+    } else {
+      oldMemRefOperands.append(oldMapOperands.begin(), oldMapOperands.end());
     }
 
     // Construct new indices as a remap of the old ones if a remapping has been
@@ -137,28 +157,70 @@
     SmallVector<Value *, 4> remapOperands;
     remapOperands.reserve(extraOperands.size() + oldMemRefRank);
     remapOperands.append(extraOperands.begin(), extraOperands.end());
-    remapOperands.append(opInst->operand_begin() + memRefOperandPos + 1,
-                         opInst->operand_begin() + memRefOperandPos + 1 +
-                             oldMemRefRank);
+    remapOperands.append(oldMemRefOperands.begin(), oldMemRefOperands.end());
+
+    SmallVector<Value *, 4> remapOutputs;
+    remapOutputs.reserve(oldMemRefRank);
+
     if (indexRemap &&
         indexRemap != builder.getMultiDimIdentityMap(indexRemap.getNumDims())) {
-
       // Remapped indices.
       for (auto resultExpr : indexRemap.getResults()) {
         auto singleResMap = builder.getAffineMap(
             indexRemap.getNumDims(), indexRemap.getNumSymbols(), resultExpr);
         auto afOp = builder.create<AffineApplyOp>(opInst->getLoc(),
                                                   singleResMap, remapOperands);
-        state.operands.push_back(afOp);
+        remapOutputs.push_back(afOp);
+        affineApplyOps.push_back(afOp);
       }
     } else {
       // No remapping specified.
-      state.operands.append(remapOperands.begin(), remapOperands.end());
+      remapOutputs.append(remapOperands.begin(), remapOperands.end());
     }
 
+    SmallVector<Value *, 4> newMapOperands;
+    newMapOperands.reserve(newMemRefRank);
+
+    // Prepend 'extraIndices' in 'newMapOperands'.
+    for (auto *extraIndex : extraIndices) {
+      assert(extraIndex->getDefiningOp()->getNumResults() == 1 &&
+             "single result op's expected to generate these indices");
+      assert((isValidDim(extraIndex) || isValidSymbol(extraIndex)) &&
+             "invalid memory op index");
+      newMapOperands.push_back(extraIndex);
+    }
+
+    // Append 'remapOutputs' to 'newMapOperands'.
+    newMapOperands.append(remapOutputs.begin(), remapOutputs.end());
+
+    // Create new fully composed AffineMap for new op to be created.
+    assert(newMapOperands.size() == newMemRefRank);
+    auto newMap = builder.getMultiDimIdentityMap(newMemRefRank);
+    // TODO(b/136262594) Avoid creating/deleting temporary AffineApplyOps here.
+    fullyComposeAffineMapAndOperands(&newMap, &newMapOperands);
+    newMap = simplifyAffineMap(newMap);
+    canonicalizeMapAndOperands(&newMap, &newMapOperands);
+    // Remove any affine.apply's that became dead as a result of composition.
+    for (auto *value : affineApplyOps)
+      if (value->use_empty())
+        value->getDefiningOp()->erase();
+
+    // Construct the new operation using this memref.
+    OperationState state(opInst->getLoc(), opInst->getName());
+    state.setOperandListToResizable(opInst->hasResizableOperandsList());
+    state.operands.reserve(opInst->getNumOperands() + extraIndices.size());
+    // Insert the non-memref operands.
+    state.operands.append(opInst->operand_begin(),
+                          opInst->operand_begin() + memRefOperandPos);
+    // Insert the new memref value.
+    state.operands.push_back(newMemRef);
+
+    // Insert the new memref map operands.
+    state.operands.append(newMapOperands.begin(), newMapOperands.end());
+
     // Insert the remaining operands unmodified.
     state.operands.append(opInst->operand_begin() + memRefOperandPos + 1 +
-                              oldMemRefRank,
+                              oldMapNumInputs,
                           opInst->operand_end());
 
     // Result types don't change. Both memref's are of the same elemental type.
@@ -166,9 +228,15 @@
     for (auto *result : opInst->getResults())
       state.types.push_back(result->getType());
 
-    // Attributes also do not change.
-    state.attributes.append(opInst->getAttrs().begin(),
-                            opInst->getAttrs().end());
+    // Add attribute for 'newMap', other Attributes do not change.
+    auto newMapAttr = builder.getAffineMapAttr(newMap);
+    for (auto namedAttr : opInst->getAttrs()) {
+      if (namedAttr.first == oldMapAttrPair.first) {
+        state.attributes.push_back({namedAttr.first, newMapAttr});
+      } else {
+        state.attributes.push_back(namedAttr);
+      }
+    }
 
     // Create the new operation.
     auto *repOp = builder.createOperation(state);
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index 3fca26b..4aff2ac 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -44,7 +44,6 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
 
 using namespace mlir;
 
@@ -717,7 +716,8 @@
   // do not necessarily belong to use-def chains starting from loads (e.g
   // storing a constant), we need to handle them in a post-pass.
   DenseSet<Operation *> terminals;
-  // Checks that the type of `op` is StoreOp and adds it to the terminals set.
+  // Checks that the type of `op` is AffineStoreOp and adds it to the terminals
+  // set.
   void registerTerminal(Operation *op);
 
 private:
@@ -739,14 +739,14 @@
   vectorizedSet.insert(value);
   vectorizationMap.insert(std::make_pair(key, value));
   registerReplacement(key->getResult(0), value->getResult(0));
-  if (isa<LoadOp>(key)) {
+  if (isa<AffineLoadOp>(key)) {
     assert(roots.count(key) == 0 && "root was already inserted previously");
     roots.insert(key);
   }
 }
 
 void VectorizationState::registerTerminal(Operation *op) {
-  assert(isa<StoreOp>(op) && "terminal must be a StoreOp");
+  assert(isa<AffineStoreOp>(op) && "terminal must be a AffineStoreOp");
   assert(terminals.count(op) == 0 &&
          "terminal was already inserted previously");
   terminals.insert(op);
@@ -766,16 +766,31 @@
   replacementMap.insert(std::make_pair(key, value));
 }
 
+// Apply 'map' with 'mapOperands' returning resulting values in 'results'.
+static void computeMemoryOpIndices(Operation *op, AffineMap map,
+                                   ArrayRef<Value *> mapOperands,
+                                   SmallVectorImpl<Value *> &results) {
+  OpBuilder builder(op);
+  for (auto resultExpr : map.getResults()) {
+    auto singleResMap =
+        builder.getAffineMap(map.getNumDims(), map.getNumSymbols(), resultExpr);
+    auto afOp =
+        builder.create<AffineApplyOp>(op->getLoc(), singleResMap, mapOperands);
+    results.push_back(afOp);
+  }
+}
+
 ////// TODO(ntv): Hoist to a VectorizationMaterialize.cpp when appropriate. ////
 
 /// Handles the vectorization of load and store MLIR operations.
 ///
-/// LoadOp operations are the roots of the vectorizeNonTerminals call. They are
-/// vectorized immediately. The resulting vector.transfer_read is immediately
-/// registered to replace all uses of the LoadOp in this pattern's scope.
+/// AffineLoadOp operations are the roots of the vectorizeNonTerminals call.
+/// They are vectorized immediately. The resulting vector.transfer_read is
+/// immediately registered to replace all uses of the AffineLoadOp in this
+/// pattern's scope.
 ///
-/// StoreOp are the terminals of the vectorizeNonTerminals call. They need to be
-/// vectorized late once all the use-def chains have been traversed.
+/// AffineStoreOp are the terminals of the vectorizeNonTerminals call. They
+/// need to be vectorized late once all the use-def chains have been traversed.
 /// Additionally, they may have ssa-values operands which come from outside the
 /// scope of the current pattern.
 /// Such special cases force us to delay the vectorization of the stores until
@@ -798,17 +813,26 @@
   // identity subset of AffineMap and do not change layout.
   // TODO(ntv): increase the expressiveness power of vector.transfer operations
   // as needed by various targets.
-  if (isa<LoadOp>(opInst)) {
+  if (auto load = dyn_cast<AffineLoadOp>(opInst)) {
+    OpBuilder b(opInst);
+    SmallVector<Value *, 4> mapOperands(load.getIndices());
+    SmallVector<Value *, 8> indices;
+    indices.reserve(load.getMemRefType().getRank());
+    if (load.getAffineMap() !=
+        b.getMultiDimIdentityMap(load.getMemRefType().getRank())) {
+      computeMemoryOpIndices(opInst, load.getAffineMap(), mapOperands, indices);
+    } else {
+      indices.append(load.getIndices().begin(), load.getIndices().end());
+    }
     auto permutationMap =
-        makePermutationMap(opInst, state->strategy->loopToVectorDim);
+        makePermutationMap(opInst, indices, state->strategy->loopToVectorDim);
     if (!permutationMap)
       return LogicalResult::Failure;
     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
     LLVM_DEBUG(permutationMap.print(dbgs()));
-    OpBuilder b(opInst);
     auto transfer = b.create<VectorTransferReadOp>(
         opInst->getLoc(), vectorType, memoryOp.getMemRef(),
-        map(makePtrDynCaster<Value>(), memoryOp.getIndices()), permutationMap);
+        map(makePtrDynCaster<Value>(), indices), permutationMap);
     state->registerReplacement(opInst, transfer.getOperation());
   } else {
     state->registerTerminal(opInst);
@@ -837,8 +861,8 @@
   loadAndStores.match(loop.getOperation(), &loadAndStoresMatches);
   for (auto ls : loadAndStoresMatches) {
     auto *opInst = ls.getMatchedOperation();
-    auto load = dyn_cast<LoadOp>(opInst);
-    auto store = dyn_cast<StoreOp>(opInst);
+    auto load = dyn_cast<AffineLoadOp>(opInst);
+    auto store = dyn_cast<AffineStoreOp>(opInst);
     LLVM_DEBUG(opInst->print(dbgs()));
     LogicalResult result =
         load ? vectorizeRootOrTerminal(loop.getInductionVar(), load, state)
@@ -1002,21 +1026,32 @@
 static Operation *vectorizeOneOperation(Operation *opInst,
                                         VectorizationState *state) {
   // Sanity checks.
-  assert(!isa<LoadOp>(opInst) &&
+  assert(!isa<AffineLoadOp>(opInst) &&
          "all loads must have already been fully vectorized independently");
   assert(!isa<VectorTransferReadOp>(opInst) &&
          "vector.transfer_read cannot be further vectorized");
   assert(!isa<VectorTransferWriteOp>(opInst) &&
          "vector.transfer_write cannot be further vectorized");
 
-  if (auto store = dyn_cast<StoreOp>(opInst)) {
+  if (auto store = dyn_cast<AffineStoreOp>(opInst)) {
+    OpBuilder b(opInst);
     auto *memRef = store.getMemRef();
     auto *value = store.getValueToStore();
     auto *vectorValue = vectorizeOperand(value, opInst, state);
-    auto indices = map(makePtrDynCaster<Value>(), store.getIndices());
-    OpBuilder b(opInst);
+
+    SmallVector<Value *, 4> mapOperands(store.getIndices());
+    SmallVector<Value *, 8> indices;
+    indices.reserve(store.getMemRefType().getRank());
+    if (store.getAffineMap() !=
+        b.getMultiDimIdentityMap(store.getMemRefType().getRank())) {
+      computeMemoryOpIndices(opInst, store.getAffineMap(), mapOperands,
+                             indices);
+    } else {
+      indices.append(store.getIndices().begin(), store.getIndices().end());
+    }
+
     auto permutationMap =
-        makePermutationMap(opInst, state->strategy->loopToVectorDim);
+        makePermutationMap(opInst, indices, state->strategy->loopToVectorDim);
     if (!permutationMap)
       return nullptr;
     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
@@ -1025,7 +1060,7 @@
         opInst->getLoc(), vectorValue, memRef, indices, permutationMap);
     auto *res = transfer.getOperation();
     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorized store: " << *res);
-    // "Terminals" (i.e. StoreOps) are erased on the spot.
+    // "Terminals" (i.e. AffineStoreOps) are erased on the spot.
     opInst->erase();
     return res;
   }
@@ -1156,9 +1191,9 @@
   // From now on, any error triggers the scope guard above.
   //////////////////////////////////////////////////////////////////////////////
   // 1. Vectorize all the loops matched by the pattern, recursively.
-  // This also vectorizes the roots (LoadOp) as well as registers the terminals
-  // (StoreOp) for post-processing vectorization (we need to wait for all
-  // use-def chains into them to be vectorized first).
+  // This also vectorizes the roots (AffineLoadOp) as well as registers the
+  // terminals (AffineStoreOp) for post-processing vectorization (we need to
+  // wait for all use-def chains into them to be vectorized first).
   if (failed(vectorizeLoopsAndLoadsRecursively(m, &state))) {
     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ failed root vectorizeLoop");
     return guard.failure();
diff --git a/mlir/test/EDSC/builder-api-test.cpp b/mlir/test/EDSC/builder-api-test.cpp
index 788857c..2f01085 100644
--- a/mlir/test/EDSC/builder-api-test.cpp
+++ b/mlir/test/EDSC/builder-api-test.cpp
@@ -545,6 +545,8 @@
 }
 
 // Inject an EDSC-constructed computation to exercise 2-d vectorization.
+// TODO(ntv,andydavis) Convert EDSC to use AffineLoad/Store.
+/*
 TEST_FUNC(vectorize_2d) {
   using namespace edsc;
   using namespace edsc::intrinsics;
@@ -572,17 +574,23 @@
   });
   ret();
 
-  // CHECK-LABEL: func @vectorize_2d
-  //  CHECK-NEXT: %[[M:.*]] = dim %arg0, 0 : memref<?x?x?xf32>
-  //  CHECK-NEXT: %[[N:.*]] = dim %arg0, 1 : memref<?x?x?xf32>
-  //  CHECK-NEXT: %[[P:.*]] = dim %arg0, 2 : memref<?x?x?xf32>
-  //  CHECK-NEXT: affine.for %i0 = 0 to (d0) -> (d0)(%[[M]]) {
-  //  CHECK-NEXT:   affine.for %i1 = 0 to (d0) -> (d0)(%[[N]]) step 4 {
-  //  CHECK-NEXT:     affine.for %i2 = 0 to (d0) -> (d0)(%[[P]]) step 4 {
-  //  CHECK-NEXT:       %[[vA:.*]] = "vector.transfer_read"(%arg1, %i0, %i1, %i2) {permutation_map = (d0, d1, d2) -> (d1, d2)} : (memref<?x?x?xf32>, index, index, index) -> vector<4x4xf32>
-  //  CHECK-NEXT:       %[[vB:.*]] =  "vector.transfer_read"(%arg0, %i0, %i1, %i2) {permutation_map = (d0, d1,  d2) -> (d1, d2)} : (memref<?x?x?xf32>, index, index, index) -> vector<4x4xf32>
-  //  CHECK-NEXT:       %[[vRES:.*]] = addf %[[vB]], %[[vA]] : vector<4x4xf32>
-  //  CHECK-NEXT:       "vector.transfer_write"(%[[vRES:.*]], %arg2, %i0, %i1, %i2) {permutation_map = (d0, d1, d2) -> (d1, d2)} : (vector<4x4xf32>, memref<?x?x?xf32>, index, index, index) -> ()
+  // xCHECK-LABEL: func @vectorize_2d
+  //  xCHECK-NEXT: %[[M:.*]] = dim %arg0, 0 : memref<?x?x?xf32>
+  //  xCHECK-NEXT: %[[N:.*]] = dim %arg0, 1 : memref<?x?x?xf32>
+  //  xCHECK-NEXT: %[[P:.*]] = dim %arg0, 2 : memref<?x?x?xf32>
+  //  xCHECK-NEXT: affine.for %i0 = 0 to (d0) -> (d0)(%[[M]]) {
+  //  xCHECK-NEXT:   affine.for %i1 = 0 to (d0) -> (d0)(%[[N]]) step 4 {
+  //  xCHECK-NEXT:     affine.for %i2 = 0 to (d0) -> (d0)(%[[P]]) step 4 {
+  //  xCHECK-NEXT:       %[[vA:.*]] = "vector.transfer_read"(%arg1, %i0, %i1,
+%i2) {permutation_map = (d0, d1, d2) -> (d1, d2)} : (memref<?x?x?xf32>, index,
+index, index) -> vector<4x4xf32>
+  //  xCHECK-NEXT:       %[[vB:.*]] =  "vector.transfer_read"(%arg0, %i0, %i1,
+%i2) {permutation_map = (d0, d1,  d2) -> (d1, d2)} : (memref<?x?x?xf32>, index,
+index, index) -> vector<4x4xf32>
+  //  xCHECK-NEXT:       %[[vRES:.*]] = addf %[[vB]], %[[vA]] : vector<4x4xf32>
+  //  xCHECK-NEXT:       "vector.transfer_write"(%[[vRES:.*]], %arg2, %i0, %i1,
+%i2) {permutation_map = (d0, d1, d2) -> (d1, d2)} : (vector<4x4xf32>,
+memref<?x?x?xf32>, index, index, index) -> ()
   // clang-format on
 
   mlir::PassManager pm;
@@ -594,7 +602,7 @@
     f.print(llvm::outs());
   f.erase();
 }
-
+*/
 int main() {
   RUN_TESTS();
   return 0;
diff --git a/mlir/test/Transforms/Vectorize/materialize_vectors_1d_to_1d.mlir b/mlir/test/Transforms/Vectorize/materialize_vectors_1d_to_1d.mlir
index 6d365ed..88a62ea 100644
--- a/mlir/test/Transforms/Vectorize/materialize_vectors_1d_to_1d.mlir
+++ b/mlir/test/Transforms/Vectorize/materialize_vectors_1d_to_1d.mlir
@@ -37,7 +37,7 @@
   affine.for %i0 = 0 to %M {
     affine.for %i1 = 0 to %N {
       // non-scoped %f1
-      store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
+      affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
     }
   }
   // 4x unroll (jammed by construction).
@@ -63,7 +63,7 @@
   affine.for %i2 = 0 to %M {
     affine.for %i3 = 0 to %N {
       // non-scoped %f2
-      store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
+      affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
     }
   }
   // 4x unroll (jammed by construction).
@@ -112,14 +112,14 @@
   //
   affine.for %i4 = 0 to %M {
     affine.for %i5 = 0 to %N {
-      %a5 = load %A[%i4, %i5] : memref<?x?xf32, 0>
-      %b5 = load %B[%i4, %i5] : memref<?x?xf32, 0>
+      %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
+      %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
       %s5 = addf %a5, %b5 : f32
-      store %s5, %C[%i4, %i5] : memref<?x?xf32, 0>
+      affine.store %s5, %C[%i4, %i5] : memref<?x?xf32, 0>
     }
   }
   %c7 = constant 7 : index
   %c42 = constant 42 : index
-  %res = load %C[%c7, %c42] : memref<?x?xf32, 0>
+  %res = affine.load %C[%c7, %c42] : memref<?x?xf32, 0>
   return %res : f32
 }
diff --git a/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_1d.mlir b/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_1d.mlir
index 28059f3..93e42ec 100644
--- a/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_1d.mlir
+++ b/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_1d.mlir
@@ -44,7 +44,7 @@
   affine.for %i0 = 0 to %M {
     affine.for %i1 = 0 to %N {
       // non-scoped %f1
-      store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
+      affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
     }
   }
   // (3x2)x unroll (jammed by construction).
@@ -55,7 +55,7 @@
     affine.for %i3 = 0 to %N {
       // non-scoped %f2
       // CHECK does (3x4)x unrolling.
-      store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
+      affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
     }
   }
   // (3x2)x unroll (jammed by construction).
@@ -124,14 +124,14 @@
   //
   affine.for %i4 = 0 to %M {
     affine.for %i5 = 0 to %N {
-      %a5 = load %A[%i4, %i5] : memref<?x?xf32, 0>
-      %b5 = load %B[%i4, %i5] : memref<?x?xf32, 0>
+      %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
+      %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
       %s5 = addf %a5, %b5 : f32
-      store %s5, %C[%i4, %i5] : memref<?x?xf32, 0>
+      affine.store %s5, %C[%i4, %i5] : memref<?x?xf32, 0>
     }
   }
   %c7 = constant 7 : index
   %c42 = constant 42 : index
-  %res = load %C[%c7, %c42] : memref<?x?xf32, 0>
+  %res = affine.load %C[%c7, %c42] : memref<?x?xf32, 0>
   return %res : f32
 }
diff --git a/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_2d.mlir b/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_2d.mlir
index 29b99f8..ad6452f 100644
--- a/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_2d.mlir
@@ -27,7 +27,7 @@
   affine.for %i0 = 0 to %M {
     affine.for %i1 = 0 to %N {
       // non-scoped %f1
-      store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
+      affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
     }
   }
   // 2x unroll (jammed by construction).
@@ -45,7 +45,7 @@
   affine.for %i2 = 0 to %M {
     affine.for %i3 = 0 to %N {
       // non-scoped %f2
-      store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
+      affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
     }
   }
   // 2x unroll (jammed by construction).
@@ -74,14 +74,14 @@
   //
   affine.for %i4 = 0 to %M {
     affine.for %i5 = 0 to %N {
-      %a5 = load %A[%i4, %i5] : memref<?x?xf32, 0>
-      %b5 = load %B[%i4, %i5] : memref<?x?xf32, 0>
+      %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
+      %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
       %s5 = addf %a5, %b5 : f32
-      store %s5, %C[%i4, %i5] : memref<?x?xf32, 0>
+      affine.store %s5, %C[%i4, %i5] : memref<?x?xf32, 0>
     }
   }
   %c7 = constant 7 : index
   %c42 = constant 42 : index
-  %res = load %C[%c7, %c42] : memref<?x?xf32, 0>
+  %res = affine.load %C[%c7, %c42] : memref<?x?xf32, 0>
   return %res : f32
 }
diff --git a/mlir/test/Transforms/Vectorize/vectorize_1d.mlir b/mlir/test/Transforms/Vectorize/vectorize_1d.mlir
index 71f92b9..48b0ca6 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_1d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_1d.mlir
@@ -23,9 +23,11 @@
    %cst0 = constant 0 : index
 //
 // CHECK: for {{.*}} step 128
-// CHECK-NEXT: {{.*}} = vector.transfer_read %arg0[%[[C0]], %[[C0]]] {permutation_map = #[[map_proj_d0d1_0]]} : memref<?x?xf32>, vector<128xf32>
+// CHECK-NEXT: %3 = affine.apply #map0(%[[C0]])
+// CHECK-NEXT: %4 = affine.apply #map0(%[[C0]])
+// CHECK-NEXT: {{.*}} = vector.transfer_read %arg0[%3, %4] {permutation_map = #[[map_proj_d0d1_0]]} : memref<?x?xf32>, vector<128xf32>
    affine.for %i0 = 0 to %M { // vectorized due to scalar -> vector
-     %a0 = load %A[%cst0, %cst0] : memref<?x?xf32>
+     %a0 = affine.load %A[%cst0, %cst0] : memref<?x?xf32>
    }
    return
 }
@@ -42,11 +44,9 @@
    %cst0 = constant 0 : index
 //
 // CHECK:for [[IV3:%[a-zA-Z0-9]+]] = 0 to [[ARG_M]] step 128
-// CHECK-NEXT:   %[[APP3:[a-zA-Z0-9]+]] = affine.apply {{.*}}[[IV3]]
-// CHECK-NEXT:   {{.*}} = vector.transfer_read %arg0[%[[C0]], %[[APP3]]] {permutation_map = #[[map_proj_d0d1_d1]]} : memref<?x?xf32>, vector<128xf32>
+// CHECK-NEXT:   {{.*}} = vector.transfer_read %arg0[%c0, %i0] {permutation_map = #[[map_proj_d0d1_d1]]} : memref<?x?xf32>, vector<128xf32>
    affine.for %i3 = 0 to %M { // vectorized
-     %r3 = affine.apply (d0) -> (d0) (%i3)
-     %a3 = load %A[%cst0, %r3] : memref<?x?xf32>
+     %a3 = affine.load %A[%cst0, %i3] : memref<?x?xf32>
    }
    return
 }
@@ -64,14 +64,12 @@
 //
 // CHECK:for [[IV8:%[i0-9]+]] = 0 to [[ARG_M]] step 128
 // CHECK-NEXT:   for [[IV9:%[i0-9]*]] = 0 to [[ARG_N]] {
-// CHECK-NEXT:   %[[APP9_0:[0-9]+]] = affine.apply {{.*}}([[IV8]], [[IV9]])
-// CHECK-NEXT:   %[[APP9_1:[0-9]+]] = affine.apply {{.*}}([[IV8]], [[IV9]])
+// CHECK-NEXT:   %[[APP9_0:[0-9]+]] = affine.apply {{.*}}([[IV9]], [[IV8]])
+// CHECK-NEXT:   %[[APP9_1:[0-9]+]] = affine.apply {{.*}}([[IV9]], [[IV8]])
 // CHECK-NEXT:   {{.*}} = vector.transfer_read %arg0[%[[APP9_0]], %[[APP9_1]]] {permutation_map = #[[map_proj_d0d1_d1]]} : memref<?x?xf32>, vector<128xf32>
    affine.for %i8 = 0 to %M { // vectorized
      affine.for %i9 = 0 to %N {
-       %r90 = affine.apply (d0, d1) -> (d1) (%i8, %i9)
-       %r91 = affine.apply (d0, d1) -> (d0 + d1) (%i8, %i9)
-       %a9 = load %A[%r90, %r91] : memref<?x?xf32>
+       %a9 = affine.load %A[%i9, %i8 + %i9] : memref<?x?xf32>
      }
    }
    return
@@ -89,7 +87,7 @@
       // CHECK: [[C1:%.*]] = constant dense<1.000000e+00> : vector<128xf32>
       // CHECK: vector.transfer_write [[C1]], {{.*}} {permutation_map = #[[map_proj_d0d1_d1]]} : vector<128xf32>, memref<?x?xf32>
       // non-scoped %f1
-      store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
+      affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
     }
   }
   affine.for %i2 = 0 to %M {
@@ -97,7 +95,7 @@
       // CHECK: [[C3:%.*]] = constant dense<2.000000e+00> : vector<128xf32>
       // CHECK: vector.transfer_write [[C3]], {{.*}} {permutation_map = #[[map_proj_d0d1_d1]]} : vector<128xf32>, memref<?x?xf32>
       // non-scoped %f2
-      store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
+      affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
     }
   }
   affine.for %i4 = 0 to %M {
@@ -111,8 +109,8 @@
       // CHECK: [[S7:%.*]] = addf [[S5]], [[SPLAT2]] : vector<128xf32>
       // CHECK: [[S8:%.*]] = addf [[S7]], [[S6]] : vector<128xf32>
       // CHECK: vector.transfer_write [[S8]], {{.*}} {permutation_map = #[[map_proj_d0d1_d1]]} : vector<128xf32>, memref<?x?xf32>
-      %a5 = load %A[%i4, %i5] : memref<?x?xf32, 0>
-      %b5 = load %B[%i4, %i5] : memref<?x?xf32, 0>
+      %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
+      %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
       %s5 = addf %a5, %b5 : f32
       // non-scoped %f1
       %s6 = addf %s5, %f1 : f32
@@ -120,12 +118,12 @@
       %s7 = addf %s5, %f2 : f32
       // diamond dependency.
       %s8 = addf %s7, %s6 : f32
-      store %s8, %C[%i4, %i5] : memref<?x?xf32, 0>
+      affine.store %s8, %C[%i4, %i5] : memref<?x?xf32, 0>
     }
   }
   %c7 = constant 7 : index
   %c42 = constant 42 : index
-  %res = load %C[%c7, %c42] : memref<?x?xf32, 0>
+  %res = affine.load %C[%c7, %c42] : memref<?x?xf32, 0>
   return %res : f32
 }
 
@@ -142,7 +140,7 @@
 //
 // CHECK:for {{.*}} [[ARG_M]] {
    affine.for %i1 = 0 to %M { // not vectorized
-     %a1 = load %A[%i1, %i1] : memref<?x?xf32>
+     %a1 = affine.load %A[%i1, %i1] : memref<?x?xf32>
    }
    return
 }
@@ -160,8 +158,7 @@
 //
 // CHECK:   affine.for %i{{[0-9]*}} = 0 to [[ARG_M]] {
    affine.for %i2 = 0 to %M { // not vectorized, would vectorize with --test-fastest-varying=1
-     %r2 = affine.apply (d0) -> (d0) (%i2)
-     %a2 = load %A[%r2, %cst0] : memref<?x?xf32>
+     %a2 = affine.load %A[%i2, %cst0] : memref<?x?xf32>
    }
    return
 }
@@ -179,14 +176,10 @@
 //
 // CHECK:for [[IV4:%[i0-9]+]] = 0 to [[ARG_M]] step 128 {
 // CHECK-NEXT:   for [[IV5:%[i0-9]*]] = 0 to [[ARG_N]] {
-// CHECK-NEXT:   %[[APP50:[0-9]+]] = affine.apply {{.*}}([[IV4]], [[IV5]])
-// CHECK-NEXT:   %[[APP51:[0-9]+]] = affine.apply {{.*}}([[IV4]], [[IV5]])
-// CHECK-NEXT:   {{.*}} = vector.transfer_read %arg0[%[[APP50]], %[[APP51]]] {permutation_map = #[[map_proj_d0d1_d1]]} : memref<?x?xf32>, vector<128xf32>
+// CHECK-NEXT:   {{.*}} = vector.transfer_read %arg0[%i1, %i0] {permutation_map = #[[map_proj_d0d1_d1]]} : memref<?x?xf32>, vector<128xf32>
    affine.for %i4 = 0 to %M { // vectorized
      affine.for %i5 = 0 to %N { // not vectorized, would vectorize with --test-fastest-varying=1
-       %r50 = affine.apply (d0, d1) -> (d1) (%i4, %i5)
-       %r51 = affine.apply (d0, d1) -> (d0) (%i4, %i5)
-       %a5 = load %A[%r50, %r51] : memref<?x?xf32>
+       %a5 = affine.load %A[%i5, %i4] : memref<?x?xf32>
      }
    }
    return
@@ -207,9 +200,7 @@
 // CHECK-NEXT:   for [[IV7:%[i0-9]*]] = 0 to [[ARG_N]] {
    affine.for %i6 = 0 to %M { // not vectorized, would vectorize with --test-fastest-varying=1
      affine.for %i7 = 0 to %N { // not vectorized, can never vectorize
-       %r70 = affine.apply (d0, d1) -> (d1 + d0) (%i6, %i7)
-       %r71 = affine.apply (d0, d1) -> (d0) (%i6, %i7)
-       %a7 = load %A[%r70, %r71] : memref<?x?xf32>
+       %a7 = affine.load %A[%i6 + %i7, %i6] : memref<?x?xf32>
      }
    }
    return
@@ -230,12 +221,8 @@
 // CHECK:   for [[IV11:%[i0-9]*]] = 0 to %{{[0-9]*}} {
    affine.for %i10 = 0 to %M { // not vectorized, need per load transposes
      affine.for %i11 = 0 to %N { // not vectorized, need per load transposes
-       %r11_0 = affine.apply (d0, d1) -> (d0) (%i10, %i11)
-       %r11_1 = affine.apply (d0, d1) -> (d1) (%i10, %i11)
-       %a11 = load %A[%r11_0, %r11_1] : memref<?x?xf32>
-       %r12_0 = affine.apply (d0, d1) -> (d1) (%i10, %i11)
-       %r12_1 = affine.apply (d0, d1) -> (d0) (%i10, %i11)
-       store %a11, %A[%r12_0, %r12_1] : memref<?x?xf32>
+       %a11 = affine.load %A[%i10, %i11] : memref<?x?xf32>
+       affine.store %a11, %A[%i11, %i10] : memref<?x?xf32>
      }
    }
    return
@@ -258,10 +245,7 @@
    affine.for %i12 = 0 to %M { // not vectorized, can never vectorize
      affine.for %i13 = 0 to %N { // not vectorized, can never vectorize
        affine.for %i14 = 0 to %P { // vectorized
-         %r14_0 = affine.apply (d0, d1, d2) -> (d1) (%i12, %i13, %i14)
-         %r14_1 = affine.apply (d0, d1, d2) -> (d0 + d1) (%i12, %i13, %i14)
-         %r14_2 = affine.apply (d0, d1, d2) -> (d0 + d2) (%i12, %i13, %i14)
-         %a14 = load %B[%r14_0, %r14_1, %r14_2] : memref<?x?x?xf32>
+         %a14 = affine.load %B[%i13, %i12 + %i13, %i12 + %i14] : memref<?x?x?xf32>
        }
      }
    }
@@ -282,7 +266,7 @@
 // CHECK:  affine.for %i{{[0-9]*}} = 0 to %{{[0-9]*}} {
    affine.for %i16 = 0 to %M { // not vectorized, can't vectorize a vector load
      %a16 = alloc(%M) : memref<?xvector<2xf32>>
-     %l16 = load %a16[%i16] : memref<?xvector<2xf32>>
+     %l16 = affine.load %a16[%i16] : memref<?xvector<2xf32>>
    }
    return
 }
@@ -300,10 +284,12 @@
 //
 // CHECK: affine.for %i{{[0-9]*}} = 0 to %{{[0-9]*}} {
 // CHECK:   for [[IV18:%[a-zA-Z0-9]+]] = 0 to [[ARG_M]] step 128
-// CHECK:     {{.*}} = vector.transfer_read %arg0[%[[C0]], %[[C0]]] {permutation_map = #[[map_proj_d0d1_0]]} : memref<?x?xf32>, vector<128xf32>
+// CHECK:     %3 = affine.apply #map0(%c0)
+// CHECK:     %4 = affine.apply #map0(%c0)
+// CHECK:     {{.*}} = vector.transfer_read %arg0[%3, %4] {permutation_map = #[[map_proj_d0d1_0]]} : memref<?x?xf32>, vector<128xf32>
    affine.for %i17 = 0 to %M { // not vectorized, the 1-D pattern that matched %i18 in DFS post-order prevents vectorizing %i17
      affine.for %i18 = 0 to %M { // vectorized due to scalar -> vector
-       %a18 = load %A[%cst0, %cst0] : memref<?x?xf32>
+       %a18 = affine.load %A[%cst0, %cst0] : memref<?x?xf32>
      }
    }
    return
@@ -322,10 +308,12 @@
 //
 // CHECK: affine.for %i{{[0-9]*}} = 0 to %{{[0-9]*}} {
 // CHECK:   for [[IV18:%[a-zA-Z0-9]+]] = 0 to [[ARG_M]] step 128
-// CHECK:     {{.*}} = vector.transfer_read %arg0[%[[C0]], %[[C0]]] {permutation_map = #[[map_proj_d0d1_0]]} : memref<?x?xf32>, vector<128xf32>
+// CHECK:      %3 = affine.apply #map0(%c0)
+// CHECK-NEXT: %4 = affine.apply #map0(%c0)
+// CHECK-NEXT: {{.*}} = vector.transfer_read %arg0[%3, %4] {permutation_map = #[[map_proj_d0d1_0]]} : memref<?x?xf32>, vector<128xf32>
    affine.for %i17 = 0 to %M { // not vectorized, the 1-D pattern that matched %i18 in DFS post-order prevents vectorizing %i17
      affine.for %i18 = 0 to %M { // vectorized due to scalar -> vector
-       %a18 = load %A[%cst0, %cst0] : memref<?x?xf32>
+       %a18 = affine.load %A[%cst0, %cst0] : memref<?x?xf32>
      }
    }
    return
@@ -345,7 +333,7 @@
 // CHECK:  affine.for %i{{[0-9]*}} = 0 to %{{[0-9]*}} {
    affine.for %i15 = 0 to %M { // not vectorized due to condition below
      affine.if #set0(%i15) {
-       %a15 = load %A[%cst0, %cst0] : memref<?x?xf32>
+       %a15 = affine.load %A[%cst0, %cst0] : memref<?x?xf32>
      }
    }
    return
@@ -357,13 +345,13 @@
   %N = dim %A, 0 : memref<?x?xf32>
   affine.for %i = 0 to %N {
 // CHECK-NOT: vector
-    %a = load %A[%i, %i] : memref<?x?xf32> // not vectorized
+    %a = affine.load %A[%i, %i] : memref<?x?xf32> // not vectorized
     affine.for %j = 0 to %N {
-      %b = load %A[%i, %j] : memref<?x?xf32> // may be vectorized
+      %b = affine.load %A[%i, %j] : memref<?x?xf32> // may be vectorized
 // CHECK-NOT: vector
       %c = addf %a, %b : f32 // not vectorized because %a wasn't
 // CHECK-NOT: vector
-      store %c, %C[%i, %j] : memref<?x?xf32> // not vectorized because %c wasn't
+      affine.store %c, %C[%i, %j] : memref<?x?xf32> // not vectorized because %c wasn't
     }
   }
   return
@@ -375,10 +363,9 @@
   %N = dim %A, 0 : memref<?xf32>
   affine.for %i = 0 to %N {
     // CHECK-NOT: vector
-    %a = load %A[%i] : memref<?xf32>
+    %a = affine.load %A[%i] : memref<?xf32>
     // CHECK-NOT: vector
-    %ip1 = affine.apply (d0)->(d0 + 1) (%i)
-    store %a, %A[%ip1] : memref<?xf32>
+    affine.store %a, %A[%i + 1] : memref<?xf32>
   }
   return
 }
diff --git a/mlir/test/Transforms/Vectorize/vectorize_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_2d.mlir
index b4b1611..a44dc54 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_2d.mlir
@@ -26,7 +26,7 @@
    affine.for %i0 = 0 to %M {
      affine.for %i1 = 0 to %N {
        affine.for %i2 = 0 to %P {
-         %a2 = load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
+         %a2 = affine.load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
        }
      }
    }
@@ -38,7 +38,7 @@
    affine.for %i3 = 0 to %M {
      affine.for %i4 = 0 to %N {
        affine.for %i5 = 0 to %P {
-         %a5 = load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
+         %a5 = affine.load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
        }
      }
    }
@@ -56,7 +56,7 @@
       // CHECK: [[C1:%.*]] = constant dense<1.000000e+00> : vector<32x256xf32>
       // CHECK: vector.transfer_write [[C1]], {{.*}} {permutation_map = #[[map_id2]]} : vector<32x256xf32>, memref<?x?xf32>
       // non-scoped %f1
-      store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
+      affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
     }
   }
   affine.for %i2 = 0 to %M {
@@ -64,7 +64,7 @@
       // CHECK: [[C3:%.*]] = constant dense<2.000000e+00> : vector<32x256xf32>
       // CHECK: vector.transfer_write [[C3]], {{.*}} {permutation_map = #[[map_id2]]}  : vector<32x256xf32>, memref<?x?xf32>
       // non-scoped %f2
-      store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
+      affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
     }
   }
   affine.for %i4 = 0 to %M {
@@ -79,8 +79,8 @@
       // CHECK: [[S8:%.*]] = addf [[S7]], [[S6]] : vector<32x256xf32>
       // CHECK: vector.transfer_write [[S8]], {{.*}} {permutation_map = #[[map_id2]]} : vector<32x256xf32>, memref<?x?xf32>
       //
-      %a5 = load %A[%i4, %i5] : memref<?x?xf32, 0>
-      %b5 = load %B[%i4, %i5] : memref<?x?xf32, 0>
+      %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
+      %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
       %s5 = addf %a5, %b5 : f32
       // non-scoped %f1
       %s6 = addf %s5, %f1 : f32
@@ -88,12 +88,12 @@
       %s7 = addf %s5, %f2 : f32
       // diamond dependency.
       %s8 = addf %s7, %s6 : f32
-      store %s8, %C[%i4, %i5] : memref<?x?xf32, 0>
+      affine.store %s8, %C[%i4, %i5] : memref<?x?xf32, 0>
     }
   }
   %c7 = constant 7 : index
   %c42 = constant 42 : index
-  %res = load %C[%c7, %c42] : memref<?x?xf32, 0>
+  %res = affine.load %C[%c7, %c42] : memref<?x?xf32, 0>
   return %res : f32
 }
 
@@ -114,7 +114,7 @@
   affine.for %i0 = (d0) -> (d0)(%c0) to (d0) -> (d0)(%M) {
     affine.for %i1 = (d0) -> (d0)(%c0) to (d0) -> (d0)(%N) {
       %cst = constant 0.000000e+00 : f32
-      store %cst, %arg2[%i0, %i1] : memref<?x?xf32>
+      affine.store %cst, %arg2[%i0, %i1] : memref<?x?xf32>
     }
   }
   //      VECT:  affine.for %[[I2:.*]] = #[[map_id1]](%[[C0]]) to #[[map_id1]](%[[M]]) step 4 {
@@ -129,12 +129,12 @@
   affine.for %i2 = (d0) -> (d0)(%c0) to (d0) -> (d0)(%M) {
     affine.for %i3 = (d0) -> (d0)(%c0) to (d0) -> (d0)(%N) {
       affine.for %i4 = (d0) -> (d0)(%c0) to (d0) -> (d0)(%K) {
-        %6 = load %arg1[%i4, %i3] : memref<?x?xf32>
-        %7 = load %arg0[%i2, %i4] : memref<?x?xf32>
+        %6 = affine.load %arg1[%i4, %i3] : memref<?x?xf32>
+        %7 = affine.load %arg0[%i2, %i4] : memref<?x?xf32>
         %8 = mulf %7, %6 : f32
-        %9 = load %arg2[%i2, %i3] : memref<?x?xf32>
+        %9 = affine.load %arg2[%i2, %i3] : memref<?x?xf32>
         %10 = addf %9, %8 : f32
-        store %10, %arg2[%i2, %i3] : memref<?x?xf32>
+        affine.store %10, %arg2[%i2, %i3] : memref<?x?xf32>
       }
     }
   }
diff --git a/mlir/test/Transforms/Vectorize/vectorize_3d.mlir b/mlir/test/Transforms/Vectorize/vectorize_3d.mlir
index 34db225..98d8ebc 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_3d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_3d.mlir
@@ -18,7 +18,7 @@
        affine.for %i0 = 0 to %0 {
          affine.for %i1 = 0 to %1 {
            affine.for %i2 = 0 to %2 {
-             %a2 = load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
+             %a2 = affine.load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
            }
          }
        }
diff --git a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir
index 00f76d1..b1257d1 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_2d.mlir
@@ -14,7 +14,7 @@
    affine.for %i0 = 0 to %M {
      affine.for %i1 = 0 to %N {
        affine.for %i2 = 0 to %P {
-         %a2 = load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
+         %a2 = affine.load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
        }
      }
    }
@@ -26,7 +26,7 @@
    affine.for %i3 = 0 to %M {
      affine.for %i4 = 0 to %N {
        affine.for %i5 = 0 to %P {
-         %a5 = load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
+         %a5 = affine.load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
        }
      }
    }
diff --git a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir
index 813fef0..7d30162 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_outer_loop_transpose_2d.mlir
@@ -15,7 +15,7 @@
    affine.for %i0 = 0 to %M {
      affine.for %i1 = 0 to %N {
        affine.for %i2 = 0 to %P {
-         %a2 = load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
+         %a2 = affine.load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
        }
      }
    }
@@ -26,7 +26,7 @@
    affine.for %i3 = 0 to %M {
      affine.for %i4 = 0 to %N {
        affine.for %i5 = 0 to %P {
-         %a5 = load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
+         %a5 = affine.load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
        }
      }
    }
@@ -49,15 +49,15 @@
    affine.for %i0 = 0 to %0 {
      affine.for %i1 = 0 to %1 {
        affine.for %i2 = 0 to %2 {
-         %a2 = load %A[%i2, %i1, %i0] : memref<?x?x?xf32>
+         %a2 = affine.load %A[%i2, %i1, %i0] : memref<?x?x?xf32>
        }
      }
      affine.for %i3 = 0 to %1 {
        affine.for %i4 = 0 to %2 {
-         %a4 = load %A[%i3, %i4, %i0] : memref<?x?x?xf32>
+         %a4 = affine.load %A[%i3, %i4, %i0] : memref<?x?x?xf32>
        }
        affine.for %i5 = 0 to %2 {
-         %a5 = load %A[%i3, %i5, %i0] : memref<?x?x?xf32>
+         %a5 = affine.load %A[%i3, %i5, %i0] : memref<?x?x?xf32>
        }
      }
    }
diff --git a/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir
index 99b9bde..f33e434 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_transpose_2d.mlir
@@ -15,7 +15,7 @@
   affine.for %i0 = 0 to %M {
      affine.for %i1 = 0 to %N {
        affine.for %i2 = 0 to %P {
-         %a2 = load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
+         %a2 = affine.load %A[%i0, %i1, %i2] : memref<?x?x?xf32>
        }
      }
    }
@@ -26,7 +26,7 @@
    affine.for %i3 = 0 to %M {
      affine.for %i4 = 0 to %N {
        affine.for %i5 = 0 to %P {
-         %a5 = load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
+         %a5 = affine.load %A[%i4, %i5, %i3] : memref<?x?x?xf32>
        }
      }
    }
@@ -49,15 +49,15 @@
    affine.for %i0 = 0 to %0 {
      affine.for %i1 = 0 to %1 {
        affine.for %i2 = 0 to %2 {
-         %a2 = load %A[%i2, %i1, %i0] : memref<?x?x?xf32>
+         %a2 = affine.load %A[%i2, %i1, %i0] : memref<?x?x?xf32>
        }
      }
      affine.for %i3 = 0 to %1 {
        affine.for %i4 = 0 to %2 {
-         %a4 = load %A[%i3, %i4, %i0] : memref<?x?x?xf32>
+         %a4 = affine.load %A[%i3, %i4, %i0] : memref<?x?x?xf32>
        }
        affine.for %i5 = 0 to %2 {
-         %a5 = load %A[%i3, %i5, %i0] : memref<?x?x?xf32>
+         %a5 = affine.load %A[%i3, %i5, %i0] : memref<?x?x?xf32>
        }
      }
    }
diff --git a/mlir/test/Transforms/dma-generate.mlir b/mlir/test/Transforms/dma-generate.mlir
index 9840580..6275f2e 100644
--- a/mlir/test/Transforms/dma-generate.mlir
+++ b/mlir/test/Transforms/dma-generate.mlir
@@ -13,8 +13,8 @@
 // -----
 
 // Index of the buffer for the second DMA is remapped.
-// CHECK-DAG: [[MAP_MINUS_256:#map[0-9]+]] = (d0) -> (d0 - 256)
 // CHECK-DAG: [[MAP_PLUS_256:#map[0-9]+]] = (d0) -> (d0 + 256)
+// CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0) -> (d0)
 
 // CHECK-LABEL: func @loop_nest_1d() {
 func @loop_nest_1d() {
@@ -27,22 +27,23 @@
   // Tag for first DMA.
   // CHECK:  %4 = alloc() : memref<1xi32>
   // First DMA transfer.
-  // CHECK:  dma_start %0[%c0], %3[%c0], %c256_1, %4[%c0] : memref<256xf32>, memref<256xf32, 2>, memref<1xi32>
-  // CHECK:  dma_wait %4[%c0], %c256_1 : memref<1xi32>
+  // CHECK:  affine.dma_start %0[%c0], %3[%c0], %4[%c0], %c256_1 : memref<256xf32>, memref<256xf32, 2>, memref<1xi32>
+  // CHECK:  affine.dma_wait %4[%c0], %c256_1 : memref<1xi32>
   // Second DMA buffer.
   // CHECK:  %5 = alloc() : memref<256xf32, 2>
   // Tag for second DMA.
   // CHECK:  %6 = alloc() : memref<1xi32>
   // Second DMA transfer.
-  // CHECK:       dma_start %1[%c256], %5[%c0], %c256_0, %6[%c0] : memref<512xf32>, memref<256xf32, 2>, memref<1xi32>
-  // CHECK-NEXT:  dma_wait %6[%c0], %c256_0 : memref<1xi32>
+  // CHECK:       affine.dma_start %1[%c256], %5[%c0], %6[%c0], %c256_0 : memref<512xf32>, memref<256xf32, 2>, memref<1xi32>
+  // CHECK-NEXT:  affine.dma_wait %6[%c0], %c256_0 : memref<1xi32>
   // CHECK: affine.for %i0 = 0 to 256 {
-      // CHECK-NEXT: %7 = load %3[%i0] : memref<256xf32, 2>
+      // CHECK-NEXT: %7 = affine.load %3[%i0] : memref<256xf32, 2>
       // CHECK:      %8 = affine.apply [[MAP_PLUS_256]](%i0)
-      // CHECK:      %9 = affine.apply [[MAP_MINUS_256]](%8)
-      // CHECK-NEXT: %10 = load %5[%9] : memref<256xf32, 2>
+      // Buffer for '%B' in faster memref space is smaller size: 256xf32
+      // Affine map for 'affine.load %5' is composed: %i0 + 256 - 256 = %i0.
+      // CHECK-NEXT: %9 = affine.load %5[%i0] : memref<256xf32, 2>
       // Already in faster memory space.
-      // CHECK:     %11 = load %2[%i0] : memref<256xf32, 2>
+      // CHECK:     %10 = affine.load %2[%i0] : memref<256xf32, 2>
   // CHECK-NEXT: }
   // CHECK-NEXT: dealloc %6 : memref<1xi32>
   // CHECK-NEXT: dealloc %5 : memref<256xf32, 2>
@@ -50,10 +51,10 @@
   // CHECK-NEXT: dealloc %3 : memref<256xf32, 2>
   // CHECK-NEXT: return
   affine.for %i = 0 to 256 {
-    load %A[%i] : memref<256 x f32>
+    affine.load %A[%i] : memref<256 x f32>
     %idx = affine.apply (d0) -> (d0 + 256)(%i)
-    load %B[%idx] : memref<512 x f32>
-    load %F[%i] : memref<256 x f32, 2>
+    affine.load %B[%idx] : memref<512 x f32>
+    affine.load %F[%i] : memref<256 x f32, 2>
   }
   return
 }
@@ -70,41 +71,41 @@
 // CHECK-DAG:  [[TAGC:%[0-9]+]] = alloc() : memref<1xi32>
 // CHECK-DAG:  [[TAGC_W:%[0-9]+]] = alloc() : memref<1xi32>
 // INCOMING DMA for B
-// CHECK-DAG:  dma_start %arg1[%c0, %c0], [[BUFB]][%c0, %c0], %c16384_2, [[TAGB]][%c0] : memref<512x32xf32>, memref<512x32xf32, 2>, memref<1xi32>
-// CHECK-DAG:  dma_wait [[TAGB]][%c0], %c16384_2 : memref<1xi32>
+// CHECK-DAG:  affine.dma_start %arg1[%c0, %c0], [[BUFB]][%c0, %c0], [[TAGB]][%c0], %c16384_2 : memref<512x32xf32>, memref<512x32xf32, 2>, memref<1xi32>
+// CHECK-DAG:  affine.dma_wait [[TAGB]][%c0], %c16384_2 : memref<1xi32>
 // INCOMING DMA for A.
-// CHECK-DAG:  dma_start %arg0[%c0, %c0], [[BUFA]][%c0, %c0], %c16384_1, [[TAGA]][%c0] : memref<512x32xf32>, memref<512x32xf32, 2>, memref<1xi32>
-// CHECK-DAG:  dma_wait [[TAGA]][%c0], %c16384_1 : memref<1xi32>
+// CHECK-DAG:  affine.dma_start %arg0[%c0, %c0], [[BUFA]][%c0, %c0], [[TAGA]][%c0], %c16384_1 : memref<512x32xf32>, memref<512x32xf32, 2>, memref<1xi32>
+// CHECK-DAG:  affine.dma_wait [[TAGA]][%c0], %c16384_1 : memref<1xi32>
 // INCOMING DMA for C.
-// CHECK-DAG:  dma_start %arg2[%c0, %c0], [[BUFC]][%c0, %c0], %c16384_0, [[TAGC]][%c0] : memref<512x32xf32>, memref<512x32xf32, 2>, memref<1xi32>
-// CHECK-DAG:  dma_wait [[TAGC]][%c0], %c16384_0 : memref<1xi32>
+// CHECK-DAG:  affine.dma_start %arg2[%c0, %c0], [[BUFC]][%c0, %c0], [[TAGC]][%c0], %c16384_0 : memref<512x32xf32>, memref<512x32xf32, 2>, memref<1xi32>
+// CHECK-DAG:  affine.dma_wait [[TAGC]][%c0], %c16384_0 : memref<1xi32>
 // CHECK-NEXT:  affine.for %i0 = 0 to 32 {
 // CHECK-NEXT:    affine.for %i1 = 0 to 32 {
 // CHECK-NEXT:      affine.for %i2 = 0 to 32 {
 // CHECK-NEXT:        affine.for %i3 = 0 to 16 {
 // CHECK-NEXT:          %7 = affine.apply #map{{[0-9]+}}(%i1, %i3)
-// CHECK-NEXT:          %8 = load [[BUFB]][%7, %i0] : memref<512x32xf32, 2>
+// CHECK-NEXT:          %8 = affine.load [[BUFB]][%i1 * 16 + %i3, %i0] : memref<512x32xf32, 2>
 // CHECK-NEXT:          "foo"(%8) : (f32) -> ()
 // CHECK-NEXT:        }
 // CHECK-NEXT:        affine.for %i4 = 0 to 16 {
 // CHECK-NEXT:          %9 = affine.apply #map{{[0-9]+}}(%i2, %i4)
-// CHECK-NEXT:          %10 = load [[BUFA]][%9, %i1] : memref<512x32xf32, 2>
+// CHECK-NEXT:          %10 = affine.load [[BUFA]][%i2 * 16 + %i4, %i1] : memref<512x32xf32, 2>
 // CHECK-NEXT:          "bar"(%10) : (f32) -> ()
 // CHECK-NEXT:        }
 // CHECK-NEXT:        affine.for %i5 = 0 to 16 {
 // CHECK-NEXT:          %11 = "abc_compute"() : () -> f32
 // CHECK-NEXT:          %12 = affine.apply #map{{[0-9]+}}(%i2, %i5)
-// CHECK-NEXT:          %13 = load [[BUFC]][%12, %i0] : memref<512x32xf32, 2>
+// CHECK-NEXT:          %13 = affine.load [[BUFC]][%i2 * 16 + %i5, %i0] : memref<512x32xf32, 2>
 // CHECK-NEXT:          %14 = "addf32"(%11, %13) : (f32, f32) -> f32
-// CHECK-NEXT:          store %14, [[BUFC]][%12, %i0] : memref<512x32xf32, 2>
+// CHECK-NEXT:          affine.store %14, [[BUFC]][%i2 * 16 + %i5, %i0] : memref<512x32xf32, 2>
 // CHECK-NEXT:        }
 // CHECK-NEXT:        "foobar"() : () -> ()
 // CHECK-NEXT:      }
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
 // OUTGOING DMA for C.
-// CHECK-NEXT:  dma_start [[BUFC]][%c0, %c0], %arg2[%c0, %c0], %c16384, [[TAGC_W]][%c0] : memref<512x32xf32, 2>, memref<512x32xf32>, memref<1xi32>
-// CHECK-NEXT:  dma_wait [[TAGC_W]][%c0], %c16384 : memref<1xi32>
+// CHECK-NEXT:  affine.dma_start [[BUFC]][%c0, %c0], %arg2[%c0, %c0], [[TAGC_W]][%c0], %c16384 : memref<512x32xf32, 2>, memref<512x32xf32>, memref<1xi32>
+// CHECK-NEXT:  affine.dma_wait [[TAGC_W]][%c0], %c16384 : memref<1xi32>
 // CHECK-NEXT:  dealloc [[TAGC_W]] : memref<1xi32>
 // CHECK-NEXT:  dealloc [[TAGC]] : memref<1xi32>
 // CHECK-NEXT:  dealloc [[BUFC]] : memref<512x32xf32, 2>
@@ -124,20 +125,20 @@
       affine.for %iT = 0 to 32 {
         affine.for %kk = 0 to 16 { // k intratile
           %k = affine.apply (d0, d1) -> (16*d0 + d1) (%kT, %kk)
-          %v0 = load %B[%k, %jT] : memref<512 x 32 x f32>
+          %v0 = affine.load %B[%k, %jT] : memref<512 x 32 x f32>
           "foo"(%v0) : (f32) -> ()
         }
         affine.for %ii = 0 to 16 { // i intratile.
           %i = affine.apply (d0, d1) -> (16*d0 + d1)(%iT, %ii)
-          %v1 = load %A[%i, %kT] : memref<512 x 32 x f32>
+          %v1 = affine.load %A[%i, %kT] : memref<512 x 32 x f32>
           "bar"(%v1) : (f32) -> ()
         }
         affine.for %ii_ = 0 to 16 { // i intratile.
           %v2 = "abc_compute"() : () -> f32
           %i_ = affine.apply (d0, d1) -> (16*d0 + d1)(%iT, %ii_)
-          %v3 =  load %C[%i_, %jT] : memref<512 x 32 x f32>
+          %v3 =  affine.load %C[%i_, %jT] : memref<512 x 32 x f32>
           %v4 = "addf32"(%v2, %v3) : (f32, f32) -> (f32)
-          store %v4, %C[%i_, %jT] : memref<512 x 32 x f32>
+          affine.store %v4, %C[%i_, %jT] : memref<512 x 32 x f32>
         }
         "foobar"() : () -> ()
       }
@@ -157,8 +158,9 @@
 // CHECK-NEXT:      %1 = affine.apply #map{{[0-9]+}}(%i0)
 // CHECK-NEXT:      %2 = alloc() : memref<1x2xf32, 2>
 // CHECK-NEXT:      %3 = alloc() : memref<1xi32>
-// CHECK-NEXT:      dma_start %0[%1, %c0], %2[%c0, %c0], %c2, %3[%c0] : memref<256x8xf32>, memref<1x2xf32, 2>, memref<1xi32>
-// CHECK-NEXT:      dma_wait %3[%c0], %c2 : memref<1xi32>
+// Composition of the affine map for '%0' causes '%c0' to be added as a symbol.
+// CHECK-NEXT:      affine.dma_start %0[%i0, symbol(%c0)], %2[%c0, %c0], %3[%c0], %c2 : memref<256x8xf32>, memref<1x2xf32, 2>, memref<1xi32>
+// CHECK-NEXT:      affine.dma_wait %3[%c0], %c2 : memref<1xi32>
 // CHECK-NEXT:      affine.for %i1 = 0 to 8 {
 //                    ...
 //                    ...
@@ -174,7 +176,7 @@
     affine.for %j = 0 to 8 {
       %idx = affine.apply (d0) -> (d0 mod 2) (%j)
       // A buffer of size 32 x 2 will be allocated (original buffer was 256 x 8).
-      %v = load %A[%i, %idx] : memref<256 x 8 x f32>
+      %v = affine.load %A[%i, %idx] : memref<256 x 8 x f32>
     }
   }
   return
@@ -182,9 +184,6 @@
 
 // -----
 
-// CHECK-DAG: [[MAP_INDEX_DIFF_EVEN:#map[0-9]+]] = (d0, d1, d2, d3) -> (d2 - d0)
-// CHECK-DAG: [[MAP_INDEX_DIFF_ODD:#map[0-9]+]] = (d0, d1, d2, d3) -> (d3 - d1)
-
 // DMA on tiled loop nest. This also tests the case where the bounds are
 // dependent on outer loop IVs.
 // CHECK-LABEL: func @loop_nest_tiled() -> memref<256x1024xf32> {
@@ -195,16 +194,14 @@
 // CHECK:      %3 = alloc() : memref<32x32xf32, 2>
 // CHECK-NEXT: %4 = alloc() : memref<1xi32>
 // Strided DMA here: 32 x 32 tile in a 256 x 1024 memref.
-// CHECK-NEXT: dma_start %0[%1, %2], %3[%c0, %c0], %c1024, %4[%c0], %c1024_0, %c32 : memref<256x1024xf32>, memref<32x32xf32, 2>, memref<1xi32>
-// CHECK-NEXT: dma_wait
+// CHECK-NEXT: affine.dma_start %0[%i0, %i1], %3[%c0, %c0], %4[%c0], %c1024, %c1024_0, %c32 : memref<256x1024xf32>, memref<32x32xf32, 2>, memref<1xi32>
+// CHECK-NEXT: affine.dma_wait
 // CHECK-NEXT: affine.for %i2 = #map
 // CHECK-NEXT:   affine.for %i3 = #map
       affine.for %i2 = (d0) -> (d0)(%i0) to (d0) -> (d0 + 32)(%i0) {
         affine.for %i3 = (d0) -> (d0)(%i1) to (d0) -> (d0 + 32)(%i1) {
-          // CHECK-NEXT: %5 = affine.apply [[MAP_INDEX_DIFF_EVEN]](%i0, %i1, %i2, %i3)
-          // CHECK-NEXT: %6 = affine.apply [[MAP_INDEX_DIFF_ODD]](%i0, %i1, %i2, %i3)
-          // CHECK-NEXT: %7 = load %3[%5, %6] : memref<32x32xf32, 2>
-          %1 = load %0[%i2, %i3] : memref<256x1024xf32>
+          // CHECK: %5 = affine.load %3[-%i0 + %i2, -%i1 + %i3] : memref<32x32xf32, 2>
+          %1 = affine.load %0[%i2, %i3] : memref<256x1024xf32>
         } // CHECK-NEXT: }
       }
     }
@@ -214,9 +211,6 @@
 
 // -----
 
-// CHECK-DAG: [[MAP_D0_MINUS_ONE:#map[0-9]+]] = (d0, d1) -> (d0 - 1)
-// CHECK-DAG: [[MAP_D1:#map[0-9]+]] = (d0, d1) -> (d1)
-
 // CHECK-LABEL: func @dma_constant_dim_access
 func @dma_constant_dim_access(%A : memref<100x100xf32>) {
   %one = constant 1 : index
@@ -224,14 +218,12 @@
   // CHECK:      %0 = alloc() : memref<1x100xf32, 2>
   // CHECK-NEXT: %1 = alloc() : memref<1xi32>
   // No strided DMA needed here.
-  // CHECK:      dma_start %arg0[%c1, %c0], %0[%c0, %c0], %c100, %1[%c0] : memref<100x100xf32>, memref<1x100xf32, 2>,
-  // CHECK-NEXT: dma_wait %1[%c0], %c100 : memref<1xi32>
+  // CHECK:      affine.dma_start %arg0[%c1, %c0], %0[%c0, %c0], %1[%c0], %c100  : memref<100x100xf32>, memref<1x100xf32, 2>,
+  // CHECK-NEXT: affine.dma_wait %1[%c0], %c100 : memref<1xi32>
   affine.for %i = 0 to 100 {
     affine.for %j = 0 to ()[s0] -> (s0) ()[%N] {
-      // CHECK:      %2 = affine.apply [[MAP_D0_MINUS_ONE]](%c1_0, %i1)
-      // CHECK:      %3 = affine.apply [[MAP_D1]](%c1_0, %i1)
-      // CHECK-NEXT: %4 = load %0[%2, %3] : memref<1x100xf32, 2>
-      load %A[%one, %j] : memref<100 x 100 x f32>
+      // CHECK: %2 = affine.load %0[symbol(%c1_0) - 1, %i1] : memref<1x100xf32, 2>
+      affine.load %A[%one, %j] : memref<100 x 100 x f32>
     }
   }
   return
@@ -240,8 +232,6 @@
 // -----
 
 // CHECK-DAG: [[MAP_SYM_SHIFT:#map[0-9]+]] = (d0, d1)[s0, s1] -> (d1 + s0 + s1)
-// CHECK-DAG: [[MAP_3D_D1:#map[0-9]+]] = (d0, d1, d2) -> (d1)
-// CHECK-DAG: [[MAP_SUB_OFFSET:#map[0-9]+]] = (d0, d1, d2) -> (d2 - (d0 + 9))
 
 // CHECK-LABEL: func @dma_with_symbolic_accesses
 func @dma_with_symbolic_accesses(%A : memref<100x100xf32>, %M : index) {
@@ -249,20 +239,18 @@
   affine.for %i = 0 to 100 {
     affine.for %j = 0 to 100 {
       %idy = affine.apply (d0, d1) [s0, s1] -> (d1 + s0 + s1)(%i, %j)[%M, %N]
-      load %A[%i, %idy] : memref<100 x 100 x f32>
+      affine.load %A[%i, %idy] : memref<100 x 100 x f32>
     }
   }
   return
 // CHECK:       %1 = alloc() : memref<100x100xf32, 2>
 // CHECK-NEXT:  %2 = alloc() : memref<1xi32>
-// CHECK-NEXT:  dma_start %arg0[%c0, %0], %1[%c0, %c0], %c10000, %2[%c0]
-// CHECK-NEXT:  dma_wait %2[%c0], %c10000
+// CHECK-NEXT:  affine.dma_start %arg0[symbol(%c0), symbol(%arg1) + 9], %1[%c0, %c0], %2[%c0], %c10000 
+// CHECK-NEXT:  affine.dma_wait %2[%c0], %c10000
 // CHECK-NEXT:  affine.for %i0 = 0 to 100 {
 // CHECK-NEXT:    affine.for %i1 = 0 to 100 {
 // CHECK-NEXT:      %3 = affine.apply [[MAP_SYM_SHIFT]](%i0, %i1)[%arg1, %c9]
-// CHECK-NEXT:      %4 = affine.apply [[MAP_3D_D1]](%arg1, %i0, %3)
-// CHECK-NEXT:      %5 = affine.apply [[MAP_SUB_OFFSET]](%arg1, %i0, %3)
-// CHECK-NEXT:      %6 = load %1[%4, %5] : memref<100x100xf32, 2>
+// CHECK-NEXT:      %4 = affine.load %1[%i0, %i1 + symbol(%c9) - 9] : memref<100x100xf32, 2>
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
 // CHECK:       return
@@ -277,12 +265,12 @@
 // memref size; so the DMA buffer is the entire 100x100.
 // CHECK:       %0 = alloc() : memref<100x100xf32, 2>
 // CHECK-NEXT:  %1 = alloc() : memref<1xi32>
-// CHECK-NEXT:  dma_start %arg0[%c0, %c0], %0[%c0, %c0], %c10000, %1[%c0] : memref<100x100xf32>, memref<100x100xf32, 2>, memref<1xi32>
-// CHECK-NEXT:  dma_wait %1[%c0], %c10000 : memref<1xi32>
+// CHECK-NEXT:  affine.dma_start %arg0[%c0, %c0], %0[%c0, %c0], %1[%c0], %c10000 : memref<100x100xf32>, memref<100x100xf32, 2>, memref<1xi32>
+// CHECK-NEXT:  affine.dma_wait %1[%c0], %c10000 : memref<1xi32>
   affine.for %i = 0 to 100 {
     affine.for %j = %M to %N {
       %idy = affine.apply (d1) [s0] -> (d1 + s0)(%j)[%K]
-      load %A[%i, %idy] : memref<100 x 100 x f32>
+      affine.load %A[%i, %idy] : memref<100 x 100 x f32>
     }
   }
   return
@@ -298,8 +286,8 @@
     affine.for %j = 0 to %N {
       // If this loop nest isn't tiled, the access requires a non-constant DMA
       // size -- not yet implemented.
-      // CHECK: %2 = load %arg0[%i0, %i1] : memref<?x?xf32>
-      load %arg0[%i, %j] : memref<? x ? x f32>
+      // CHECK: %2 = affine.load %arg0[%i0, %i1] : memref<?x?xf32>
+      affine.load %arg0[%i, %j] : memref<? x ? x f32>
       // expected-error@-6 {{DMA generation failed for one or more memref's in this block}}
     }
   }
@@ -318,8 +306,8 @@
         %idz = affine.apply (d0) -> (d0 mod 128)(%k)
         // DMA with nested striding (or emulating with loop around strided DMA)
         // not yet implemented.
-        // CHECK: %5 = load %arg0[%2, %3, %4] : memref<1024x1024x1024xf32>
-        %v = load %arg0[%idx, %idy, %idz] : memref<1024 x 1024 x 1024 x f32>
+        // CHECK: %5 = affine.load %arg0[%2, %3, %4] : memref<1024x1024x1024xf32>
+        %v = affine.load %arg0[%idx, %idy, %idz] : memref<1024 x 1024 x 1024 x f32>
         // expected-error@-10 {{DMA generation failed for one or more memref's in this block}}
       }
     }
@@ -332,8 +320,6 @@
 // CHECK-DAG: [[MAP_PLUS_64:#map[0-9]+]] = (d0) -> (d0 + 64)
 // CHECK-DAG: [[MAP_PLUS_128:#map[0-9]+]] = (d0) -> (d0 + 128)
 // CHECK-DAG: [[MAP_PLUS_2:#map[0-9]+]] = (d0) -> (d0 + 2)
-// CHECK-DAG: [[MAP_D0_MINUS_2:#map[0-9]+]] = (d0, d1) -> (d0 - 2)
-// CHECK-DAG: [[MAP_D1_MINUS_2:#map[0-9]+]] = (d0, d1) -> (d1 - 2)
 // CHECK-DAG: [[MAP_PLUS_192:#map[0-9]+]] = (d0) -> (d0 + 192)
 
 // The first load accesses ([2,258), [128,384))
@@ -353,14 +339,14 @@
       %ishift = affine.apply (d0) -> (d0 + 2)(%i)
       %jshift = affine.apply (d0) -> (d0 + 2)(%j)
 
-      %u = load %A[%ishift, %idy] : memref<512 x 512 x f32>
-      %v = load %A[%idx, %jshift] : memref<512 x 512 x f32>
+      %u = affine.load %A[%ishift, %idy] : memref<512 x 512 x f32>
+      %v = affine.load %A[%idx, %jshift] : memref<512 x 512 x f32>
 
       %sidx = affine.apply (d0) -> (d0 + 128)(%i)
       %sidy = affine.apply (d0) -> (d0 + 192)(%j)
 
-      store %u, %A[%ishift, %sidy] : memref<512 x 512 x f32>
-      store %v, %A[%sidx, %jshift] : memref<512 x 512 x f32>
+      affine.store %u, %A[%ishift, %sidy] : memref<512 x 512 x f32>
+      affine.store %v, %A[%sidx, %jshift] : memref<512 x 512 x f32>
     }
   }
   return
@@ -368,8 +354,8 @@
 // CHECK:       %0 = alloc() : memref<512x512xf32>
 // CHECK-NEXT:  %1 = alloc() : memref<382x446xf32, 2>
 // CHECK-NEXT:  %2 = alloc() : memref<1xi32>
-// CHECK-NEXT:  dma_start %0[%c2_1, %c2_2], %1[%c0, %c0], %c170372_3, %2[%c0], %c512_4, %c446_5 : memref<512x512xf32>, memref<382x446xf32, 2>, memref<1xi32>
-// CHECK-NEXT:  dma_wait %2[%c0], %c170372_3 : memref<1xi32>
+// CHECK-NEXT:  affine.dma_start %0[%c2_1, %c2_2], %1[%c0, %c0], %2[%c0], %c170372_3, %c512_4, %c446_5 : memref<512x512xf32>, memref<382x446xf32, 2>, memref<1xi32>
+// CHECK-NEXT:  affine.dma_wait %2[%c0], %c170372_3 : memref<1xi32>
 // CHECK-NEXT:  %3 = alloc() : memref<1xi32>
 // CHECK-NEXT:  affine.for %i0 = 0 to 256 {
 // CHECK-NEXT:    affine.for %i1 = 0 to 256 {
@@ -377,24 +363,16 @@
 // CHECK-NEXT:      %5 = affine.apply [[MAP_PLUS_128]](%i1)
 // CHECK-NEXT:      %6 = affine.apply [[MAP_PLUS_2]](%i0)
 // CHECK-NEXT:      %7 = affine.apply [[MAP_PLUS_2]](%i1)
-// CHECK-NEXT:      %8 = affine.apply [[MAP_D0_MINUS_2]](%6, %5)
-// CHECK-NEXT:      %9 = affine.apply [[MAP_D1_MINUS_2]](%6, %5)
-// CHECK-NEXT:      %10 = load %1[%8, %9] : memref<382x446xf32, 2>
-// CHECK-NEXT:      %11 = affine.apply [[MAP_D0_MINUS_2]](%4, %7)
-// CHECK-NEXT:      %12 = affine.apply [[MAP_D1_MINUS_2]](%4, %7)
-// CHECK-NEXT:      %13 = load %1[%11, %12] : memref<382x446xf32, 2>
-// CHECK-NEXT:      %14 = affine.apply [[MAP_PLUS_128]](%i0)
-// CHECK-NEXT:      %15 = affine.apply [[MAP_PLUS_192]](%i1)
-// CHECK-NEXT:      %16 = affine.apply [[MAP_D0_MINUS_2]](%6, %15)
-// CHECK-NEXT:      %17 = affine.apply [[MAP_D1_MINUS_2]](%6, %15)
-// CHECK-NEXT:      store %10, %1[%16, %17] : memref<382x446xf32, 2>
-// CHECK-NEXT:      %18 = affine.apply [[MAP_D0_MINUS_2]](%14, %7)
-// CHECK-NEXT:      %19 = affine.apply [[MAP_D1_MINUS_2]](%14, %7)
-// CHECK-NEXT:      store %13, %1[%18, %19] : memref<382x446xf32, 2>
+// CHECK-NEXT:      %8 = affine.load %1[%i0, %i1 + 126] : memref<382x446xf32, 2>
+// CHECK-NEXT:      %9 = affine.load %1[%i0 + 62, %i1] : memref<382x446xf32, 2>
+// CHECK-NEXT:      %10 = affine.apply [[MAP_PLUS_128]](%i0)
+// CHECK-NEXT:      %11 = affine.apply [[MAP_PLUS_192]](%i1)
+// CHECK-NEXT:      affine.store %8, %1[%i0, %i1 + 190] : memref<382x446xf32, 2>
+// CHECK-NEXT:      affine.store %9, %1[%i0 + 126, %i1] : memref<382x446xf32, 2>
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
-// CHECK-NEXT:  dma_start %1[%c0, %c0], %0[%c2, %c2_0], %c170372, %3[%c0], %c512, %c446 : memref<382x446xf32, 2>, memref<512x512xf32>, memref<1xi32>
-// CHECK-NEXT:  dma_wait %3[%c0], %c170372 : memref<1xi32>
+// CHECK-NEXT:  affine.dma_start %1[%c0, %c0], %0[%c2, %c2_0], %3[%c0], %c170372, %c512, %c446 : memref<382x446xf32, 2>, memref<512x512xf32>, memref<1xi32>
+// CHECK-NEXT:  affine.dma_wait %3[%c0], %c170372 : memref<1xi32>
 // CHECK-NEXT:  dealloc %3 : memref<1xi32>
 // CHECK-NEXT:  dealloc %2 : memref<1xi32>
 // CHECK-NEXT:  dealloc %1 : memref<382x446xf32, 2>
@@ -403,19 +381,17 @@
 
 // -----
 
-// CHECK-DAG: [[MAP_MINUS_ONE:#map[0-9]+]] = (d0) -> (d0 - 1)
-
 // CHECK-LABEL: func @dma_loop_straightline_interspersed() {
 func @dma_loop_straightline_interspersed() {
   %c0 = constant 0 : index
   %c255 = constant 255 : index
   %A = alloc() : memref<256 x f32>
-  %v = load %A[%c0] : memref<256 x f32>
+  %v = affine.load %A[%c0] : memref<256 x f32>
   affine.for %i = 1 to 255 {
-    load %A[%i] : memref<256 x f32>
+    affine.load %A[%i] : memref<256 x f32>
   }
-  %l = load %A[%c255] : memref<256 x f32>
-  store %l, %A[%c0] : memref<256 x f32>
+  %l = affine.load %A[%c255] : memref<256 x f32>
+  affine.store %l, %A[%c0] : memref<256 x f32>
   return
 }
 // There are three regions here - the 'load' preceding the loop, the loop
@@ -423,33 +399,32 @@
 // CHECK:       %0 = alloc() : memref<256xf32>
 // CHECK-NEXT:  %1 = alloc() : memref<1xf32, 2>
 // CHECK-NEXT:  %2 = alloc() : memref<1xi32>
-// CHECK-NEXT:  dma_start %0[%c0], %1[%c0], %c1_1, %2[%c0] : memref<256xf32>, memref<1xf32, 2>, memref<1xi32>
-// CHECK-NEXT:  dma_wait %2[%c0], %c1_1 : memref<1xi32>
-// CHECK-NEXT:  %3 = load %1[%c0_2] : memref<1xf32, 2>
+// CHECK-NEXT:  affine.dma_start %0[%c0], %1[%c0], %2[%c0], %c1_1 : memref<256xf32>, memref<1xf32, 2>, memref<1xi32>
+// CHECK-NEXT:  affine.dma_wait %2[%c0], %c1_1 : memref<1xi32>
+// CHECK-NEXT:  %3 = affine.load %1[symbol(%c0_2)] : memref<1xf32, 2>
 // CHECK-NEXT:  dealloc %2 : memref<1xi32>
 // CHECK-NEXT:  dealloc %1 : memref<1xf32, 2>
 // CHECK-NEXT:  %4 = alloc() : memref<254xf32, 2>
 // CHECK-NEXT:  %5 = alloc() : memref<1xi32>
-// CHECK-NEXT:  dma_start %0[%c1], %4[%c0], %c254, %5[%c0] : memref<256xf32>, memref<254xf32, 2>, memref<1xi32>
-// CHECK-NEXT:  dma_wait %5[%c0], %c254 : memref<1xi32>
+// CHECK-NEXT:  affine.dma_start %0[%c1], %4[%c0], %5[%c0], %c254 : memref<256xf32>, memref<254xf32, 2>, memref<1xi32>
+// CHECK-NEXT:  affine.dma_wait %5[%c0], %c254 : memref<1xi32>
 // CHECK-NEXT:  affine.for %i0 = 1 to 255 {
-// CHECK-NEXT:    %6 = affine.apply [[MAP_MINUS_ONE]](%i0)
-// CHECK-NEXT:    %7 = load %4[%6] : memref<254xf32, 2>
+// CHECK-NEXT:    %6 = affine.load %4[%i0 - 1] : memref<254xf32, 2>
 // CHECK-NEXT:  }
 // CHECK-NEXT:  dealloc %5 : memref<1xi32>
 // CHECK-NEXT:  dealloc %4 : memref<254xf32, 2>
-// CHECK-NEXT:  %8 = alloc() : memref<256xf32, 2>
+// CHECK-NEXT:  %7 = alloc() : memref<256xf32, 2>
+// CHECK-NEXT:  %8 = alloc() : memref<1xi32>
+// CHECK-NEXT:  affine.dma_start %0[%c0], %7[%c0], %8[%c0], %c256_0 : memref<256xf32>, memref<256xf32, 2>, memref<1xi32>
+// CHECK-NEXT:  affine.dma_wait %8[%c0], %c256_0 : memref<1xi32>
 // CHECK-NEXT:  %9 = alloc() : memref<1xi32>
-// CHECK-NEXT:  dma_start %0[%c0], %8[%c0], %c256_0, %9[%c0] : memref<256xf32>, memref<256xf32, 2>, memref<1xi32>
-// CHECK-NEXT:  dma_wait %9[%c0], %c256_0 : memref<1xi32>
-// CHECK-NEXT:  %10 = alloc() : memref<1xi32>
-// CHECK-NEXT:  %11 = load %8[%c255] : memref<256xf32, 2>
-// CHECK-NEXT:  store %11, %8[%c0_2] : memref<256xf32, 2>
-// CHECK-NEXT:  dma_start %8[%c0], %0[%c0], %c256, %10[%c0] : memref<256xf32, 2>, memref<256xf32>, memref<1xi32>
-// CHECK-NEXT:  dma_wait %10[%c0], %c256 : memref<1xi32>
-// CHECK-NEXT:  dealloc %10 : memref<1xi32>
+// CHECK-NEXT:  %10 = affine.load %7[symbol(%c255)] : memref<256xf32, 2>
+// CHECK-NEXT:  affine.store %10, %7[symbol(%c0_2)] : memref<256xf32, 2>
+// CHECK-NEXT:  affine.dma_start %7[%c0], %0[%c0], %9[%c0], %c256 : memref<256xf32, 2>, memref<256xf32>, memref<1xi32>
+// CHECK-NEXT:  affine.dma_wait %9[%c0], %c256 : memref<1xi32>
 // CHECK-NEXT:  dealloc %9 : memref<1xi32>
-// CHECK-NEXT:  dealloc %8 : memref<256xf32, 2>
+// CHECK-NEXT:  dealloc %8 : memref<1xi32>
+// CHECK-NEXT:  dealloc %7 : memref<256xf32, 2>
 // CHECK-NEXT:  return
 
 // -----
@@ -459,10 +434,10 @@
   %c0 = constant 0 : index
   %A = alloc() : memref<256 x 256 x vector<8 x f32>>
   affine.for %i = 0 to 256 {
-    %v = load %A[%c0, %c0] : memref<256 x 256 x vector<8 x f32>>
+    %v = affine.load %A[%c0, %c0] : memref<256 x 256 x vector<8 x f32>>
     "foo"(%v) : (vector<8 x f32>) -> ()
     affine.for %j = 0 to 256 {
-      %w = load %A[%i, %j] : memref<256 x 256 x vector<8 x f32>>
+      %w = affine.load %A[%i, %j] : memref<256 x 256 x vector<8 x f32>>
       "bar"(%w) : (vector<8 x f32>) -> ()
     }
   }
@@ -471,12 +446,12 @@
 // CHECK-DAG:   [[MEM:%[0-9]+]] = alloc() : memref<256x256xvector<8xf32>>
 // CHECK-DAG:   [[BUF:%[0-9]+]] = alloc() : memref<256x256xvector<8xf32>, 2>
 // CHECK-DAG:   [[TAG:%[0-9]+]] = alloc() : memref<1xi32>
-// CHECK:       dma_start [[MEM]][%c0, %c0], [[BUF]][%c0, %c0], %c65536, [[TAG]][%c0] : memref<256x256xvector<8xf32>>, memref<256x256xvector<8xf32>, 2>, memref<1xi32>
-// CHECK-NEXT:  dma_wait [[TAG]][%c0], %c65536 : memref<1xi32>
+// CHECK:       affine.dma_start [[MEM]][%c0, %c0], [[BUF]][%c0, %c0], [[TAG]][%c0], %c65536 : memref<256x256xvector<8xf32>>, memref<256x256xvector<8xf32>, 2>, memref<1xi32>
+// CHECK-NEXT:  affine.dma_wait [[TAG]][%c0], %c65536 : memref<1xi32>
 // CHECK-NEXT:  affine.for %i0 = 0 to 256 {
-// CHECK-NEXT:    %3 = load [[BUF]][%c0_0, %c0_0] : memref<256x256xvector<8xf32>, 2>
+// CHECK:         %3 = affine.load [[BUF]][symbol(%c0_0), symbol(%c0_0)] : memref<256x256xvector<8xf32>, 2>
 // CHECK:         affine.for %i1 = 0 to 256 {
-// CHECK-NEXT:      %4 = load [[BUF]][%i0, %i1] : memref<256x256xvector<8xf32>, 2>
+// CHECK-NEXT:      %4 = affine.load [[BUF]][%i0, %i1] : memref<256x256xvector<8xf32>, 2>
 
 // -----
 
@@ -485,7 +460,7 @@
   affine.for %i0 = 0 to 1024 {
     affine.for %i2 = (d0) -> (d0)(%i0) to (d0) -> (d0 + 4)(%i0) {
       %0 = constant 0.0 : f32
-      store %0, %arg0[%i2] : memref<1027xf32>
+      affine.store %0, %arg0[%i2] : memref<1027xf32>
     }
   }
   return
@@ -495,17 +470,16 @@
 // CHECK-NEXT: affine.for %i0 = 0 to 1024 {
 // CHECK-NEXT:    affine.for %i1 = {{#map[0-9]+}}(%i0) to {{#map[0-9]+}}(%i0) {
 // CHECK-NEXT:      %cst = constant 0.000000e+00 : f32
-// CHECK-NEXT:      store %cst, [[BUF]][%i1] : memref<1027xf32, 2>
+// CHECK-NEXT:      affine.store %cst, [[BUF]][%i1] : memref<1027xf32, 2>
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
-// CHECK-NEXT:  dma_start [[BUF]][%c0], %arg0[%c0], %c1027, [[MEM]][%c0] : memref<1027xf32, 2>, memref<1027xf32>, memref<1xi32>
-// CHECK-NEXT:  dma_wait [[MEM]][%c0], %c1027 : memref<1xi32>
+// CHECK-NEXT:  affine.dma_start [[BUF]][%c0], %arg0[%c0], [[MEM]][%c0], %c1027  : memref<1027xf32, 2>, memref<1027xf32>, memref<1xi32>
+// CHECK-NEXT:  affine.dma_wait [[MEM]][%c0], %c1027 : memref<1xi32>
 
 // -----
 
 // CHECK-DAG: [[MAP_READ_OFFSET:#map[0-9]+]] = (d0) -> (d0 + 100)
 // CHECK-DAG: [[MAP_WRITE_OFFSET:#map[0-9]+]] = (d0) -> (d0 + 25)
-// CHECK-DAG: [[MAP_BUFFER_OFFSET:#map[0-9]+]] = (d0) -> (d0 - 25)
 
 func @test_read_write_region_union() {
   %0 = alloc() : memref<256xf32>
@@ -516,8 +490,8 @@
     // union region: [25, 110)
     %a0 = affine.apply (d0) -> (d0 + 100)(%i0)
     %a1 = affine.apply (d0) -> (d0 + 25)(%i0)
-    %1 = load %0[%a0] : memref<256xf32>
-    store %1, %0[%a1] : memref<256xf32>
+    %1 = affine.load %0[%a0] : memref<256xf32>
+    affine.store %1, %0[%a1] : memref<256xf32>
   }
   return
 }
@@ -525,19 +499,17 @@
 // CHECK:       %0 = alloc() : memref<256xf32>
 // CHECK-NEXT:  %1 = alloc() : memref<85xf32, 2>
 // CHECK-NEXT:  %2 = alloc() : memref<1xi32>
-// CHECK-NEXT:  dma_start %0[%c25_0], %1[%c0], %c85_1, %2[%c0] : memref<256xf32>, memref<85xf32, 2>, memref<1xi32>
-// CHECK-NEXT:  dma_wait %2[%c0], %c85_1 : memref<1xi32>
+// CHECK-NEXT:  affine.dma_start %0[%c25_0], %1[%c0], %2[%c0], %c85_1 : memref<256xf32>, memref<85xf32, 2>, memref<1xi32>
+// CHECK-NEXT:  affine.dma_wait %2[%c0], %c85_1 : memref<1xi32>
 // CHECK-NEXT:  %3 = alloc() : memref<1xi32>
 // CHECK-NEXT:  affine.for %i0 = 0 to 10 {
 // CHECK-NEXT:    %4 = affine.apply [[MAP_READ_OFFSET]](%i0)
 // CHECK-NEXT:    %5 = affine.apply [[MAP_WRITE_OFFSET]](%i0)
-// CHECK-NEXT:    %6 = affine.apply [[MAP_BUFFER_OFFSET]](%4)
-// CHECK-NEXT:    %7 = load %1[%6] : memref<85xf32, 2>
-// CHECK-NEXT:    %8 = affine.apply [[MAP_BUFFER_OFFSET]](%5)
-// CHECK-NEXT:    store %7, %1[%8] : memref<85xf32, 2>
+// CHECK-NEXT:    %6 = affine.load %1[%i0 + 75] : memref<85xf32, 2>
+// CHECK-NEXT:    affine.store %6, %1[%i0] : memref<85xf32, 2>
 // CHECK-NEXT:  }
-// CHECK-NEXT:  dma_start %1[%c0], %0[%c25], %c85, %3[%c0] : memref<85xf32, 2>, memref<256xf32>, memref<1xi32>
-// CHECK-NEXT:  dma_wait %3[%c0], %c85 : memref<1xi32>
+// CHECK-NEXT:  affine.dma_start %1[%c0], %0[%c25], %3[%c0], %c85 : memref<85xf32, 2>, memref<256xf32>, memref<1xi32>
+// CHECK-NEXT:  affine.dma_wait %3[%c0], %c85 : memref<1xi32>
 
 // -----
 
@@ -556,10 +528,10 @@
     affine.for %i9 = #map_lb(%i8) to #map_ub(%i8) {
       affine.for %i17 = 0 to 64 {
         %23 = affine.apply #map_acc(%i9)
-        %25 = load %arg2[%23] : memref<2xf32>
+        %25 = affine.load %arg2[%23] : memref<2xf32>
         %26 = affine.apply #map_lb(%i17)
-        %27 = load %0[%26, %c0] : memref<64x1xf32>
-        store %27, %arg2[%23] : memref<2xf32>
+        %27 = affine.load %0[%26, %c0] : memref<64x1xf32>
+        affine.store %27, %arg2[%23] : memref<2xf32>
       }
     }
   }
@@ -567,8 +539,8 @@
 }
 // CHECK:       affine.for %i0 = 0 to 9 step 3 {
 // CHECK:         [[BUF:%[0-9]+]] = alloc() : memref<2xf32, 2>
-// CHECK:         dma_start %arg2[%4], [[BUF]]
-// CHECK:         dma_wait %6[%c0], %c2_0 : memref<1xi32>
+// CHECK:         affine.dma_start %arg2[%i0 floordiv 8], [[BUF]]
+// CHECK:         affine.dma_wait %6[%c0], %c2_0 : memref<1xi32>
 // CHECK:         affine.for %i1 =
 
 // ----
@@ -587,7 +559,7 @@
         %10 = affine.apply #map14(%i9, %i10)
         %11 = affine.apply #map15(%i9, %i10)
         %12 = affine.apply #map16(%i9, %i10)
-        %13 = load %arg0[%10, %11, %12, %c0] : memref<4x4x16x1xvector<8x128xf32>>
+        %13 = affine.load %arg0[%10, %11, %12, %c0] : memref<4x4x16x1xvector<8x128xf32>>
       }
     }
   }
@@ -596,8 +568,8 @@
 
 // CHECK:       %0 = alloc() : memref<4x4x16x1xvector<8x128xf32>, 2>
 // CHECK-NEXT:  %1 = alloc() : memref<1xi32>
-// CHECK-NEXT:  dma_start %arg0[%c0, %c0, %c0, %c0], %0[%c0, %c0, %c0, %c0], %c256, %1[%c0] : memref<4x4x16x1xvector<8x128xf32>>, memref<4x4x16x1xvector<8x128xf32>, 2>, memref<1xi32>
-// CHECK-NEXT:  dma_wait %1[%c0], %c256 : memref<1xi32>
+// CHECK-NEXT:  affine.dma_start %arg0[%c0, %c0, %c0, %c0], %0[%c0, %c0, %c0, %c0], %1[%c0], %c256 : memref<4x4x16x1xvector<8x128xf32>>, memref<4x4x16x1xvector<8x128xf32>, 2>, memref<1xi32>
+// CHECK-NEXT:  affine.dma_wait %1[%c0], %c256 : memref<1xi32>
 
 // -----
 
@@ -609,22 +581,22 @@
   // FAST-MEM-16KB:  affine.for %i0 = 0 to 256 step 4
   affine.for %i0 = 0 to 256 step 4 {
     // FAST-MEM-16KB: [[BUF:%[0-9]+]] = alloc() : memref<4x1024xf32, 2>
-    // FAST-MEM-16KB:    dma_start %arg0
-    // FAST-MEM-16KB-NEXT: dma_wait
+    // FAST-MEM-16KB:    affine.dma_start %arg0
+    // FAST-MEM-16KB-NEXT: affine.dma_wait
     // FAST-MEM-16KB:  affine.for %i1
     affine.for %i1 = 0 to 1024 step 4 {
       // FAST-MEM-16KB:  affine.for %i2
       affine.for %i2 = (d0) -> (d0)(%i0) to (d0) -> (d0 + 4)(%i0) {
         // FAST-MEM-16KB:  affine.for %i3
         affine.for %i3 = (d0) -> (d0)(%i1) to (d0) -> (d0 + 4)(%i1) {
-          %3 = load %arg0[%i2, %i3] : memref<256x1024xf32>
+          %3 = affine.load %arg0[%i2, %i3] : memref<256x1024xf32>
           %4 = mulf %3, %3 : f32
-          store %4, %arg0[%i2, %i3] : memref<256x1024xf32>
+          affine.store %4, %arg0[%i2, %i3] : memref<256x1024xf32>
         } // FAST-MEM-16KB: }
       } // FAST-MEM-16KB: }
     } // FAST-MEM-16KB: }
-    // FAST-MEM-16KB:    dma_start [[BUF]]
-    // FAST-MEM-16KB-NEXT: dma_wait
+    // FAST-MEM-16KB:    affine.dma_start [[BUF]]
+    // FAST-MEM-16KB-NEXT: affine.dma_wait
   }
   return
 }
@@ -648,12 +620,12 @@
         affine.for %ii = #map0(%i) to #map1(%i) {
           affine.for %jj = #map0(%j) to #map1(%j) {
             affine.for %kk = #map0(%k) to #map1(%k) {
-              %5 = load %arg0[%ii, %kk] : memref<8x8xvector<64xf32>>
-              %6 = load %arg1[%kk, %jj] : memref<8x8xvector<64xf32>>
-              %7 = load %arg2[%ii, %jj] : memref<8x8xvector<64xf32>>
+              %5 = affine.load %arg0[%ii, %kk] : memref<8x8xvector<64xf32>>
+              %6 = affine.load %arg1[%kk, %jj] : memref<8x8xvector<64xf32>>
+              %7 = affine.load %arg2[%ii, %jj] : memref<8x8xvector<64xf32>>
               %8 = mulf %5, %6 : vector<64xf32>
               %9 = addf %7, %8 : vector<64xf32>
-              store %9, %arg2[%ii, %jj] : memref<8x8xvector<64xf32>>
+              affine.store %9, %arg2[%ii, %jj] : memref<8x8xvector<64xf32>>
             }
           }
         }
@@ -664,13 +636,13 @@
 }
 // FAST-MEM-16KB: affine.for %i0 = 0 to 8 step 4 {
 // FAST-MEM-16KB:   affine.for %i1 = 0 to 8 step 4 {
-// FAST-MEM-16KB:     dma_start %arg2
-// FAST-MEM-16KB:     dma_wait
+// FAST-MEM-16KB:     affine.dma_start %arg2
+// FAST-MEM-16KB:     affine.dma_wait
 // FAST-MEM-16KB:     affine.for %i2 = 0 to 8 step 4 {
-// FAST-MEM-16KB:       dma_start %arg0
-// FAST-MEM-16KB:       dma_wait
-// FAST-MEM-16KB:       dma_start %arg1
-// FAST-MEM-16KB:       dma_wait
+// FAST-MEM-16KB:       affine.dma_start %arg0
+// FAST-MEM-16KB:       affine.dma_wait
+// FAST-MEM-16KB:       affine.dma_start %arg1
+// FAST-MEM-16KB:       affine.dma_wait
 // FAST-MEM-16KB:       affine.for %i3 = #map{{[0-9]+}}(%i0) to #map{{[0-9]+}}(%i0) {
 // FAST-MEM-16KB-NEXT:    affine.for %i4 = #map{{[0-9]+}}(%i1) to #map{{[0-9]+}}(%i1) {
 // FAST-MEM-16KB-NEXT:      affine.for %i5 = #map{{[0-9]+}}(%i2) to #map{{[0-9]+}}(%i2) {
@@ -678,5 +650,5 @@
 // FAST-MEM-16KB:         }
 // FAST-MEM-16KB:       }
 // FAST-MEM-16KB:     }
-// FAST-MEM-16KB:     dma_start %2[%c0, %c0], %arg2
-// FAST-MEM-16KB:     dma_wait
+// FAST-MEM-16KB:     affine.dma_start %2[%c0, %c0], %arg2
+// FAST-MEM-16KB:     affine.dma_wait
diff --git a/mlir/test/Transforms/loop-fusion-dependence-check.mlir b/mlir/test/Transforms/loop-fusion-dependence-check.mlir
index 697ee9e..4b5c778 100644
--- a/mlir/test/Transforms/loop-fusion-dependence-check.mlir
+++ b/mlir/test/Transforms/loop-fusion-dependence-check.mlir
@@ -18,17 +18,17 @@
   // Fusing loop nest '%i0' and loop nest '%i2' would create a cycle.
   affine.for %i0 = 0 to 10 {
     // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 0 into loop nest 2 at depth 0}}
-    %v0 = load %a[%i0] : memref<10xf32>
-    store %cf7, %b[%i0] : memref<10xf32>
+    %v0 = affine.load %a[%i0] : memref<10xf32>
+    affine.store %cf7, %b[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    store %cf7, %a[%i1] : memref<10xf32>
-    %v1 = load %c[%i1] : memref<10xf32>
+    affine.store %cf7, %a[%i1] : memref<10xf32>
+    %v1 = affine.load %c[%i1] : memref<10xf32>
   }
   affine.for %i2 = 0 to 10 {
     // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 2 into loop nest 0 at depth 0}}
-    %v2 = load %b[%i2] : memref<10xf32>
-    store %cf7, %c[%i2] : memref<10xf32>
+    %v2 = affine.load %b[%i2] : memref<10xf32>
+    affine.store %cf7, %c[%i2] : memref<10xf32>
   }
   return
 }
@@ -51,16 +51,16 @@
 
   // Should fuse: no fusion preventing remarks should be emitted for this test.
   affine.for %i0 = 0 to 10 {
-    %v0 = load %a[%i0] : memref<10xf32>
-    store %cf7, %b[%i0] : memref<10xf32>
+    %v0 = affine.load %a[%i0] : memref<10xf32>
+    affine.store %cf7, %b[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    %v1 = load %a[%i1] : memref<10xf32>
-    %v2 = load %c[%i1] : memref<10xf32>
+    %v1 = affine.load %a[%i1] : memref<10xf32>
+    %v2 = affine.load %c[%i1] : memref<10xf32>
   }
   affine.for %i2 = 0 to 10 {
-    %v3 = load %b[%i2] : memref<10xf32>
-    store %cf7, %c[%i2] : memref<10xf32>
+    %v3 = affine.load %b[%i2] : memref<10xf32>
+    affine.store %cf7, %c[%i2] : memref<10xf32>
   }
   return
 }
@@ -84,16 +84,16 @@
 
   // Should fuse: no fusion preventing remarks should be emitted for this test.
   affine.for %i0 = 0 to 10 {
-    %v0 = load %a[%i0] : memref<10xf32>
-    store %cf7, %b[%i0] : memref<10xf32>
+    %v0 = affine.load %a[%i0] : memref<10xf32>
+    affine.store %cf7, %b[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    store %cf7, %d[%i1] : memref<10xf32>
-    %v1 = load %c[%i1] : memref<10xf32>
+    affine.store %cf7, %d[%i1] : memref<10xf32>
+    %v1 = affine.load %c[%i1] : memref<10xf32>
   }
   affine.for %i2 = 0 to 10 {
-    %v2 = load %b[%i2] : memref<10xf32>
-    store %cf7, %c[%i2] : memref<10xf32>
+    %v2 = affine.load %b[%i2] : memref<10xf32>
+    affine.store %cf7, %c[%i2] : memref<10xf32>
   }
   return
 }
@@ -108,16 +108,16 @@
 
   affine.for %i0 = 0 to 10 {
     // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 0 into loop nest 1 at depth 0}}
-    %v0 = load %0[%i0] : memref<10xf32>
+    %v0 = affine.load %0[%i0] : memref<10xf32>
     "op0"(%v0) : (f32) -> ()
   }
 
   // Should not fuse loop nests '%i0' and '%i1' across top-level store.
-  store %cf7, %0[%c0] : memref<10xf32>
+  affine.store %cf7, %0[%c0] : memref<10xf32>
 
   affine.for %i1 = 0 to 10 {
     // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 1 into loop nest 0 at depth 0}}
-    %v1 = load %0[%i1] : memref<10xf32>
+    %v1 = affine.load %0[%i1] : memref<10xf32>
     "op1"(%v1) : (f32) -> ()
   }
   return
@@ -133,16 +133,16 @@
 
   affine.for %i0 = 0 to 10 {
     // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 0 into loop nest 1 at depth 0}}
-    store %cf7, %0[%i0] : memref<10xf32>
+    affine.store %cf7, %0[%i0] : memref<10xf32>
   }
 
   // Should not fuse loop nests '%i0' and '%i1' across top-level load.
-  %v0 = load %0[%c0] : memref<10xf32>
+  %v0 = affine.load %0[%c0] : memref<10xf32>
   "op0"(%v0) : (f32) -> ()
 
   affine.for %i1 = 0 to 10 {
     // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 1 into loop nest 0 at depth 0}}
-    store %cf7, %0[%i1] : memref<10xf32>
+    affine.store %cf7, %0[%i1] : memref<10xf32>
   }
 
   return
@@ -159,12 +159,12 @@
 
   affine.for %i0 = 0 to 10 {
     // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 0 into loop nest 1 at depth 0}}
-    %v0 = load %0[%i0] : memref<10xf32>
-    store %v0, %1[%i0] : memref<10xf32>
+    %v0 = affine.load %0[%i0] : memref<10xf32>
+    affine.store %v0, %1[%i0] : memref<10xf32>
   }
 
   // Loop nest '%i0" cannot be fused past load from '%1' due to RAW dependence.
-  %v1 = load %1[%c0] : memref<10xf32>
+  %v1 = affine.load %1[%c0] : memref<10xf32>
   "op0"(%v1) : (f32) -> ()
 
   // Loop nest '%i1' cannot be fused past SSA value def '%c2' which it uses.
@@ -172,7 +172,7 @@
 
   affine.for %i1 = 0 to 10 {
     // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 1 into loop nest 0 at depth 0}}
-    store %cf7, %0[%c2] : memref<10xf32>
+    affine.store %cf7, %0[%c2] : memref<10xf32>
   }
 
   return
@@ -188,18 +188,18 @@
 
   affine.for %i0 = 0 to 10 {
     // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 0 into loop nest 2 at depth 0}}
-    store %cf7, %0[%i0] : memref<10xf32>
-    %v0 = load %0[%i0] : memref<10xf32>
+    affine.store %cf7, %0[%i0] : memref<10xf32>
+    %v0 = affine.load %0[%i0] : memref<10xf32>
   }
 
   affine.for %i1 = 0 to 10 {
-    %v1 = load %0[%i1] : memref<10xf32>
+    %v1 = affine.load %0[%i1] : memref<10xf32>
   }
 
   affine.for %i2 = 0 to 10 {
     // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 2 into loop nest 0 at depth 0}}
-    store %cf7, %0[%i2] : memref<10xf32>
-    %v2 = load %0[%i2] : memref<10xf32>
+    affine.store %cf7, %0[%i2] : memref<10xf32>
+    %v2 = affine.load %0[%i2] : memref<10xf32>
   }
   return
 }
@@ -215,14 +215,14 @@
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
       // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 0 into loop nest 1 at depth 1}}
-      store %cf7, %0[%i0, %i1] : memref<10x10xf32>
+      affine.store %cf7, %0[%i0, %i1] : memref<10x10xf32>
     }
 
-    %v1 = load %0[%i0, %c0] : memref<10x10xf32>
+    %v1 = affine.load %0[%i0, %c0] : memref<10x10xf32>
 
     affine.for %i3 = 0 to 10 {
       // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 1 into loop nest 0 at depth 1}}
-      store %cf7, %0[%i0, %i3] : memref<10x10xf32>
+      affine.store %cf7, %0[%i0, %i3] : memref<10x10xf32>
     }
   }
   return
@@ -239,16 +239,16 @@
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
       // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 0 into loop nest 2 at depth 1}}
-      store %cf7, %0[%i0, %i1] : memref<10x10xf32>
+      affine.store %cf7, %0[%i0, %i1] : memref<10x10xf32>
     }
 
     affine.for %i2 = 0 to 10 {
-      %v1 = load %0[%i0, %i2] : memref<10x10xf32>
+      %v1 = affine.load %0[%i0, %i2] : memref<10x10xf32>
     }
 
     affine.for %i3 = 0 to 10 {
       // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 2 into loop nest 0 at depth 1}}
-      store %cf7, %0[%i0, %i3] : memref<10x10xf32>
+      affine.store %cf7, %0[%i0, %i3] : memref<10x10xf32>
     }
   }
   return
@@ -265,14 +265,14 @@
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
       // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 0 into loop nest 1 at depth 1}}
-      %v0 = load %0[%i0, %i1] : memref<10x10xf32>
+      %v0 = affine.load %0[%i0, %i1] : memref<10x10xf32>
     }
 
-    store %cf7, %0[%i0, %c0] : memref<10x10xf32>
+    affine.store %cf7, %0[%i0, %c0] : memref<10x10xf32>
 
     affine.for %i3 = 0 to 10 {
       // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 1 into loop nest 0 at depth 1}}
-      %v1 = load %0[%i0, %i3] : memref<10x10xf32>
+      %v1 = affine.load %0[%i0, %i3] : memref<10x10xf32>
     }
   }
   return
@@ -289,16 +289,16 @@
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
       // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 0 into loop nest 2 at depth 1}}
-      %v0 = load %0[%i0, %i1] : memref<10x10xf32>
+      %v0 = affine.load %0[%i0, %i1] : memref<10x10xf32>
     }
 
     affine.for %i2 = 0 to 10 {
-      store %cf7, %0[%i0, %i2] : memref<10x10xf32>
+      affine.store %cf7, %0[%i0, %i2] : memref<10x10xf32>
     }
 
     affine.for %i3 = 0 to 10 {
       // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 2 into loop nest 0 at depth 1}}
-      %v1 = load %0[%i0, %i3] : memref<10x10xf32>
+      %v1 = affine.load %0[%i0, %i3] : memref<10x10xf32>
     }
   }
   return
@@ -316,13 +316,13 @@
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
       // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 0 into loop nest 1 at depth 1}}
-      %v0 = load %0[%i0, %i1] : memref<10x10xf32>
-      store %v0, %1[%i0, %i1] : memref<10x10xf32>
+      %v0 = affine.load %0[%i0, %i1] : memref<10x10xf32>
+      affine.store %v0, %1[%i0, %i1] : memref<10x10xf32>
     }
 
     // RAW dependence from store in loop nest '%i1' to 'load %1' prevents
     // fusion loop nest '%i1' into loops after load.
-    %v1 = load %1[%i0, %c0] : memref<10x10xf32>
+    %v1 = affine.load %1[%i0, %c0] : memref<10x10xf32>
     "op0"(%v1) : (f32) -> ()
 
     // Loop nest '%i2' cannot be fused past SSA value def '%c2' which it uses.
@@ -330,7 +330,7 @@
 
     affine.for %i2 = 0 to 10 {
       // expected-remark@-1 {{block-level dependence preventing fusion of loop nest 1 into loop nest 0 at depth 1}}
-      store %cf7, %0[%i0, %c2] : memref<10x10xf32>
+      affine.store %cf7, %0[%i0, %c2] : memref<10x10xf32>
     }
   }
   return
diff --git a/mlir/test/Transforms/loop-fusion-slice-computation.mlir b/mlir/test/Transforms/loop-fusion-slice-computation.mlir
index 859b750..1e5e448 100644
--- a/mlir/test/Transforms/loop-fusion-slice-computation.mlir
+++ b/mlir/test/Transforms/loop-fusion-slice-computation.mlir
@@ -8,11 +8,11 @@
   %cst = constant 7.000000e+00 : f32
   affine.for %i0 = 0 to 16 {
     // expected-remark@-1 {{slice ( src loop: 1, dst loop: 0, depth: 1 : insert point: (1, 1) loop bounds: [(d0) -> (d0), (d0) -> (d0 + 1)] )}}
-    store %cst, %0[%i0] : memref<100xf32>
+    affine.store %cst, %0[%i0] : memref<100xf32>
   }
   affine.for %i1 = 0 to 5 {
     // expected-remark@-1 {{slice ( src loop: 0, dst loop: 1, depth: 1 : insert point: (1, 0) loop bounds: [(d0) -> (d0), (d0) -> (d0 + 1)] )}}
-    %1 = load %0[%i1] : memref<100xf32>
+    %1 = affine.load %0[%i1] : memref<100xf32>
   }
   return
 }
@@ -29,12 +29,12 @@
   affine.for %i0 = 0 to 16 {
     // expected-remark@-1 {{slice ( src loop: 1, dst loop: 0, depth: 1 : insert point: (1, 2) loop bounds: [(d0) -> (d0 + 3), (d0) -> (d0 + 4)] )}}
     %a0 = affine.apply (d0) -> (d0 + 2)(%i0)
-    store %cst, %0[%a0] : memref<100xf32>
+    affine.store %cst, %0[%a0] : memref<100xf32>
   }
   affine.for %i1 = 4 to 8 {
     // expected-remark@-1 {{slice ( src loop: 0, dst loop: 1, depth: 1 : insert point: (1, 0) loop bounds: [(d0) -> (d0 - 3), (d0) -> (d0 - 2)] )}}
     %a1 = affine.apply (d0) -> (d0 - 1)(%i1)
-    %1 = load %0[%a1] : memref<100xf32>
+    %1 = affine.load %0[%a1] : memref<100xf32>
   }
   return
 }
@@ -51,14 +51,14 @@
     // expected-remark@-1 {{slice ( src loop: 1, dst loop: 0, depth: 1 : insert point: (1, 1) loop bounds: [(d0) -> (d0), (d0) -> (d0 + 1)] [(d0) -> (0), (d0) -> (8)] )}}
     // expected-remark@-2 {{slice ( src loop: 1, dst loop: 0, depth: 2 : insert point: (2, 1) loop bounds: [(d0, d1) -> (d0), (d0, d1) -> (d0 + 1)] [(d0, d1) -> (d1), (d0, d1) -> (d1 + 1)] )}}
     affine.for %i1 = 0 to 16 {
-      store %cst, %0[%i0, %i1] : memref<100x100xf32>
+      affine.store %cst, %0[%i0, %i1] : memref<100x100xf32>
     }
   }
   affine.for %i2 = 0 to 10 {
     // expected-remark@-1 {{slice ( src loop: 0, dst loop: 1, depth: 1 : insert point: (1, 0) loop bounds: [(d0) -> (d0), (d0) -> (d0 + 1)] [(d0) -> (0), (d0) -> (8)] )}}
     // expected-remark@-2 {{slice ( src loop: 0, dst loop: 1, depth: 2 : insert point: (2, 0) loop bounds: [(d0, d1) -> (d0), (d0, d1) -> (d0 + 1)] [(d0, d1) -> (d1), (d0, d1) -> (d1 + 1)] )}}
     affine.for %i3 = 0 to 8 {
-      %1 = load %0[%i2, %i3] : memref<100x100xf32>
+      %1 = affine.load %0[%i2, %i3] : memref<100x100xf32>
     }
   }
   return
@@ -78,15 +78,15 @@
     // expected-remark@-1 {{slice ( src loop: 1, dst loop: 0, depth: 1 : insert point: (1, 1) loop bounds: [(d0)[s0] -> (d0), (d0)[s0] -> (d0 + 1)] [(d0)[s0] -> (0), (d0)[s0] -> (8)] )}}
     // expected-remark@-2 {{slice ( src loop: 1, dst loop: 0, depth: 2 : insert point: (2, 1) loop bounds: [(d0, d1)[s0] -> (d0), (d0, d1)[s0] -> (d0 + 1)] [(d0, d1)[s0] -> (0), (d0, d1)[s0] -> (8)] )}}
     affine.for %i1 = 0 to 16 {
-      store %cst, %0[%i0, %i1] : memref<100x100xf32>
+      affine.store %cst, %0[%i0, %i1] : memref<100x100xf32>
     }
   }
   affine.for %i2 = 0 to 10 {
     // expected-remark@-1 {{slice ( src loop: 0, dst loop: 1, depth: 1 : insert point: (1, 0) loop bounds: [(d0)[s0] -> (d0), (d0)[s0] -> (d0 + 1)] [(d0)[s0] -> (0), (d0)[s0] -> (8)] )}}
     affine.for %i3 = 0 to 8 {
-      %1 = load %0[%i2, %i3] : memref<100x100xf32>
+      %1 = affine.load %0[%i2, %i3] : memref<100x100xf32>
     }
-    %2 = load %0[%i2, %c0] : memref<100x100xf32>
+    %2 = affine.load %0[%i2, %c0] : memref<100x100xf32>
   }
   return
 }
@@ -105,15 +105,15 @@
   affine.for %i0 = 0 to 16 {
     // expected-remark@-1 {{slice ( src loop: 1, dst loop: 0, depth: 1 : insert point: (1, 2) loop bounds: [(d0)[s0] -> (d0), (d0)[s0] -> (d0 + 1)] [(d0)[s0] -> (0), (d0)[s0] -> (8)] )}}
     affine.for %i1 = 0 to 16 {
-      store %cst, %0[%i0, %i1] : memref<100x100xf32>
+      affine.store %cst, %0[%i0, %i1] : memref<100x100xf32>
     }
-    store %cst, %0[%i0, %c0] : memref<100x100xf32>
+    affine.store %cst, %0[%i0, %c0] : memref<100x100xf32>
   }
   affine.for %i2 = 0 to 10 {
     // expected-remark@-1 {{slice ( src loop: 0, dst loop: 1, depth: 1 : insert point: (1, 0) loop bounds: [(d0)[s0] -> (d0), (d0)[s0] -> (d0 + 1)] [(d0)[s0] -> (0), (d0)[s0] -> (16)] )}}
     // expected-remark@-2 {{slice ( src loop: 0, dst loop: 1, depth: 2 : insert point: (2, 0) loop bounds: [(d0, d1)[s0] -> (d0), (d0, d1)[s0] -> (d0 + 1)] [(d0, d1)[s0] -> (0), (d0, d1)[s0] -> (16)] )}}
     affine.for %i3 = 0 to 8 {
-      %1 = load %0[%i2, %i3] : memref<100x100xf32>
+      %1 = affine.load %0[%i2, %i3] : memref<100x100xf32>
     }
   }
   return
@@ -131,14 +131,14 @@
     // expected-remark@-1 {{slice ( src loop: 1, dst loop: 0, depth: 1 : insert point: (1, 1) loop bounds: [(d0) -> (d0), (d0) -> (d0 + 1)] [(d0) -> (0), (d0) -> (10)] )}}
     // expected-remark@-2 {{slice ( src loop: 1, dst loop: 0, depth: 2 : insert point: (2, 1) loop bounds: [(d0, d1) -> (d0), (d0, d1) -> (d0 + 1)] [(d0, d1) -> (d1), (d0, d1) -> (d1 + 1)] )}}
     affine.for %i1 = 0 to 16 {
-      store %cst, %0[%i0, %i1] : memref<100x100xf32>
+      affine.store %cst, %0[%i0, %i1] : memref<100x100xf32>
     }
   }
   affine.for %i2 = 0 to 8 {
     // expected-remark@-1 {{slice ( src loop: 0, dst loop: 1, depth: 1 : insert point: (1, 0) loop bounds: [(d0) -> (d0), (d0) -> (d0 + 1)] [(d0) -> (0), (d0) -> (10)] )}}
     // expected-remark@-2 {{slice ( src loop: 0, dst loop: 1, depth: 2 : insert point: (2, 0) loop bounds: [(d0, d1) -> (d0), (d0, d1) -> (d0 + 1)] [(d0, d1) -> (d1), (d0, d1) -> (d1 + 1)] )}}
     affine.for %i3 = 0 to 10 {
-      %1 = load %0[%i2, %i3] : memref<100x100xf32>
+      %1 = affine.load %0[%i2, %i3] : memref<100x100xf32>
     }
   }
   return
diff --git a/mlir/test/Transforms/loop-fusion.mlir b/mlir/test/Transforms/loop-fusion.mlir
index 84b953d..a8caff4 100644
--- a/mlir/test/Transforms/loop-fusion.mlir
+++ b/mlir/test/Transforms/loop-fusion.mlir
@@ -10,24 +10,20 @@
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
-
 // CHECK-LABEL: func @should_fuse_raw_dep_for_locality() {
 func @should_fuse_raw_dep_for_locality() {
   %m = alloc() : memref<10xf32>
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 10 {
-    store %cf7, %m[%i0] : memref<10xf32>
+    affine.store %cf7, %m[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    %v0 = load %m[%i1] : memref<10xf32>
+    %v0 = affine.load %m[%i1] : memref<10xf32>
   }
   // CHECK:      affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:   %1 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:   store %cst, %0[%1] : memref<1xf32>
-  // CHECK-NEXT:   %2 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:   %3 = load %0[%2] : memref<1xf32>
+  // CHECK-NEXT:   affine.store %cst, %0[0] : memref<1xf32>
+  // CHECK-NEXT:   %1 = affine.load %0[0] : memref<1xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return
   return
@@ -35,8 +31,6 @@
 
 // -----
 
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
-
 // CHECK-LABEL: func @should_fuse_reduction_to_pointwise() {
 func @should_fuse_reduction_to_pointwise() {
   %a = alloc() : memref<10x10xf32>
@@ -47,31 +41,28 @@
 
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
-      %v0 = load %b[%i0] : memref<10xf32>
-      %v1 = load %a[%i0, %i1] : memref<10x10xf32>
+      %v0 = affine.load %b[%i0] : memref<10xf32>
+      %v1 = affine.load %a[%i0, %i1] : memref<10x10xf32>
       %v3 = addf %v0, %v1 : f32
-      store %v3, %b[%i0] : memref<10xf32>
+      affine.store %v3, %b[%i0] : memref<10xf32>
     }
   }
   affine.for %i2 = 0 to 10 {
-    %v4 = load %b[%i2] : memref<10xf32>
-    store %v4, %c[%i2] : memref<10xf32>
+    %v4 = affine.load %b[%i2] : memref<10xf32>
+    affine.store %v4, %c[%i2] : memref<10xf32>
   }
 
   // Should fuse in entire inner loop on %i1 from source loop nest, as %i1
   // is not used in the access function of the store/load on %b.
   // CHECK:       affine.for %i0 = 0 to 10 {
   // CHECK-NEXT:    affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:      %3 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:      %4 = load %0[%3] : memref<1xf32>
-  // CHECK-NEXT:      %5 = load %1[%i0, %i1] : memref<10x10xf32>
-  // CHECK-NEXT:      %6 = addf %4, %5 : f32
-  // CHECK-NEXT:      %7 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:      store %6, %0[%7] : memref<1xf32>
+  // CHECK-NEXT:      %3 = affine.load %0[0] : memref<1xf32>
+  // CHECK-NEXT:      %4 = affine.load %1[%i0, %i1] : memref<10x10xf32>
+  // CHECK-NEXT:      %5 = addf %3, %4 : f32
+  // CHECK-NEXT:      affine.store %5, %0[0] : memref<1xf32>
   // CHECK-NEXT:    }
-  // CHECK-NEXT:    %8 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:    %9 = load %0[%8] : memref<1xf32>
-  // CHECK-NEXT:    store %9, %2[%i0] : memref<10xf32>
+  // CHECK-NEXT:    %6 = affine.load %0[0] : memref<1xf32>
+  // CHECK-NEXT:    affine.store %6, %2[%i0] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return
   return
@@ -81,8 +72,6 @@
 
 // CHECK-DAG: [[MAP_SHIFT_MINUS_ONE_R1:#map[0-9]+]] = (d0) -> (d0 - 1)
 // CHECK-DAG: [[MAP_SHIFT_BY_ONE:#map[0-9]+]] = (d0) -> (d0 + 1)
-// CHECK-DAG: [[MAP_SHIFT_MINUS_IV_R2_EVEN:#map[0-9]+]] = (d0, d1, d2, d3) -> (-d0 + d2)
-// CHECK-DAG: [[MAP_SHIFT_MINUS_IV_R2_ODD:#map[0-9]+]] = (d0, d1, d2, d3) -> (-d1 + d3)
 
 // CHECK-LABEL: func @should_fuse_loop_nests_with_shifts() {
 func @should_fuse_loop_nests_with_shifts() {
@@ -93,12 +82,12 @@
     affine.for %i1 = 0 to 9 {
       %idx = affine.apply (d0) -> (d0 + 1) (%i0)
       %idy = affine.apply (d0) -> (d0 + 1) (%i1)
-      store %cf7, %a[%idx, %idy] : memref<10x10xf32>
+      affine.store %cf7, %a[%idx, %idy] : memref<10x10xf32>
     }
   }
   affine.for %i2 = 1 to 10 {
     affine.for %i3 = 1 to 10 {
-      %v0 = load %a[%i2, %i3] : memref<10x10xf32>
+      %v0 = affine.load %a[%i2, %i3] : memref<10x10xf32>
     }
   }
 
@@ -116,12 +105,8 @@
   // CHECK-NEXT:     %2 = affine.apply [[MAP_SHIFT_MINUS_ONE_R1]](%i1)
   // CHECK-NEXT:     %3 = affine.apply [[MAP_SHIFT_BY_ONE]](%1)
   // CHECK-NEXT:     %4 = affine.apply [[MAP_SHIFT_BY_ONE]](%2)
-  // CHECK-NEXT:     %5 = affine.apply [[MAP_SHIFT_MINUS_IV_R2_EVEN]](%i0, %i1, %3, %4)
-  // CHECK-NEXT:     %6 = affine.apply [[MAP_SHIFT_MINUS_IV_R2_ODD]](%i0, %i1, %3, %4)
-  // CHECK-NEXT:     store %cst, %0[%5, %6] : memref<1x1xf32>
-  // CHECK-NEXT:     %7 = affine.apply [[MAP_SHIFT_MINUS_IV_R2_EVEN]](%i0, %i1, %i0, %i1)
-  // CHECK-NEXT:     %8 = affine.apply [[MAP_SHIFT_MINUS_IV_R2_ODD]](%i0, %i1, %i0, %i1)
-  // CHECK-NEXT:     %9 = load %0[%7, %8] : memref<1x1xf32>
+  // CHECK-NEXT:     affine.store %cst, %0[0, 0] : memref<1x1xf32>
+  // CHECK-NEXT:     %5 = affine.load %0[0, 0] : memref<1x1xf32>
   // CHECK-NEXT:   }
   // CHECK-NEXT: }
   // CHECK-NEXT: return
@@ -130,9 +115,6 @@
 
 // -----
 
-// CHECK-DAG: [[MAP_D2_D0_DIFF:#map[0-9]+]] = (d0, d1, d2, d3) -> (-d0 + d2)
-// CHECK-DAG: [[MAP_D3_D1_DIFF:#map[0-9]+]] = (d0, d1, d2, d3) -> (-d1 + d3)
-
 // CHECK-LABEL: func @should_fuse_loop_nest() {
 func @should_fuse_loop_nest() {
   %a = alloc() : memref<10x10xf32>
@@ -141,18 +123,18 @@
 
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
-      store %cf7, %a[%i0, %i1] : memref<10x10xf32>
+      affine.store %cf7, %a[%i0, %i1] : memref<10x10xf32>
     }
   }
   affine.for %i2 = 0 to 10 {
     affine.for %i3 = 0 to 10 {
-      %v0 = load %a[%i3, %i2] : memref<10x10xf32>
-      store %v0, %b[%i2, %i3] : memref<10x10xf32>
+      %v0 = affine.load %a[%i3, %i2] : memref<10x10xf32>
+      affine.store %v0, %b[%i2, %i3] : memref<10x10xf32>
     }
   }
   affine.for %i4 = 0 to 10 {
     affine.for %i5 = 0 to 10 {
-      %v1 = load %b[%i4, %i5] : memref<10x10xf32>
+      %v1 = affine.load %b[%i4, %i5] : memref<10x10xf32>
     }
   }
   // Expecting private memref for '%a' first, then private memref for '%b'.
@@ -160,18 +142,10 @@
   // CHECK-DAG:  [[NEWB:%[0-9]+]] = alloc() : memref<1x1xf32>
   // CHECK:      affine.for %i0 = 0 to 10 {
   // CHECK-NEXT:   affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:     %2 = affine.apply [[MAP_D2_D0_DIFF]](%i1, %i0, %i1, %i0)
-  // CHECK-NEXT:     %3 = affine.apply [[MAP_D3_D1_DIFF]](%i1, %i0, %i1, %i0)
-  // CHECK-NEXT:     store %cst, [[NEWA]][%2, %3] : memref<1x1xf32>
-  // CHECK-NEXT:     %4 = affine.apply [[MAP_D2_D0_DIFF]](%i1, %i0, %i1, %i0)
-  // CHECK-NEXT:     %5 = affine.apply [[MAP_D3_D1_DIFF]](%i1, %i0, %i1, %i0)
-  // CHECK-NEXT:     %6 = load [[NEWA]][%4, %5] : memref<1x1xf32>
-  // CHECK-NEXT:     %7 = affine.apply [[MAP_D2_D0_DIFF]](%i0, %i1, %i0, %i1)
-  // CHECK-NEXT:     %8 = affine.apply [[MAP_D3_D1_DIFF]](%i0, %i1, %i0, %i1)
-  // CHECK-NEXT:     store %6, [[NEWB]][%7, %8] : memref<1x1xf32>
-  // CHECK-NEXT:     %9 = affine.apply [[MAP_D2_D0_DIFF]](%i0, %i1, %i0, %i1)
-  // CHECK-NEXT:     %10 = affine.apply [[MAP_D3_D1_DIFF]](%i0, %i1, %i0, %i1)
-  // CHECK-NEXT:     %11 = load [[NEWB]][%9, %10] : memref<1x1xf32>
+  // CHECK-NEXT:     affine.store %cst, [[NEWA]][0, 0] : memref<1x1xf32>
+  // CHECK-NEXT:     %2 = affine.load [[NEWA]][0, 0] : memref<1x1xf32>
+  // CHECK-NEXT:     affine.store %2, [[NEWB]][0, 0] : memref<1x1xf32>
+  // CHECK-NEXT:     %3 = affine.load [[NEWB]][0, 0] : memref<1x1xf32>
   // CHECK-NEXT:   }
   // CHECK-NEXT: }
   // CHECK-NEXT: return
@@ -180,8 +154,6 @@
 
 // -----
 
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
-
 // CHECK-LABEL: func @should_fuse_across_intermediate_loop_with_no_deps() {
 func @should_fuse_across_intermediate_loop_with_no_deps() {
   %a = alloc() : memref<10xf32>
@@ -191,27 +163,25 @@
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 10 {
-    %v0 = load %a[%i0] : memref<10xf32>
-    store %v0, %b[%i0] : memref<10xf32>
+    %v0 = affine.load %a[%i0] : memref<10xf32>
+    affine.store %v0, %b[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    store %cf7, %c[%i1] : memref<10xf32>
+    affine.store %cf7, %c[%i1] : memref<10xf32>
   }
   affine.for %i2 = 0 to 10 {
-    %v1 = load %b[%i2] : memref<10xf32>
+    %v1 = affine.load %b[%i2] : memref<10xf32>
   }
 
   // Should fuse first loop (past second loop with no dependences) into third.
   // Note that fusion creates a private memref '%2' for the fused loop nest.
   // CHECK:      affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:   store %cst, %2[%i0] : memref<10xf32>
+  // CHECK-NEXT:   affine.store %cst, %2[%i0] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK:      affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:   %3 = load %1[%i1] : memref<10xf32>
-  // CHECK-NEXT:   %4 = affine.apply [[MAP0]](%i1, %i1)
-  // CHECK-NEXT:   store %3, %0[%4] : memref<1xf32>
-  // CHECK-NEXT:   %5 = affine.apply [[MAP0]](%i1, %i1)
-  // CHECK-NEXT:   %6 = load %0[%5] : memref<1xf32>
+  // CHECK-NEXT:   %3 = affine.load %1[%i1] : memref<10xf32>
+  // CHECK-NEXT:   affine.store %3, %0[0] : memref<1xf32>
+  // CHECK-NEXT:   %4 = affine.load %0[0] : memref<1xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return
   return
@@ -219,8 +189,6 @@
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
-
 // CHECK-LABEL: func @should_fuse_all_loops() {
 func @should_fuse_all_loops() {
   %a = alloc() : memref<10xf32>
@@ -229,14 +197,14 @@
 
   // Set up flow dependences from first and second loops to third.
   affine.for %i0 = 0 to 10 {
-    store %cf7, %a[%i0] : memref<10xf32>
+    affine.store %cf7, %a[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    store %cf7, %b[%i1] : memref<10xf32>
+    affine.store %cf7, %b[%i1] : memref<10xf32>
   }
   affine.for %i2 = 0 to 10 {
-    %v0 = load %a[%i2] : memref<10xf32>
-    %v1 = load %b[%i2] : memref<10xf32>
+    %v0 = affine.load %a[%i2] : memref<10xf32>
+    %v1 = affine.load %b[%i2] : memref<10xf32>
   }
 
   // Should fuse first and second loops into third.
@@ -244,14 +212,10 @@
   // CHECK-DAG: [[NEWA:%[0-9]+]] = alloc() : memref<1xf32>
   // CHECK-DAG: [[NEWB:%[0-9]+]] = alloc() : memref<1xf32>
   // CHECK:      affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:   %2 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:   store %cst, [[NEWA]][%2] : memref<1xf32>
-  // CHECK-NEXT:   %3 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:   store %cst, [[NEWB]][%3] : memref<1xf32>
-  // CHECK-NEXT:   %4 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:   %5 = load [[NEWA]][%4] : memref<1xf32>
-  // CHECK-NEXT:   %6 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:   %7 = load [[NEWB]][%6] : memref<1xf32>
+  // CHECK-NEXT:   affine.store %cst, [[NEWA]][0] : memref<1xf32>
+  // CHECK-NEXT:   affine.store %cst, [[NEWB]][0] : memref<1xf32>
+  // CHECK-NEXT:   %2 = affine.load [[NEWA]][0] : memref<1xf32>
+  // CHECK-NEXT:   %3 = affine.load [[NEWB]][0] : memref<1xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return
   return
@@ -259,8 +223,6 @@
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
-
 // CHECK-LABEL: func @should_fuse_first_and_second_loops() {
 func @should_fuse_first_and_second_loops() {
   %a = alloc() : memref<10xf32>
@@ -270,27 +232,25 @@
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 10 {
-    store %cf7, %a[%i0] : memref<10xf32>
+    affine.store %cf7, %a[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    %v0 = load %a[%i1] : memref<10xf32>
-    store %cf7, %b[%i1] : memref<10xf32>
+    %v0 = affine.load %a[%i1] : memref<10xf32>
+    affine.store %cf7, %b[%i1] : memref<10xf32>
   }
   affine.for %i2 = 0 to 10 {
-    %v1 = load %c[%i2] : memref<10xf32>
+    %v1 = affine.load %c[%i2] : memref<10xf32>
   }
 
   // Should fuse first loop into the second (last loop should not be fused).
   // Should create private memref '%2' for fused loop.
   // CHECK:      affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:   %3 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:   store %cst, %0[%3] : memref<1xf32>
-  // CHECK-NEXT:   %4 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:   %5 = load %0[%4] : memref<1xf32>
-  // CHECK-NEXT:   store %cst, %1[%i0] : memref<10xf32>
+  // CHECK-NEXT:   affine.store %cst, %0[0] : memref<1xf32>
+  // CHECK-NEXT:   %3 = affine.load %0[0] : memref<1xf32>
+  // CHECK-NEXT:   affine.store %cst, %1[%i0] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK:      affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:   %6 = load %2[%i1] : memref<10xf32>
+  // CHECK-NEXT:   %4 = affine.load %2[%i1] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return
 
@@ -312,29 +272,29 @@
   // 2) loop0 -> loop2 on memref '%b'
   // 3) loop1 -> loop2 on memref '%c'
   affine.for %i0 = 0 to 10 {
-    %v0 = load %a[%i0] : memref<10xf32>
-    store %cf7, %b[%i0] : memref<10xf32>
+    %v0 = affine.load %a[%i0] : memref<10xf32>
+    affine.store %cf7, %b[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    store %cf7, %a[%i1] : memref<10xf32>
-    %v1 = load %c[%i1] : memref<10xf32>
+    affine.store %cf7, %a[%i1] : memref<10xf32>
+    %v1 = affine.load %c[%i1] : memref<10xf32>
   }
   affine.for %i2 = 0 to 10 {
-    %v2 = load %b[%i2] : memref<10xf32>
-    store %cf7, %c[%i2] : memref<10xf32>
+    %v2 = affine.load %b[%i2] : memref<10xf32>
+    affine.store %cf7, %c[%i2] : memref<10xf32>
   }
   // Should not fuse: fusing loop first loop into last would create a cycle.
   // CHECK:      affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:   %3 = load %0[%i0] : memref<10xf32>
-  // CHECK-NEXT:   store %cst, %1[%i0] : memref<10xf32>
+  // CHECK-NEXT:   %3 = affine.load %0[%i0] : memref<10xf32>
+  // CHECK-NEXT:   affine.store %cst, %1[%i0] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK:      affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:   store %cst, %0[%i1] : memref<10xf32>
-  // CHECK-NEXT:   %4 = load %2[%i1] : memref<10xf32>
+  // CHECK-NEXT:   affine.store %cst, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT:   %4 = affine.load %2[%i1] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK:      affine.for %i2 = 0 to 10 {
-  // CHECK-NEXT:   %5 = load %1[%i2] : memref<10xf32>
-  // CHECK-NEXT:   store %cst, %2[%i2] : memref<10xf32>
+  // CHECK-NEXT:   %5 = affine.load %1[%i2] : memref<10xf32>
+  // CHECK-NEXT:   affine.store %cst, %2[%i2] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return
   return
@@ -342,21 +302,19 @@
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
-
 // CHECK-LABEL: func @should_fuse_producer_consumer() {
 func @should_fuse_producer_consumer() {
   %m = alloc() : memref<10xf32>
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 10 {
-    store %cf7, %m[%i0] : memref<10xf32>
+    affine.store %cf7, %m[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    store %cf7, %m[%i1] : memref<10xf32>
+    affine.store %cf7, %m[%i1] : memref<10xf32>
   }
   affine.for %i2 = 0 to 10 {
-    %v1 = load %m[%i2] : memref<10xf32>
+    %v1 = affine.load %m[%i2] : memref<10xf32>
   }
   // Fusing loop %i0 to %i2 would violate the WAW dependence between %i0 and
   // %i1, but OK to fuse %i1 into %i2.
@@ -365,13 +323,11 @@
   // CHECK:      %0 = alloc() : memref<1xf32>
   // CHECK:      %1 = alloc() : memref<10xf32>
   // CHECK:      affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:   store %cst, %1[%i0] : memref<10xf32>
+  // CHECK-NEXT:   affine.store %cst, %1[%i0] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:   %2 = affine.apply [[MAP0]](%i1, %i1)
-  // CHECK-NEXT:   store %cst, %0[%2] : memref<1xf32>
-  // CHECK-NEXT:   %3 = affine.apply [[MAP0]](%i1, %i1)
-  // CHECK-NEXT:   %4 = load %0[%3] : memref<1xf32>
+  // CHECK-NEXT:   affine.store %cst, %0[0] : memref<1xf32>
+  // CHECK-NEXT:   %2 = affine.load %0[0] : memref<1xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return
   return
@@ -379,8 +335,6 @@
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
-
 // CHECK-LABEL: func @should_fuse_and_move_to_preserve_war_dep() {
 func @should_fuse_and_move_to_preserve_war_dep() {
   %a = alloc() : memref<10xf32>
@@ -388,27 +342,25 @@
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 10 {
-    %v0 = load %a[%i0] : memref<10xf32>
-    store %v0, %b[%i0] : memref<10xf32>
+    %v0 = affine.load %a[%i0] : memref<10xf32>
+    affine.store %v0, %b[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    store %cf7, %a[%i1] : memref<10xf32>
+    affine.store %cf7, %a[%i1] : memref<10xf32>
   }
   affine.for %i2 = 0 to 10 {
-    %v1 = load %b[%i2] : memref<10xf32>
+    %v1 = affine.load %b[%i2] : memref<10xf32>
   }
   // Loops '%i1' and '%i2' have no dependences. We can fuse a slice of '%i0'
   // into '%i2' if we move the fused loop nest before '%i1', which preserves
   // the WAR dependence from load '%a' in '%i0' to the store '%a' in loop '%i1'.
   // CHECK:       affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:    %2 = load %1[%i0] : memref<10xf32>
-  // CHECK-NEXT:    %3 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:    store %2, %0[%3] : memref<1xf32>
-  // CHECK-NEXT:    %4 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:    %5 = load %0[%4] : memref<1xf32>
+  // CHECK-NEXT:    %2 = affine.load %1[%i0] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %2, %0[0] : memref<1xf32>
+  // CHECK-NEXT:    %3 = affine.load %0[0] : memref<1xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:    store %cst, %1[%i1] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %1[%i1] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return
   return
@@ -416,55 +368,47 @@
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
-
 // CHECK-LABEL: func @should_fuse_with_private_memref_if_top_level_access() {
 func @should_fuse_with_private_memref_if_top_level_access() {
   %m = alloc() : memref<10xf32>
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 10 {
-    store %cf7, %m[%i0] : memref<10xf32>
+    affine.store %cf7, %m[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    %v0 = load %m[%i1] : memref<10xf32>
+    %v0 = affine.load %m[%i1] : memref<10xf32>
   }
 
   %c0 = constant 4 : index
-  %v1 = load %m[%c0] : memref<10xf32>
+  %v1 = affine.load %m[%c0] : memref<10xf32>
   // Top-level load to '%m' should prevent fusion.
   // CHECK:      affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:   store %cst, %1[%i0] : memref<10xf32>
+  // CHECK-NEXT:   affine.store %cst, %1[%i0] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:   %2 = affine.apply [[MAP0]](%i1, %i1)
-  // CHECK-NEXT:   store %cst, %0[%2] : memref<1xf32>
-  // CHECK-NEXT:   %3 = affine.apply [[MAP0]](%i1, %i1)
-  // CHECK-NEXT:   %4 = load %0[%3] : memref<1xf32>
+  // CHECK-NEXT:   affine.store %cst, %0[0] : memref<1xf32>
+  // CHECK-NEXT:   %2 = affine.load %0[0] : memref<1xf32>
   // CHECK-NEXT: }
   return
 }
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
-
 // CHECK-LABEL: func @should_fuse_no_top_level_access() {
 func @should_fuse_no_top_level_access() {
   %m = alloc() : memref<10xf32>
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 10 {
-    store %cf7, %m[%i0] : memref<10xf32>
+    affine.store %cf7, %m[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    %v0 = load %m[%i1] : memref<10xf32>
+    %v0 = affine.load %m[%i1] : memref<10xf32>
   }
   // CHECK:      affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:   %1 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:   store %cst, %0[%1] : memref<1xf32>
-  // CHECK-NEXT:   %2 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:   %3 = load %0[%2] : memref<1xf32>
+  // CHECK-NEXT:   affine.store %cst, %0[0] : memref<1xf32>
+  // CHECK-NEXT:   %1 = affine.load %0[0] : memref<1xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return
   return
@@ -480,20 +424,20 @@
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 10 {
-    store %cf7, %m[%i0] : memref<10xf32>
+    affine.store %cf7, %m[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    %v0 = load %m[%i1] : memref<10xf32>
+    %v0 = affine.load %m[%i1] : memref<10xf32>
   }
   %c0 = constant 4 : index
   affine.if #set0(%c0) {
   }
   // Top-level IfOp should prevent fusion.
   // CHECK:      affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:   store %cst, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT:   affine.store %cst, %0[%i0] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK:      affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:   %1 = load %0[%i1] : memref<10xf32>
+  // CHECK-NEXT:   %1 = affine.load %0[%i1] : memref<10xf32>
   // CHECK-NEXT: }
   return
 }
@@ -509,32 +453,28 @@
   %c4 = constant 4 : index
 
   affine.for %i0 = 0 to 10 {
-    store %cf7, %m[%i0] : memref<10xf32>
+    affine.store %cf7, %m[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
     affine.if #set0(%c4) {
     }
-    %v0 = load %m[%i1] : memref<10xf32>
+    %v0 = affine.load %m[%i1] : memref<10xf32>
   }
 
   // IfOp in ForInst should prevent fusion.
   // CHECK:      affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:   store %cst, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT:   affine.store %cst, %0[%i0] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK:      affine.for %i1 = 0 to 10 {
   // CHECK-NEXT:   affine.if #set0(%c4) {
   // CHECK-NEXT:   }  
-  // CHECK-NEXT:   %1 = load %0[%i1] : memref<10xf32>
+  // CHECK-NEXT:   %1 = affine.load %0[%i1] : memref<10xf32>
   // CHECK-NEXT: }
   return
 }
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1, d2, d3, d4, d5) -> (-d0 + d3)
-// CHECK: [[MAP1:#map[0-9]+]] = (d0, d1, d2, d3, d4, d5) -> (-d1 + d4)
-// CHECK: [[MAP2:#map[0-9]+]] = (d0, d1, d2, d3, d4, d5) -> (-d2 + d5)
-
 // CHECK-LABEL: func @permute_and_fuse() {
 func @permute_and_fuse() {
   %m = alloc() : memref<10x20x30xf32>
@@ -543,14 +483,14 @@
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 20 {
       affine.for %i2 = 0 to 30 {
-        store %cf7, %m[%i0, %i1, %i2] : memref<10x20x30xf32>
+        affine.store %cf7, %m[%i0, %i1, %i2] : memref<10x20x30xf32>
       }
     }
   }
   affine.for %i3 = 0 to 30 {
     affine.for %i4 = 0 to 10 {
       affine.for %i5 = 0 to 20 {
-        %v0 = load %m[%i4, %i5, %i3] : memref<10x20x30xf32>
+        %v0 = affine.load %m[%i4, %i5, %i3] : memref<10x20x30xf32>
         "foo"(%v0) : (f32) -> ()
       }
     }
@@ -558,15 +498,9 @@
 // CHECK:       affine.for %i0 = 0 to 30 {
 // CHECK-NEXT:    affine.for %i1 = 0 to 10 {
 // CHECK-NEXT:      affine.for %i2 = 0 to 20 {
-// CHECK-NEXT:        %1 = affine.apply [[MAP0]](%i1, %i2, %i0, %i1, %i2, %i0)
-// CHECK-NEXT:        %2 = affine.apply [[MAP1]](%i1, %i2, %i0, %i1, %i2, %i0)
-// CHECK-NEXT:        %3 = affine.apply [[MAP2]](%i1, %i2, %i0, %i1, %i2, %i0)
-// CHECK-NEXT:        store %cst, %0[%1, %2, %3] : memref<1x1x1xf32>
-// CHECK-NEXT:        %4 = affine.apply [[MAP0]](%i1, %i2, %i0, %i1, %i2, %i0)
-// CHECK-NEXT:        %5 = affine.apply [[MAP1]](%i1, %i2, %i0, %i1, %i2, %i0)
-// CHECK-NEXT:        %6 = affine.apply [[MAP2]](%i1, %i2, %i0, %i1, %i2, %i0)
-// CHECK-NEXT:        %7 = load %0[%4, %5, %6] : memref<1x1x1xf32>
-// CHECK-NEXT:        "foo"(%7) : (f32) -> ()
+// CHECK-NEXT:        affine.store %cst, %0[0, 0, 0] : memref<1x1x1xf32>
+// CHECK-NEXT:        %1 = affine.load %0[0, 0, 0] : memref<1x1x1xf32>
+// CHECK-NEXT:        "foo"(%1) : (f32) -> ()
 // CHECK-NEXT:      }
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
@@ -587,15 +521,15 @@
   %out = alloc() : memref<16x4xf32>
 
   affine.for %i0 = 0 to 64 {
-    %v = load %in[%i0] : memref<64xf32>
+    %v = affine.load %in[%i0] : memref<64xf32>
     %idx = affine.apply (d0) -> (d0 floordiv 4) (%i0)
     %idy = affine.apply (d0) -> (d0 mod 4) (%i0)
-    store %v, %out[%idx, %idy] : memref<16x4xf32>
+    affine.store %v, %out[%idx, %idy] : memref<16x4xf32>
   }
 
   affine.for %i1 = 0 to 16 {
     affine.for %i2 = 0 to 4 {
-      %w = load %out[%i1, %i2] : memref<16x4xf32>
+      %w = affine.load %out[%i1, %i2] : memref<16x4xf32>
       "foo"(%w) : (f32) -> ()
     }
   }
@@ -612,7 +546,6 @@
 // CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0) -> (d0 floordiv 4)
 // CHECK-DAG: [[MAP1:#map[0-9]+]] = (d0) -> (d0 mod 4)
 // CHECK-DAG: [[MAP2:#map[0-9]+]] = (d0, d1) -> (d0 * 4 + d1)
-// CHECK-DAG: [[MAP3:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
 
 // Reshape a 16x4xf32 to 64xf32.
 // CHECK-LABEL: func @fuse_reshape_16_4_64
@@ -622,26 +555,24 @@
 
   affine.for %i0 = 0 to 16 {
     affine.for %i1 = 0 to 4 {
-      %v = load %in[%i0, %i1] : memref<16x4xf32>
+      %v = affine.load %in[%i0, %i1] : memref<16x4xf32>
       %idx = affine.apply (d0, d1) -> (4*d0 + d1) (%i0, %i1)
-      store %v, %out[%idx] : memref<64xf32>
+      affine.store %v, %out[%idx] : memref<64xf32>
     }
   }
 
   affine.for %i2 = 0 to 64 {
-    %w = load %out[%i2] : memref<64xf32>
+    %w = affine.load %out[%i2] : memref<64xf32>
     "foo"(%w) : (f32) -> ()
   }
 // CHECK:       affine.for %i0 = 0 to 64 {
 // CHECK-NEXT:    %2 = affine.apply [[MAP0]](%i0)
 // CHECK-NEXT:    %3 = affine.apply [[MAP1]](%i0)
-// CHECK-NEXT:    %4 = load %1[%2, %3] : memref<16x4xf32>
+// CHECK-NEXT:    %4 = affine.load %1[%2, %3] : memref<16x4xf32>
 // CHECK-NEXT:    %5 = affine.apply [[MAP2]](%2, %3)
-// CHECK-NEXT:    %6 = affine.apply [[MAP3]](%i0, %5)
-// CHECK-NEXT:    store %4, %0[%6] : memref<1xf32>
-// CHECK-NEXT:    %7 = affine.apply [[MAP3]](%i0, %i0)
-// CHECK-NEXT:    %8 = load %0[%7] : memref<1xf32>
-// CHECK-NEXT:    "foo"(%8) : (f32) -> ()
+// CHECK-NEXT:    affine.store %4, %0[0] : memref<1xf32>
+// CHECK-NEXT:    %6 = affine.load %0[0] : memref<1xf32>
+// CHECK-NEXT:    "foo"(%6) : (f32) -> ()
 // CHECK-NEXT:  }
 // CHECK-NEXT:  return
   return
@@ -665,7 +596,7 @@
           affine.for %i4 = 0 to 16 {
             affine.for %i5 = 0 to 1 {
               %val = "foo"(%i0, %i1, %i2, %i3, %i4, %i5) : (index, index, index, index, index, index) -> i32
-              store %val, %in[%i0, %i1, %i2, %i3, %i4, %i5] : memref<2x2x3x3x16x1xi32>
+              affine.store %val, %in[%i0, %i1, %i2, %i3, %i4, %i5] : memref<2x2x3x3x16x1xi32>
             }
           }
         }
@@ -683,16 +614,16 @@
       %3 = affine.apply (d0) -> ((((d0 mod 288) mod 144) mod 48) floordiv (16 * 1))(%a0)
       %4 = affine.apply (d0) -> ((((d0 mod 288) mod 144) mod 48) mod 16)(%a0)
       %5 = affine.apply (d0) -> (((((d0 mod 144) mod 144) mod 48) mod 16) mod 1)(%a0)
-      %v = load %in[%0, %1, %2, %3, %4, %5] : memref<2x2x3x3x16x1xi32>
-      store %v, %out[%ii, %jj] : memref<64x9xi32>
+      %v = affine.load %in[%0, %1, %2, %3, %4, %5] : memref<2x2x3x3x16x1xi32>
+      affine.store %v, %out[%ii, %jj] : memref<64x9xi32>
     }
   }
 
   affine.for %i = 0 to 64 {
     affine.for %j = 0 to 9 {
-      %a = load %out[%i, %j] : memref<64x9xi32>
+      %a = affine.load %out[%i, %j] : memref<64x9xi32>
       %b = muli %a, %a : i32
-      store %b, %live_out[%i, %j] : memref<64x9xi32>
+      affine.store %b, %live_out[%i, %j] : memref<64x9xi32>
     }
   }
   return %live_out : memref<64x9xi32>
@@ -705,12 +636,6 @@
 // CHECK-DAG: [[MAP2:#map[0-9]+]] = (d0, d1) -> ((((d0 * 9 + d1) mod 288) mod 144) floordiv 48)
 // CHECK-DAG: [[MAP3:#map[0-9]+]] = (d0, d1) -> (((((d0 * 9 + d1) mod 288) mod 144) mod 48) floordiv 16)
 // CHECK-DAG: [[MAP4:#map[0-9]+]] = (d0, d1) -> (((((d0 * 9 + d1) mod 288) mod 144) mod 48) mod 16)
-// CHECK-DAG: [[MAP5:#map[0-9]+]] = (d0, d1, d2, d3, d4, d5, d6, d7) -> (d2 - (d0 * 9 + d1) floordiv 288)
-// CHECK-DAG: [[MAP6:#map[0-9]+]] = (d0, d1, d2, d3, d4, d5, d6, d7) -> (d3)
-// CHECK-DAG: [[MAP7:#map[0-9]+]] = (d0, d1, d2, d3, d4, d5, d6, d7) -> (d4)
-// CHECK-DAG: [[MAP8:#map[0-9]+]] = (d0, d1, d2, d3, d4, d5, d6, d7) -> (d5)
-// CHECK-DAG: [[MAP9:#map[0-9]+]] = (d0, d1, d2, d3, d4, d5, d6, d7) -> (d6)
-// CHECK-DAG: [[MAP10:#map[0-9]+]] = (d0, d1, d2, d3, d4, d5, d6, d7) -> (d7)
 // CHECK-DAG: [[MAP11:#map[0-9]+]] = (d0, d1) -> (d0 * 9 + d1)
 // CHECK-DAG: [[MAP12:#map[0-9]+]] = (d0) -> (d0 floordiv 288)
 // CHECK-DAG: [[MAP13:#map[0-9]+]] = (d0) -> ((d0 mod 288) floordiv 144)
@@ -718,8 +643,7 @@
 // CHECK-DAG: [[MAP15:#map[0-9]+]] = (d0) -> ((((d0 mod 288) mod 144) mod 48) floordiv 16)
 // CHECK-DAG: [[MAP16:#map[0-9]+]] = (d0) -> ((((d0 mod 288) mod 144) mod 48) mod 16)
 // CHECK-DAG: [[MAP17:#map[0-9]+]] = (d0) -> (0)
-// CHECK-DAG: [[MAP18:#map[0-9]+]] = (d0, d1, d2, d3) -> (-d0 + d2)
-// CHECK-DAG: [[MAP19:#map[0-9]+]] = (d0, d1, d2, d3) -> (-d1 + d3)
+
 //
 // CHECK-LABEL: func @R6_to_R2_reshape
 // CHECK:       %0 = alloc() : memref<1x2x3x3x16x1xi32>
@@ -733,35 +657,19 @@
 // CHECK-NEXT:      %6 = affine.apply [[MAP3]](%i0, %i1)
 // CHECK-NEXT:      %7 = affine.apply [[MAP4]](%i0, %i1)
 // CHECK-NEXT:      %8 = "foo"(%3, %4, %5, %6, %7, %c0) : (index, index, index, index, index, index) -> i32
-// CHECK-NEXT:      %9 = affine.apply [[MAP5]](%i0, %i1, %3, %4, %5, %6, %7, %c0)
-// CHECK-NEXT:      %10 = affine.apply [[MAP6]](%i0, %i1, %3, %4, %5, %6, %7, %c0)
-// CHECK-NEXT:      %11 = affine.apply [[MAP7]](%i0, %i1, %3, %4, %5, %6, %7, %c0)
-// CHECK-NEXT:      %12 = affine.apply [[MAP8]](%i0, %i1, %3, %4, %5, %6, %7, %c0)
-// CHECK-NEXT:      %13 = affine.apply [[MAP9]](%i0, %i1, %3, %4, %5, %6, %7, %c0)
-// CHECK-NEXT:      %14 = affine.apply [[MAP10]](%i0, %i1, %3, %4, %5, %6, %7, %c0)
-// CHECK-NEXT:      store %8, %0[%9, %10, %11, %12, %13, %14] : memref<1x2x3x3x16x1xi32>
-// CHECK-NEXT:      %15 = affine.apply [[MAP11]](%i0, %i1)
-// CHECK-NEXT:      %16 = affine.apply [[MAP12]](%15)
-// CHECK-NEXT:      %17 = affine.apply [[MAP13]](%15)
-// CHECK-NEXT:      %18 = affine.apply [[MAP14]](%15)
-// CHECK-NEXT:      %19 = affine.apply [[MAP15]](%15)
-// CHECK-NEXT:      %20 = affine.apply [[MAP16]](%15)
-// CHECK-NEXT:      %21 = affine.apply [[MAP17]](%15)
-// CHECK-NEXT:      %22 = affine.apply [[MAP5]](%i0, %i1, %16, %17, %18, %19, %20, %21)
-// CHECK-NEXT:      %23 = affine.apply [[MAP6]](%i0, %i1, %16, %17, %18, %19, %20, %21)
-// CHECK-NEXT:      %24 = affine.apply [[MAP7]](%i0, %i1, %16, %17, %18, %19, %20, %21)
-// CHECK-NEXT:      %25 = affine.apply [[MAP8]](%i0, %i1, %16, %17, %18, %19, %20, %21)
-// CHECK-NEXT:      %26 = affine.apply [[MAP9]](%i0, %i1, %16, %17, %18, %19, %20, %21)
-// CHECK-NEXT:      %27 = affine.apply [[MAP10]](%i0, %i1, %16, %17, %18, %19, %20, %21)
-// CHECK-NEXT:      %28 = load %0[%22, %23, %24, %25, %26, %27] : memref<1x2x3x3x16x1xi32>
-// CHECK-NEXT:      %29 = affine.apply [[MAP18]](%i0, %i1, %i0, %i1)
-// CHECK-NEXT:      %30 = affine.apply [[MAP19]](%i0, %i1, %i0, %i1)
-// CHECK-NEXT:      store %28, %1[%29, %30] : memref<1x1xi32>
-// CHECK-NEXT:      %31 = affine.apply [[MAP18]](%i0, %i1, %i0, %i1)
-// CHECK-NEXT:      %32 = affine.apply [[MAP19]](%i0, %i1, %i0, %i1)
-// CHECK-NEXT:      %33 = load %1[%31, %32] : memref<1x1xi32>
-// CHECK-NEXT:      %34 = muli %33, %33 : i32
-// CHECK-NEXT:      store %34, %2[%i0, %i1] : memref<64x9xi32>
+// CHECK-NEXT:      affine.store %8, %0[0, ((%i0 * 9 + %i1) mod 288) floordiv 144, (((%i0 * 9 + %i1) mod 288) mod 144) floordiv 48, ((((%i0 * 9 + %i1) mod 288) mod 144) mod 48) floordiv 16, ((((%i0 * 9 + %i1) mod 288) mod 144) mod 48) mod 16, symbol(%c0)] : memref<1x2x3x3x16x1xi32>
+// CHECK-NEXT:      %9 = affine.apply [[MAP11]](%i0, %i1)
+// CHECK-NEXT:      %10 = affine.apply [[MAP12]](%9)
+// CHECK-NEXT:      %11 = affine.apply [[MAP13]](%9)
+// CHECK-NEXT:      %12 = affine.apply [[MAP14]](%9)
+// CHECK-NEXT:      %13 = affine.apply [[MAP15]](%9)
+// CHECK-NEXT:      %14 = affine.apply [[MAP16]](%9)
+// CHECK-NEXT:      %15 = affine.apply [[MAP17]](%9)
+// CHECK-NEXT:      %16 = affine.load %0[0, ((%i0 * 9 + %i1) mod 288) floordiv 144, (((%i0 * 9 + %i1) mod 288) mod 144) floordiv 48, ((((%i0 * 9 + %i1) mod 288) mod 144) mod 48) floordiv 16, ((((%i0 * 9 + %i1) mod 288) mod 144) mod 48) mod 16, 0] : memref<1x2x3x3x16x1xi32>
+// CHECK-NEXT:      affine.store %16, %1[0, 0] : memref<1x1xi32>
+// CHECK-NEXT:      %17 = affine.load %1[0, 0] : memref<1x1xi32>
+// CHECK-NEXT:      %18 = muli %17, %17 : i32
+// CHECK-NEXT:      affine.store %18, %2[%i0, %i1] : memref<64x9xi32>
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
 // CHECK-NEXT:  return %2 : memref<64x9xi32>
@@ -778,14 +686,14 @@
 
   affine.for %i0 = 0 to %M {
     affine.for %i1 = 0 to (d0) -> (d0 + 5) (%N) {
-      store %c0, %m[%i0, %i1] : memref<? x ? x f32>
+      affine.store %c0, %m[%i0, %i1] : memref<? x ? x f32>
     }
   }
 
   affine.for %i2 = 0 to %M {
     affine.for %i3 = 0 to %N {
       %idy = affine.apply (d0)[s0] -> (d0 + s0) (%i3)[%s]
-      %v = load %m[%i2, %idy] : memref<? x ? x f32>
+      %v = affine.load %m[%i2, %idy] : memref<? x ? x f32>
     }
   }
 
@@ -793,7 +701,6 @@
 }
 
 // -----
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
 
 // CHECK-LABEL: func @should_fuse_reduction_at_depth1
 func @should_fuse_reduction_at_depth1() {
@@ -802,18 +709,18 @@
 
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 100 {
-      %v0 = load %b[%i0] : memref<10xf32>
-      %v1 = load %a[%i0, %i1] : memref<10x100xf32>
+      %v0 = affine.load %b[%i0] : memref<10xf32>
+      %v1 = affine.load %a[%i0, %i1] : memref<10x100xf32>
       %v2 = "maxf"(%v0, %v1) : (f32, f32) -> f32
-      store %v2, %b[%i0] : memref<10xf32>
+      affine.store %v2, %b[%i0] : memref<10xf32>
     }
   }
   affine.for %i2 = 0 to 10 {
     affine.for %i3 = 0 to 100 {
-      %v3 = load %b[%i2] : memref<10xf32>
-      %v4 = load %a[%i2, %i3] : memref<10x100xf32>
+      %v3 = affine.load %b[%i2] : memref<10xf32>
+      %v4 = affine.load %a[%i2, %i3] : memref<10x100xf32>
       %v5 = subf %v4, %v3 : f32
-      store %v5, %b[%i2] : memref<10xf32>
+      affine.store %v5, %b[%i2] : memref<10xf32>
     }
   }
   // This test should fuse the src reduction loop at depth 1 in the destination
@@ -822,20 +729,16 @@
   // memory space.
   // CHECK:       affine.for %i0 = 0 to 10 {
   // CHECK-NEXT:    affine.for %i1 = 0 to 100 {
-  // CHECK-NEXT:      %2 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:      %3 = load %0[%2] : memref<1xf32>
-  // CHECK-NEXT:      %4 = load %1[%i0, %i1] : memref<10x100xf32>
-  // CHECK-NEXT:      %5 = "maxf"(%3, %4) : (f32, f32) -> f32
-  // CHECK-NEXT:      %6 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:      store %5, %0[%6] : memref<1xf32>
+  // CHECK-NEXT:      %2 = affine.load %0[0] : memref<1xf32>
+  // CHECK-NEXT:      %3 = affine.load %1[%i0, %i1] : memref<10x100xf32>
+  // CHECK-NEXT:      %4 = "maxf"(%2, %3) : (f32, f32) -> f32
+  // CHECK-NEXT:      affine.store %4, %0[0] : memref<1xf32>
   // CHECK-NEXT:    }
   // CHECK-NEXT:    affine.for %i2 = 0 to 100 {
-  // CHECK-NEXT:      %7 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:      %8 = load %0[%7] : memref<1xf32>
-  // CHECK-NEXT:      %9 = load %1[%i0, %i2] : memref<10x100xf32>
-  // CHECK-NEXT:      %10 = subf %9, %8 : f32
-  // CHECK-NEXT:      %11 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:      store %10, %0[%11] : memref<1xf32>
+  // CHECK-NEXT:      %5 = affine.load %0[0] : memref<1xf32>
+  // CHECK-NEXT:      %6 = affine.load %1[%i0, %i2] : memref<10x100xf32>
+  // CHECK-NEXT:      %7 = subf %6, %5 : f32
+  // CHECK-NEXT:      affine.store %7, %0[0] : memref<1xf32>
   // CHECK-NEXT:    }
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return
@@ -843,8 +746,6 @@
 }
 
 // -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1, d2) -> (-d0 + d1)
-// CHECK: [[MAP1:#map[0-9]+]] = (d0, d1, d2) -> (d2)
 
 // CHECK-LABEL: func @should_fuse_at_src_depth1_and_dst_depth1
 func @should_fuse_at_src_depth1_and_dst_depth1() {
@@ -853,18 +754,18 @@
 
   affine.for %i0 = 0 to 100 {
     affine.for %i1 = 0 to 16 {
-      %v0 = load %a[%i0, %i1] : memref<100x16xf32>
+      %v0 = affine.load %a[%i0, %i1] : memref<100x16xf32>
       "op0"(%v0) : (f32) -> ()
     }
     affine.for %i2 = 0 to 16 {
       %v1 = "op1"() : () -> (f32)
-      store %v1, %b[%i0, %i2] : memref<100x16xf32>
+      affine.store %v1, %b[%i0, %i2] : memref<100x16xf32>
     }
   }
 
   affine.for %i3 = 0 to 100 {
     affine.for %i4 = 0 to 16 {
-      %v2 = load %b[%i3, %i4] : memref<100x16xf32>
+      %v2 = affine.load %b[%i3, %i4] : memref<100x16xf32>
       "op2"(%v2) : (f32) -> ()
     }
   }
@@ -875,20 +776,16 @@
   // at depth 1 and the slice should be inserted at depth 1.
   // CHECK:       affine.for %i0 = 0 to 100 {
   // CHECK-NEXT:    affine.for %i1 = 0 to 16 {
-  // CHECK-NEXT:      %2 = load %1[%i0, %i1] : memref<100x16xf32>
+  // CHECK-NEXT:      %2 = affine.load %1[%i0, %i1] : memref<100x16xf32>
   // CHECK-NEXT:      "op0"(%2) : (f32) -> ()
   // CHECK-NEXT:    }
   // CHECK-NEXT:    affine.for %i2 = 0 to 16 {
   // CHECK-NEXT:      %3 = "op1"() : () -> f32
-  // CHECK-NEXT:      %4 = affine.apply [[MAP0]](%i0, %i0, %i2)
-  // CHECK-NEXT:      %5 = affine.apply [[MAP1]](%i0, %i0, %i2)
-  // CHECK-NEXT:      store %3, %0[%4, %5] : memref<1x16xf32>
+  // CHECK-NEXT:      affine.store %3, %0[0, %i2] : memref<1x16xf32>
   // CHECK-NEXT:    }
   // CHECK-NEXT:    affine.for %i3 = 0 to 16 {
-  // CHECK-NEXT:      %6 = affine.apply [[MAP0]](%i0, %i0, %i3)
-  // CHECK-NEXT:      %7 = affine.apply [[MAP1]](%i0, %i0, %i3)
-  // CHECK-NEXT:      %8 = load %0[%6, %7] : memref<1x16xf32>
-  // CHECK-NEXT:      "op2"(%8) : (f32) -> ()
+  // CHECK-NEXT:      %4 = affine.load %0[0, %i3] : memref<1x16xf32>
+  // CHECK-NEXT:      "op2"(%4) : (f32) -> ()
   // CHECK-NEXT:    }
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return
@@ -897,7 +794,6 @@
 
 // -----
 // CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (d0 * 10 + d1)
-// CHECK: [[MAP1:#map[0-9]+]] = (d0, d1, d2) -> (d0 * -10 - d1 + d2)
 
 // CHECK-LABEL: func @should_fuse_src_depth1_at_dst_depth2
 func @should_fuse_src_depth1_at_dst_depth2() {
@@ -905,13 +801,13 @@
   %c0 = constant 0.0 : f32
 
   affine.for %i0 = 0 to 100 {
-    store %c0, %a[%i0] : memref<100xf32>
+    affine.store %c0, %a[%i0] : memref<100xf32>
   }
 
   affine.for %i1 = 0 to 10 {
     affine.for %i2 = 0 to 10 {
       %a0 = affine.apply (d0, d1) -> (d0 * 10 + d1) (%i1, %i2)
-      %v0 = load %a[%a0] : memref<100xf32>
+      %v0 = affine.load %a[%a0] : memref<100xf32>
     }
   }
   // The source loop nest slice loop bound is a function of both destination
@@ -919,11 +815,9 @@
   // CHECK:       affine.for %i0 = 0 to 10 {
   // CHECK-NEXT:    affine.for %i1 = 0 to 10 {
   // CHECK-NEXT:      %1 = affine.apply [[MAP0]](%i0, %i1)
-  // CHECK-NEXT:      %2 = affine.apply [[MAP1]](%i0, %i1, %1)
-  // CHECK-NEXT:      store %cst, %0[%2] : memref<1xf32>
-  // CHECK-NEXT:      %3 = affine.apply [[MAP0]](%i0, %i1)
-  // CHECK-NEXT:      %4 = affine.apply [[MAP1]](%i0, %i1, %3)
-  // CHECK-NEXT:      %5 = load %0[%4] : memref<1xf32>
+  // CHECK-NEXT:      affine.store %cst, %0[0] : memref<1xf32>
+  // CHECK-NEXT:      %2 = affine.apply [[MAP0]](%i0, %i1)
+  // CHECK-NEXT:      %3 = affine.load %0[0] : memref<1xf32>
   // CHECK-NEXT:    }
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return
@@ -938,17 +832,17 @@
   %c0 = constant 0 : index
   %cst = constant 0.000000e+00 : f32
   affine.for %i0 = 0 to 10 {
-    store %cst, %0[%i0] : memref<10xf32>
+    affine.store %cst, %0[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    %1 = load %0[%c0] : memref<10xf32>
+    %1 = affine.load %0[%c0] : memref<10xf32>
   }
   // NOTE: Should shrink memref size to 1 element access by load in dst loop
   // nest, and make the store in the slice store to the same element.
   // CHECK-DAG:   %0 = alloc() : memref<1xf32>
   // CHECK:       affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:    store %cst, %0[%c0] : memref<1xf32>
-  // CHECK-NEXT:    %1 = load %0[%c0_0] : memref<1xf32>
+  // CHECK-NEXT:    affine.store %cst, %0[symbol(%c0)] : memref<1xf32>
+  // CHECK-NEXT:    %1 = affine.load %0[symbol(%c0_0)] : memref<1xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return
   return
@@ -956,13 +850,6 @@
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (-d0 + d4)
-// CHECK: [[MAP1:#map[0-9]+]] = (d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (-d1 + d5)
-// CHECK: [[MAP2:#map[0-9]+]] = (d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (-d2 + d6)
-// CHECK: [[MAP3:#map[0-9]+]] = (d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (-d3 + d7)
-// CHECK: [[MAP4:#map[0-9]+]] = (d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d8)
-// CHECK: [[MAP5:#map[0-9]+]] = (d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) -> (d9)
-
 // CHECK-LABEL: func @should_fuse_deep_loop_nests
 func @should_fuse_deep_loop_nests() {
   %0 = alloc() : memref<2x2x3x3x16x10xf32, 2>
@@ -978,13 +865,13 @@
         affine.for %i3 = 0 to 3 {
           affine.for %i4 = 0 to 16 {
             affine.for %i5 = 0 to 10 {
-              %3 = load %0[%i0, %i1, %i2, %i3, %i4, %i5]
+              %3 = affine.load %0[%i0, %i1, %i2, %i3, %i4, %i5]
                 : memref<2x2x3x3x16x10xf32, 2>
             }
           }
           affine.for %i6 = 0 to 16 {
             affine.for %i7 = 0 to 10 {
-              store %cst, %1[%i0, %i1, %i2, %i3, %i6, %i7]
+              affine.store %cst, %1[%i0, %i1, %i2, %i3, %i6, %i7]
                 : memref<2x2x3x3x16x10xf32, 2>
             }
           }
@@ -1002,13 +889,13 @@
                 affine.for %i15 = 0 to 2 {
                   affine.for %i16 = 0 to 16 {
                     affine.for %i17 = 0 to 10 {
-                      %5 = load %0[%i14, %i15, %i12, %i13, %i16, %i17]
+                      %5 = affine.load %0[%i14, %i15, %i12, %i13, %i16, %i17]
                         : memref<2x2x3x3x16x10xf32, 2>
                     }
                   }
                   affine.for %i18 = 0 to 16 {
                     affine.for %i19 = 0 to 10 {
-                      %6 = load %1[%i10, %i11, %i8, %i9, %i18, %i19]
+                      %6 = affine.load %1[%i10, %i11, %i8, %i9, %i18, %i19]
                         : memref<2x2x3x3x16x10xf32, 2>
                     }
                   }
@@ -1033,36 +920,24 @@
 // CHECK-NEXT:            affine.for %i5 = 0 to 3 {
 // CHECK-NEXT:              affine.for %i6 = 0 to 16 {
 // CHECK-NEXT:                affine.for %i7 = 0 to 10 {
-// CHECK-NEXT:                  %3 = load %1[%i2, %i3, %i0, %i1, %i6, %i7] : memref<2x2x3x3x16x10xf32, 2>
+// CHECK-NEXT:                  %3 = affine.load %1[%i2, %i3, %i0, %i1, %i6, %i7] : memref<2x2x3x3x16x10xf32, 2>
 // CHECK-NEXT:                }
 // CHECK-NEXT:              }
 // CHECK-NEXT:              affine.for %i8 = 0 to 16 {
 // CHECK-NEXT:                affine.for %i9 = 0 to 10 {
-// CHECK-NEXT:                  %4 = affine.apply [[MAP0]](%i2, %i3, %i0, %i1, %i2, %i3, %i0, %i1, %i8, %i9)
-// CHECK-NEXT:                  %5 = affine.apply [[MAP1]](%i2, %i3, %i0, %i1, %i2, %i3, %i0, %i1, %i8, %i9)
-// CHECK-NEXT:                  %6 = affine.apply [[MAP2]](%i2, %i3, %i0, %i1, %i2, %i3, %i0, %i1, %i8, %i9)
-// CHECK-NEXT:                  %7 = affine.apply [[MAP3]](%i2, %i3, %i0, %i1, %i2, %i3, %i0, %i1, %i8, %i9)
-// CHECK-NEXT:                  %8 = affine.apply [[MAP4]](%i2, %i3, %i0, %i1, %i2, %i3, %i0, %i1, %i8, %i9)
-// CHECK-NEXT:                  %9 = affine.apply [[MAP5]](%i2, %i3, %i0, %i1, %i2, %i3, %i0, %i1, %i8, %i9)
-// CHECK-NEXT:                  store %cst, %0[%4, %5, %6, %7, %8, %9] : memref<1x1x1x1x16x10xf32, 2>
+// CHECK-NEXT:                  affine.store %cst, %0[0, 0, 0, 0, %i8, %i9] : memref<1x1x1x1x16x10xf32, 2>
 // CHECK-NEXT:                }
 // CHECK-NEXT:              }
 // CHECK-NEXT:              affine.for %i10 = 0 to 2 {
 // CHECK-NEXT:                affine.for %i11 = 0 to 2 {
 // CHECK-NEXT:                  affine.for %i12 = 0 to 16 {
 // CHECK-NEXT:                    affine.for %i13 = 0 to 10 {
-// CHECK-NEXT:                      %10 = load %1[%i10, %i11, %i4, %i5, %i12, %i13] : memref<2x2x3x3x16x10xf32, 2>
+// CHECK-NEXT:                      %4 = affine.load %1[%i10, %i11, %i4, %i5, %i12, %i13] : memref<2x2x3x3x16x10xf32, 2>
 // CHECK-NEXT:                    }
 // CHECK-NEXT:                  }
 // CHECK-NEXT:                  affine.for %i14 = 0 to 16 {
 // CHECK-NEXT:                    affine.for %i15 = 0 to 10 {
-// CHECK-NEXT:                      %11 = affine.apply [[MAP0]](%i2, %i3, %i0, %i1, %i2, %i3, %i0, %i1, %i14, %i15)
-// CHECK-NEXT:                      %12 = affine.apply [[MAP1]](%i2, %i3, %i0, %i1, %i2, %i3, %i0, %i1, %i14, %i15)
-// CHECK-NEXT:                      %13 = affine.apply [[MAP2]](%i2, %i3, %i0, %i1, %i2, %i3, %i0, %i1, %i14, %i15)
-// CHECK-NEXT:                      %14 = affine.apply [[MAP3]](%i2, %i3, %i0, %i1, %i2, %i3, %i0, %i1, %i14, %i15)
-// CHECK-NEXT:                      %15 = affine.apply [[MAP4]](%i2, %i3, %i0, %i1, %i2, %i3, %i0, %i1, %i14, %i15)
-// CHECK-NEXT:                      %16 = affine.apply [[MAP5]](%i2, %i3, %i0, %i1, %i2, %i3, %i0, %i1, %i14, %i15)
-// CHECK-NEXT:                      %17 = load %0[%11, %12, %13, %14, %15, %16] : memref<1x1x1x1x16x10xf32, 2>
+// CHECK-NEXT:                      %5 = affine.load %0[0, 0, 0, 0, %i14, %i15] : memref<1x1x1x1x16x10xf32, 2>
 // CHECK-NEXT:                    }
 // CHECK-NEXT:                  }
 // CHECK-NEXT:                }
@@ -1078,8 +953,6 @@
 }
 
 // -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1, d2) -> (-d0 + d1)
-// CHECK: [[MAP1:#map[0-9]+]] = (d0, d1, d2) -> (d2)
 
 // CHECK-LABEL: func @should_fuse_at_depth1_and_reduce_slice_trip_count
 func @should_fuse_at_depth1_and_reduce_slice_trip_count() {
@@ -1091,16 +964,16 @@
 
   affine.for %i0 = 0 to 4 {
     affine.for %i1 = 0 to 256 {
-      %v0 = load %b[%i0, %i1] : memref<4x256xf32>
+      %v0 = affine.load %b[%i0, %i1] : memref<4x256xf32>
     }
     affine.for %i2 = 0 to 256 {
-      store %cf0, %a[%i0, %i2] : memref<4x256xf32>
+      affine.store %cf0, %a[%i0, %i2] : memref<4x256xf32>
     }
   }
 
   affine.for %d0 = 0 to 4 {
     affine.for %d1 = 0 to 16 {
-      %v1 = load %a[%d0, %d1] : memref<4x256xf32>
+      %v1 = affine.load %a[%d0, %d1] : memref<4x256xf32>
     }
   }
   // The cost of fusing at depth 2 is greater than the cost of fusing at depth 1
@@ -1115,17 +988,13 @@
   // CHECK-DAG:   %0 = alloc() : memref<1x16xf32>
   // CHECK:       affine.for %i0 = 0 to 4 {
   // CHECK-NEXT:    affine.for %i1 = 0 to 256 {
-  // CHECK-NEXT:      %2 = load %1[%i0, %i1] : memref<4x256xf32>
+  // CHECK-NEXT:      %2 = affine.load %1[%i0, %i1] : memref<4x256xf32>
   // CHECK-NEXT:    }
   // CHECK-NEXT:    affine.for %i2 = 0 to 16 {
-  // CHECK-NEXT:      %3 = affine.apply [[MAP0]](%i0, %i0, %i2)
-  // CHECK-NEXT:      %4 = affine.apply [[MAP1]](%i0, %i0, %i2)
-  // CHECK-NEXT:      store %cst, %0[%3, %4] : memref<1x16xf32>
+  // CHECK-NEXT:      affine.store %cst, %0[0, %i2] : memref<1x16xf32>
   // CHECK-NEXT:    }
   // CHECK-NEXT:    affine.for %i3 = 0 to 16 {
-  // CHECK-NEXT:      %5 = affine.apply [[MAP0]](%i0, %i0, %i3)
-  // CHECK-NEXT:      %6 = affine.apply [[MAP1]](%i0, %i0, %i3)
-  // CHECK-NEXT:      %7 = load %0[%5, %6] : memref<1x16xf32>
+  // CHECK-NEXT:      %3 = affine.load %0[0, %i3] : memref<1x16xf32>
   // CHECK-NEXT:    }
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return
@@ -1141,16 +1010,16 @@
   %cf0 = constant 0.0 : f32
 
   affine.for %i0 = 0 to 100 {
-    store %cf0, %a[%i0]: memref<100xf32>
+    affine.store %cf0, %a[%i0]: memref<100xf32>
   }
 
   affine.for %i1 = 0 to 5 {
     affine.for %i2 = 0 to 10 {
-      %v0 = load %a[%i2]: memref<100xf32>
+      %v0 = affine.load %a[%i2]: memref<100xf32>
     }
     affine.for %i3 = 0 to 10 {
       affine.for %i4 = 0 to 20 {
-        %v1 = load %a[%i4]: memref<100xf32>
+        %v1 = affine.load %a[%i4]: memref<100xf32>
       }
     }
   }
@@ -1158,14 +1027,14 @@
   // CHECK-DAG:   %0 = alloc() : memref<20xf32>
   // CHECK:       affine.for %i0 = 0 to 5 {
   // CHECK-NEXT:    affine.for %i1 = 0 to 20 {
-  // CHECK-NEXT:      store %cst, %0[%i1] : memref<20xf32>
+  // CHECK-NEXT:      affine.store %cst, %0[%i1] : memref<20xf32>
   // CHECK-NEXT:    }
   // CHECK-NEXT:    affine.for %i2 = 0 to 10 {
-  // CHECK-NEXT:      %1 = load %0[%i2] : memref<20xf32>
+  // CHECK-NEXT:      %1 = affine.load %0[%i2] : memref<20xf32>
   // CHECK-NEXT:    }
   // CHECK-NEXT:    affine.for %i3 = 0 to 10 {
   // CHECK-NEXT:      affine.for %i4 = 0 to 20 {
-  // CHECK-NEXT:        %2 = load %0[%i4] : memref<20xf32>
+  // CHECK-NEXT:        %2 = affine.load %0[%i4] : memref<20xf32>
   // CHECK-NEXT:      }
   // CHECK-NEXT:    }
   // CHECK-NEXT:  }
@@ -1182,16 +1051,16 @@
   %cf0 = constant 0.0 : f32
 
   affine.for %i0 = 0 to 100 {
-    store %cf0, %a[%i0]: memref<100xf32>
+    affine.store %cf0, %a[%i0]: memref<100xf32>
   }
 
   affine.for %i1 = 0 to 5 {
     affine.for %i2 = 0 to 19 {
-      %v0 = load %a[%i2]: memref<100xf32>
+      %v0 = affine.load %a[%i2]: memref<100xf32>
     }
     affine.for %i3 = 0 to 10 {
       affine.for %i4 = 0 to 10 {
-        %v1 = load %a[%i4]: memref<100xf32>
+        %v1 = affine.load %a[%i4]: memref<100xf32>
       }
     }
   }
@@ -1199,14 +1068,14 @@
   // CHECK-DAG:   %0 = alloc() : memref<19xf32>
   // CHECK:       affine.for %i0 = 0 to 5 {
   // CHECK-NEXT:    affine.for %i1 = 0 to 19 {
-  // CHECK-NEXT:      store %cst, %0[%i1] : memref<19xf32>
+  // CHECK-NEXT:      affine.store %cst, %0[%i1] : memref<19xf32>
   // CHECK-NEXT:    }
   // CHECK-NEXT:    affine.for %i2 = 0 to 19 {
-  // CHECK-NEXT:      %1 = load %0[%i2] : memref<19xf32>
+  // CHECK-NEXT:      %1 = affine.load %0[%i2] : memref<19xf32>
   // CHECK-NEXT:    }
   // CHECK-NEXT:    affine.for %i3 = 0 to 10 {
   // CHECK-NEXT:      affine.for %i4 = 0 to 10 {
-  // CHECK-NEXT:        %2 = load %0[%i4] : memref<19xf32>
+  // CHECK-NEXT:        %2 = affine.load %0[%i4] : memref<19xf32>
   // CHECK-NEXT:      }
   // CHECK-NEXT:    }
   // CHECK-NEXT:  }
@@ -1216,7 +1085,6 @@
 
 
 // -----
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
 
 // CHECK-LABEL: func @should_fuse_with_private_memrefs_with_diff_shapes() {
 func @should_fuse_with_private_memrefs_with_diff_shapes() {
@@ -1224,29 +1092,25 @@
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 100 {
-    store %cf7, %m[%i0] : memref<100xf32>
+    affine.store %cf7, %m[%i0] : memref<100xf32>
   }
   affine.for %i1 = 0 to 17 {
-    %v0 = load %m[%i1] : memref<100xf32>
+    %v0 = affine.load %m[%i1] : memref<100xf32>
   }
   affine.for %i2 = 0 to 82 {
-    %v1 = load %m[%i2] : memref<100xf32>
+    %v1 = affine.load %m[%i2] : memref<100xf32>
   }
   // Should create two new private memrefs customized to the shapes accessed
   // by loops %i1 and %i2.
   // CHECK-DAG:  %0 = alloc() : memref<1xf32>
   // CHECK-DAG:  %1 = alloc() : memref<1xf32>
   // CHECK:      affine.for %i0 = 0 to 17 {
-  // CHECK-NEXT:   %2 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:   store %cst, %1[%2] : memref<1xf32>
-  // CHECK-NEXT:   %3 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:   %4 = load %1[%3] : memref<1xf32>
+  // CHECK-NEXT:   affine.store %cst, %1[0] : memref<1xf32>
+  // CHECK-NEXT:   %2 = affine.load %1[0] : memref<1xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: affine.for %i1 = 0 to 82 {
-  // CHECK-NEXT:   %5 = affine.apply [[MAP0]](%i1, %i1)
-  // CHECK-NEXT:   store %cst, %0[%5] : memref<1xf32>
-  // CHECK-NEXT:   %6 = affine.apply [[MAP0]](%i1, %i1)
-  // CHECK-NEXT:   %7 = load %0[%6] : memref<1xf32>
+  // CHECK-NEXT:   affine.store %cst, %0[0] : memref<1xf32>
+  // CHECK-NEXT:   %3 = affine.load %0[0] : memref<1xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return
   return
@@ -1259,10 +1123,10 @@
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 10 {
-    store %cf7, %arg0[%i0] : memref<10xf32>
+    affine.store %cf7, %arg0[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 9 {
-    %v0 = load %arg0[%i1] : memref<10xf32>
+    %v0 = affine.load %arg0[%i1] : memref<10xf32>
   }
   // This tests that the loop nest '%i0' should not be removed after fusion
   // because it writes to memref argument '%arg0', and its read region
@@ -1270,10 +1134,10 @@
   // in the fused loop nest, so complete live out data region would not
   // be written).
   // CHECK:       affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:    store %cst, %arg0[%i0] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %arg0[%i0] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  affine.for %i1 = 0 to 9 {
-  // CHECK-NEXT:    %0 = load %arg0[%i1] : memref<10xf32>
+  // CHECK-NEXT:    %0 = affine.load %arg0[%i1] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return
   return
@@ -1286,17 +1150,17 @@
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 10 {
-    store %cf7, %arg0[%i0] : memref<10xf32>
+    affine.store %cf7, %arg0[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    %v0 = load %arg0[%i1] : memref<10xf32>
+    %v0 = affine.load %arg0[%i1] : memref<10xf32>
   }
   // The read/write regions for memref '%arg0' are the same for both
   // loops, so they should fuse.
 
   // CHECK:       affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:    store %cst, %arg0[%i0] : memref<10xf32>
-  // CHECK-NEXT:    %0 = load %arg0[%i0] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %arg0[%i0] : memref<10xf32>
+  // CHECK-NEXT:    %0 = affine.load %arg0[%i0] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return
   return
@@ -1309,19 +1173,19 @@
   %cf7 = constant 7.0 : f32
   %m = alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
-    store %cf7, %m[%i0] : memref<10xf32>
+    affine.store %cf7, %m[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 9 {
-    %v0 = load %m[%i1] : memref<10xf32>
+    %v0 = affine.load %m[%i1] : memref<10xf32>
   }
   // This tests that the loop nest '%i0' should not be removed after fusion
   // because it writes to memref '%m' which is returned by the function. 
   // CHECK-DAG:   %0 = alloc() : memref<10xf32>
   // CHECK:       affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:    store %cst, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %0[%i0] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  affine.for %i1 = 0 to 9 {
-  // CHECK-NEXT:    %1 = load %0[%i1] : memref<10xf32>
+  // CHECK-NEXT:    %1 = affine.load %0[%i1] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return %0 : memref<10xf32>
   return %m : memref<10xf32>
@@ -1339,7 +1203,7 @@
     affine.for %i1 = 0 to 3 {
       affine.for %i2 = 0 to 16 {
         %val = "foo"(%i0, %i1, %i2) : (index, index, index) -> i32
-        store %val, %in[%i0, %i1, %i2] : memref<2x3x16xi32>
+        affine.store %val, %in[%i0, %i1, %i2] : memref<2x3x16xi32>
       }
     }
   }
@@ -1348,18 +1212,15 @@
     affine.for %jj = 0 to 3 {
       %a0 = affine.apply (d0, d1) -> (d0 * 3 + d1) (%ii, %jj)
       %idx = affine.apply (d0) -> (d0 floordiv (3 * 16)) (%a0)
-      %v = load %in[%idx, %jj, %c0]
+      %v = affine.load %in[%idx, %jj, %c0]
         : memref<2x3x16xi32>
     }
   }
   return
 }
-// CHECK:      [[MAP0:#map[0-9]+]] = (d0, d1) -> ((d0 * 3 + d1) floordiv 48)
-// CHECK-NEXT: [[MAP2:#map[0-9]+]] = (d0, d1, d2, d3, d4) -> (d2 - (d0 * 3 + d1) floordiv 48)
-// CHECK-NEXT: [[MAP3:#map[0-9]+]] = (d0, d1, d2, d3, d4) -> (-d1 + d3)
-// CHECK-NEXT: [[MAP4:#map[0-9]+]] = (d0, d1, d2, d3, d4) -> (d4)
-// CHECK-NEXT: [[MAP5:#map[0-9]+]] = (d0, d1) -> (d0 * 3 + d1)
-// CHECK-NEXT: [[MAP6:#map[0-9]+]] = (d0) -> (d0 floordiv 48)
+// CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0, d1) -> ((d0 * 3 + d1) floordiv 48)
+// CHECK-DAG: [[MAP1:#map[0-9]+]] = (d0, d1) -> (d0 * 3 + d1)
+// CHECK-DAG: [[MAP2:#map[0-9]+]] = (d0) -> (d0 floordiv 48)
 
 // CHECK-LABEL: func @R3_to_R2_reshape()
 // CHECK-DAG:    %0 = alloc() : memref<1x1x1xi32>
@@ -1367,16 +1228,10 @@
 // CHECK-NEXT:     affine.for %i1 = 0 to 3 {
 // CHECK-NEXT:      %1 = affine.apply [[MAP0]](%i0, %i1)
 // CHECK-NEXT:      %2 = "foo"(%1, %i1, %c0) : (index, index, index) -> i32
-// CHECK-NEXT:      %3 = affine.apply [[MAP2]](%i0, %i1, %1, %i1, %c0)
-// CHECK-NEXT:      %4 = affine.apply [[MAP3]](%i0, %i1, %1, %i1, %c0)
-// CHECK-NEXT:      %5 = affine.apply [[MAP4]](%i0, %i1, %1, %i1, %c0)
-// CHECK-NEXT:      store %2, %0[%3, %4, %5] : memref<1x1x1xi32>
-// CHECK-NEXT:      %6 = affine.apply [[MAP5]](%i0, %i1)
-// CHECK-NEXT:      %7 = affine.apply [[MAP6]](%6)
-// CHECK-NEXT:      %8 = affine.apply [[MAP2]](%i0, %i1, %7, %i1, %c0_0)
-// CHECK-NEXT:      %9 = affine.apply [[MAP3]](%i0, %i1, %7, %i1, %c0_0)
-// CHECK-NEXT:      %10 = affine.apply [[MAP4]](%i0, %i1, %7, %i1, %c0_0)
-// CHECK-NEXT:      %11 = load %0[%8, %9, %10] : memref<1x1x1xi32>
+// CHECK-NEXT:      affine.store %2, %0[0, 0, symbol(%c0)] : memref<1x1x1xi32>
+// CHECK-NEXT:      %3 = affine.apply [[MAP1]](%i0, %i1)
+// CHECK-NEXT:      %4 = affine.apply [[MAP2]](%3)
+// CHECK-NEXT:      %5 = affine.load %0[0, 0, symbol(%c0_0)] : memref<1x1x1xi32>
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
 // CHECK-NEXT:  return
@@ -1391,19 +1246,19 @@
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 10 {
-    store %cf7, %a[%i0] : memref<10xf32>
-    store %cf7, %b[%i0] : memref<10xf32>
+    affine.store %cf7, %a[%i0] : memref<10xf32>
+    affine.store %cf7, %b[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    %v0 = load %a[%i1] : memref<10xf32>
+    %v0 = affine.load %a[%i1] : memref<10xf32>
   }
 
   // CHECK:       affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:    store %cst, %0[%i0] : memref<10xf32>
-  // CHECK-NEXT:    store %cst, %1[%i0] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %1[%i0] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:    %2 = load %0[%i1] : memref<10xf32>
+  // CHECK-NEXT:    %2 = affine.load %0[%i1] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return
   return
@@ -1420,31 +1275,31 @@
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 10 {
-    %v0 = load %a[%i0] : memref<10xf32>
-    store %v0, %b[%i0] : memref<10xf32>
+    %v0 = affine.load %a[%i0] : memref<10xf32>
+    affine.store %v0, %b[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    store %cf7, %a[%i1] : memref<10xf32>
-    %v1 = load %c[%i1] : memref<10xf32>
+    affine.store %cf7, %a[%i1] : memref<10xf32>
+    %v1 = affine.load %c[%i1] : memref<10xf32>
   }
   affine.for %i2 = 0 to 10 {
-    %v2 = load %b[%i2] : memref<10xf32>
-    store %v2, %c[%i2] : memref<10xf32>
+    %v2 = affine.load %b[%i2] : memref<10xf32>
+    affine.store %v2, %c[%i2] : memref<10xf32>
   }
   // Loops '%i0' and '%i2' cannot fuse along producer/consumer edge on memref
   // '%b', because of the WAR dep from '%i0' to '%i1' on memref '%a' and
   // because of the WAR dep from '%i1' to '%i2' on memref '%c'.
   // CHECK:       affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:    %3 = load %0[%i0] : memref<10xf32>
-  // CHECK-NEXT:    store %3, %1[%i0] : memref<10xf32>
+  // CHECK-NEXT:    %3 = affine.load %0[%i0] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %3, %1[%i0] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:    store %cst, %0[%i1] : memref<10xf32>
-  // CHECK-NEXT:    %4 = load %2[%i1] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT:    %4 = affine.load %2[%i1] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  affine.for %i2 = 0 to 10 {
-  // CHECK-NEXT:    %5 = load %1[%i2] : memref<10xf32>
-  // CHECK-NEXT:    store %5, %2[%i2] : memref<10xf32>
+  // CHECK-NEXT:    %5 = affine.load %1[%i2] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %5, %2[%i2] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return
   return
@@ -1452,8 +1307,6 @@
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
-
 // CHECK-LABEL: func @should_fuse_and_move_to_preserve_war_dep() {
 func @should_fuse_and_move_to_preserve_war_dep() {
   %a = alloc() : memref<10xf32>
@@ -1463,18 +1316,18 @@
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 10 {
-    %v0 = load %b[%i0] : memref<10xf32>
-    store %v0, %a[%i0] : memref<10xf32>
+    %v0 = affine.load %b[%i0] : memref<10xf32>
+    affine.store %v0, %a[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 3 {
-    %v2 = load %c[%i1] : memref<10xf32>
+    %v2 = affine.load %c[%i1] : memref<10xf32>
   }
   affine.for %i2 = 0 to 5 {
-    store %cf7, %b[%i2] : memref<10xf32>
+    affine.store %cf7, %b[%i2] : memref<10xf32>
   }
   affine.for %i3 = 0 to 10 {
-    %v1 = load %a[%i3] : memref<10xf32>
-    store %cf7, %c[%i3] : memref<10xf32>
+    %v1 = affine.load %a[%i3] : memref<10xf32>
+    affine.store %cf7, %c[%i3] : memref<10xf32>
   }
 
   // Dependence graph:
@@ -1492,18 +1345,16 @@
 
   // CHECK-DAG:   %0 = alloc() : memref<1xf32>
   // CHECK:       affine.for %i0 = 0 to 3 {
-  // CHECK-NEXT:    %3 = load %2[%i0] : memref<10xf32>
+  // CHECK-NEXT:    %3 = affine.load %2[%i0] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:    %4 = load %1[%i1] : memref<10xf32>
-  // CHECK-NEXT:    %5 = affine.apply [[MAP0]](%i1, %i1)
-  // CHECK-NEXT:    store %4, %0[%5] : memref<1xf32>
-  // CHECK-NEXT:    %6 = affine.apply [[MAP0]](%i1, %i1)
-  // CHECK-NEXT:    %7 = load %0[%6] : memref<1xf32>
-  // CHECK-NEXT:    store %cst, %2[%i1] : memref<10xf32>
+  // CHECK-NEXT:    %4 = affine.load %1[%i1] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %4, %0[0] : memref<1xf32>
+  // CHECK-NEXT:    %5 = affine.load %0[0] : memref<1xf32>
+  // CHECK-NEXT:    affine.store %cst, %2[%i1] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  affine.for %i2 = 0 to 5 {
-  // CHECK-NEXT:    store %cst, %1[%i2] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %1[%i2] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return
   return
@@ -1520,31 +1371,31 @@
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 10 {
-    %v0 = load %b[%i0] : memref<10xf32>
-    store %cf7, %a[%i0] : memref<10xf32>
+    %v0 = affine.load %b[%i0] : memref<10xf32>
+    affine.store %cf7, %a[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    store %cf7, %b[%i1] : memref<10xf32>
+    affine.store %cf7, %b[%i1] : memref<10xf32>
   }
   %cf11 = constant 11.0 : f32
   affine.for %i2 = 0 to 10 {
-    %v2 = load %a[%i2] : memref<10xf32>
-    store %cf11, %c[%i2] : memref<10xf32>
+    %v2 = affine.load %a[%i2] : memref<10xf32>
+    affine.store %cf11, %c[%i2] : memref<10xf32>
   }
   // Loops '%i0' and '%i2' cannot fuse along producer/consumer edge on memref
   // '%a', because of the WAR dep from '%i0' to '%i1' on memref '%b' and
   // because of the SSA value dep from '%cf11' def to use in '%i2'.
   // CHECK:       affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:    %3 = load %1[%i0] : memref<10xf32>
-  // CHECK-NEXT:    store %cst, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT:    %3 = affine.load %1[%i0] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %0[%i0] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:    store %cst, %1[%i1] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %1[%i1] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  %cst_0 = constant 1.100000e+01 : f32
   // CHECK-NEXT:  affine.for %i2 = 0 to 10 {
-  // CHECK-NEXT:    %4 = load %0[%i2] : memref<10xf32>
-  // CHECK-NEXT:    store %cst_0, %2[%i2] : memref<10xf32>
+  // CHECK-NEXT:    %4 = affine.load %0[%i2] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst_0, %2[%i2] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return
   return
@@ -1552,8 +1403,6 @@
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
-
 // CHECK-LABEL: func @should_fuse_and_preserve_dep_on_constant() {
 func @should_fuse_and_preserve_dep_on_constant() {
   %a = alloc() : memref<10xf32>
@@ -1563,15 +1412,15 @@
   %cf7 = constant 7.0 : f32
   %cf11 = constant 11.0 : f32
   affine.for %i0 = 0 to 10 {
-    %v0 = load %b[%i0] : memref<10xf32>
-    store %cf7, %a[%i0] : memref<10xf32>
+    %v0 = affine.load %b[%i0] : memref<10xf32>
+    affine.store %cf7, %a[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    store %cf7, %b[%i1] : memref<10xf32>
+    affine.store %cf7, %b[%i1] : memref<10xf32>
   }
   affine.for %i2 = 0 to 10 {
-    %v2 = load %a[%i2] : memref<10xf32>
-    store %cf11, %c[%i2] : memref<10xf32>
+    %v2 = affine.load %a[%i2] : memref<10xf32>
+    affine.store %cf11, %c[%i2] : memref<10xf32>
   }
 
   // Loops '%i0' and '%i2' can fuse along producer/consumer edge on memref
@@ -1580,15 +1429,13 @@
 
   // CHECK:       %cst_0 = constant 1.100000e+01 : f32
   // CHECK-NEXT:  affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:    %3 = load %1[%i0] : memref<10xf32>
-  // CHECK-NEXT:    %4 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:    store %cst, %0[%4] : memref<1xf32>
-  // CHECK-NEXT:    %5 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:    %6 = load %0[%5] : memref<1xf32>
-  // CHECK-NEXT:    store %cst_0, %2[%i0] : memref<10xf32>
+  // CHECK-NEXT:    %3 = affine.load %1[%i0] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %0[0] : memref<1xf32>
+  // CHECK-NEXT:    %4 = affine.load %0[0] : memref<1xf32>
+  // CHECK-NEXT:    affine.store %cst_0, %2[%i0] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:    store %cst, %1[%i1] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %1[%i1] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return
   return
@@ -1596,8 +1443,6 @@
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1, d2) -> (d1)
-// CHECK: [[MAP1:#map[0-9]+]] = (d0, d1, d2) -> (-d0 + d2)
 // CHECK: [[MAP2:#map[0-9]+]] = (d0, d1) -> (d0 * 16 - d1 + 15)
 // CHECK: [[MAP3:#map[0-9]+]] = (d0, d1) -> (d0 * 16 + d1)
 
@@ -1607,28 +1452,28 @@
   %0 = constant 0.0 : f32
   affine.for %i0 = 0 to 64 {
     affine.for %i1 = 0 to 4 {
-      store %0, %out[%i0, %i1] : memref<64x4xf32>
+      affine.store %0, %out[%i0, %i1] : memref<64x4xf32>
     }
   }
   affine.for %i2 = 0 to 4 {
     affine.for %i3 = 0 to 4 {
       affine.for %i4 = 0 to 16 {
         %1 = affine.apply (d0, d1) -> (d0 * 16 - d1 + 15)(%i3, %i4)
-        %2 = load %arg1[%1, %i2] : memref<64x4xf32>
+        %2 = affine.load %arg1[%1, %i2] : memref<64x4xf32>
         "op0"(%2) : (f32) -> ()
       }
       affine.for %i5 = 0 to 4 {
         affine.for %i6 = 0 to 16 {
           %3 = affine.apply (d0, d1) -> (d0 * 16 - d1 + 15)(%i5, %i6)
-          %4 = load %arg0[%3, %i3] : memref<64x4xf32>
+          %4 = affine.load %arg0[%3, %i3] : memref<64x4xf32>
           "op1"(%4) : (f32) -> ()
         }
         affine.for %i7 = 0 to 16 {
           %5 = "op2"() : () -> (f32)
           %6 = affine.apply (d0, d1) -> (d0 * 16 + d1)(%i5, %i7)
-          %7 = load %out[%6, %i2] : memref<64x4xf32>
+          %7 = affine.load %out[%6, %i2] : memref<64x4xf32>
           %8 = addf %7, %5 : f32
-          store %8, %out[%6, %i2] : memref<64x4xf32>
+          affine.store %8, %out[%6, %i2] : memref<64x4xf32>
         }
       }
     }
@@ -1645,32 +1490,26 @@
   // CHECK:       %0 = alloc() : memref<64x1xf32>
   // CHECK:       affine.for %i0 = 0 to 4 {
   // CHECK-NEXT:    affine.for %i1 = 0 to 64 {
-  // CHECK-NEXT:      %1 = affine.apply [[MAP0]](%i0, %i1, %i0)
-  // CHECK-NEXT:      %2 = affine.apply [[MAP1]](%i0, %i1, %i0)
-  // CHECK-NEXT:      store %cst, %0[%1, %2] : memref<64x1xf32>
+  // CHECK-NEXT:      affine.store %cst, %0[%i1, 0] : memref<64x1xf32>
   // CHECK-NEXT:    }
   // CHECK-NEXT:    affine.for %i2 = 0 to 4 {
   // CHECK-NEXT:      affine.for %i3 = 0 to 16 {
-  // CHECK-NEXT:        %3 = affine.apply [[MAP2]](%i2, %i3)
-  // CHECK-NEXT:        %4 = load %arg1[%3, %i0] : memref<64x4xf32>
-  // CHECK-NEXT:        "op0"(%4) : (f32) -> ()
+  // CHECK-NEXT:        %1 = affine.apply [[MAP2]](%i2, %i3)
+  // CHECK-NEXT:        %2 = affine.load %arg1[%1, %i0] : memref<64x4xf32>
+  // CHECK-NEXT:        "op0"(%2) : (f32) -> ()
   // CHECK-NEXT:      }
   // CHECK-NEXT:      affine.for %i4 = 0 to 4 {
   // CHECK-NEXT:        affine.for %i5 = 0 to 16 {
-  // CHECK-NEXT:          %5 = affine.apply [[MAP2]](%i4, %i5)
-  // CHECK-NEXT:          %6 = load %arg0[%5, %i2] : memref<64x4xf32>
-  // CHECK-NEXT:          "op1"(%6) : (f32) -> ()
+  // CHECK-NEXT:          %3 = affine.apply [[MAP2]](%i4, %i5)
+  // CHECK-NEXT:          %4 = affine.load %arg0[%3, %i2] : memref<64x4xf32>
+  // CHECK-NEXT:          "op1"(%4) : (f32) -> ()
   // CHECK-NEXT:        }
   // CHECK-NEXT:        affine.for %i6 = 0 to 16 {
-  // CHECK-NEXT:          %7 = "op2"() : () -> f32
-  // CHECK-NEXT:          %8 = affine.apply [[MAP3]](%i4, %i6)
-  // CHECK-NEXT:          %9 = affine.apply [[MAP0]](%i0, %8, %i0)
-  // CHECK-NEXT:          %10 = affine.apply [[MAP1]](%i0, %8, %i0)
-  // CHECK-NEXT:          %11 = load %0[%9, %10] : memref<64x1xf32>
-  // CHECK-NEXT:          %12 = addf %11, %7 : f32
-  // CHECK-NEXT:          %13 = affine.apply [[MAP0]](%i0, %8, %i0)
-  // CHECK-NEXT:          %14 = affine.apply [[MAP1]](%i0, %8, %i0)
-  // CHECK-NEXT:          store %12, %0[%13, %14] : memref<64x1xf32>
+  // CHECK-NEXT:          %5 = "op2"() : () -> f32
+  // CHECK-NEXT:          %6 = affine.apply [[MAP3]](%i4, %i6)
+  // CHECK-NEXT:          %7 = affine.load %0[%i4 * 16 + %i6, 0] : memref<64x1xf32>
+  // CHECK-NEXT:          %8 = addf %7, %5 : f32
+  // CHECK-NEXT:          affine.store %8, %0[%i4 * 16 + %i6, 0] : memref<64x1xf32>
   // CHECK-NEXT:        }
   // CHECK-NEXT:      }
   // CHECK-NEXT:    }
@@ -1681,8 +1520,6 @@
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
-
 // CHECK-LABEL: func @should_fuse_after_private_memref_creation() {
 func @should_fuse_after_private_memref_creation() {
   %a = alloc() : memref<10xf32>
@@ -1691,15 +1528,15 @@
   %cf7 = constant 7.0 : f32
 
   affine.for %i0 = 0 to 10 {
-    store %cf7, %a[%i0] : memref<10xf32>
+    affine.store %cf7, %a[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    %v0 = load %a[%i1] : memref<10xf32>
-    store %v0, %b[%i1] : memref<10xf32>
+    %v0 = affine.load %a[%i1] : memref<10xf32>
+    affine.store %v0, %b[%i1] : memref<10xf32>
   }
   affine.for %i2 = 0 to 10 {
-    %v1 = load %a[%i2] : memref<10xf32>
-    store %v1, %b[%i2] : memref<10xf32>
+    %v1 = affine.load %a[%i2] : memref<10xf32>
+    affine.store %v1, %b[%i2] : memref<10xf32>
   }
 
   // On the first visit to '%i2', the fusion algorithm can not fuse loop nest
@@ -1709,18 +1546,14 @@
   // longer exists, so '%i0' can now be fused into '%i2'.
 
   // CHECK:       affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:    %3 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:    store %cst, %1[%3] : memref<1xf32>
-  // CHECK-NEXT:    %4 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:    %5 = load %1[%4] : memref<1xf32>
-  // CHECK-NEXT:    store %5, %2[%i0] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %1[0] : memref<1xf32>
+  // CHECK-NEXT:    %3 = affine.load %1[0] : memref<1xf32>
+  // CHECK-NEXT:    affine.store %3, %2[%i0] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:    %6 = affine.apply [[MAP0]](%i1, %i1)
-  // CHECK-NEXT:    store %cst, %0[%6] : memref<1xf32>
-  // CHECK-NEXT:    %7 = affine.apply [[MAP0]](%i1, %i1)
-  // CHECK-NEXT:    %8 = load %0[%7] : memref<1xf32>
-  // CHECK-NEXT:    store %8, %2[%i1] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %0[0] : memref<1xf32>
+  // CHECK-NEXT:    %4 = affine.load %0[0] : memref<1xf32>
+  // CHECK-NEXT:    affine.store %4, %2[%i1] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:   return
   return
@@ -1728,38 +1561,33 @@
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1) -> (-d0 + d1)
-
 // CHECK-LABEL: func @should_fuse_after_one_loop_interchange() {
 func @should_fuse_after_one_loop_interchange() {
   %a = alloc() : memref<10xf32>
 
   %cf0 = constant 0.0 : f32
   affine.for %i0 = 0 to 10 {
-    store %cf0, %a[%i0] : memref<10xf32>
+    affine.store %cf0, %a[%i0] : memref<10xf32>
   }
 
   affine.for %i1 = 0 to 5 {
     affine.for %i2 = 0 to 10 {
-      %v0 = load %a[%i2] : memref<10xf32>
-      store %v0, %a[%i2] : memref<10xf32>
+      %v0 = affine.load %a[%i2] : memref<10xf32>
+      affine.store %v0, %a[%i2] : memref<10xf32>
     }
   }
 
-  // The dependence between the load and store is carried on loop '%i1', and
+  // The dependence between the load and affine.store is carried on loop '%i1', and
   // cannot be fused with loop '%i0' without violating this dependence.
   // Once loops '%i1' and %i2' are interchanged, loop '%i0' can be fused
   // at loop depth 1, because the loop carrying the dependence has been
   // interchanged and is now at depth 2.
 
   // CHECK:       affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:    %1 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:    store %cst, %0[%1] : memref<1xf32>
+  // CHECK-NEXT:    affine.store %cst, %0[0] : memref<1xf32>
   // CHECK-NEXT:    affine.for %i1 = 0 to 5 {
-  // CHECK-NEXT:      %2 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:      %3 = load %0[%2] : memref<1xf32>
-  // CHECK-NEXT:      %4 = affine.apply [[MAP0]](%i0, %i0)
-  // CHECK-NEXT:      store %3, %0[%4] : memref<1xf32>
+  // CHECK-NEXT:      %1 = affine.load %0[0] : memref<1xf32>
+  // CHECK-NEXT:      affine.store %1, %0[0] : memref<1xf32>
   // CHECK-NEXT:    }
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return
@@ -1768,9 +1596,6 @@
 
 // -----
 
-// CHECK: [[MAP0:#map[0-9]+]] = (d0, d1, d2, d3) -> (-d0 + d2)
-// CHECK: [[MAP1:#map[0-9]+]] = (d0, d1, d2, d3) -> (-d1 + d3)
-
 // CHECK-LABEL: func @should_fuse_after_two_loop_interchanges() {
 func @should_fuse_after_two_loop_interchanges() {
   %a = alloc() : memref<6x8xf32>
@@ -1778,7 +1603,7 @@
   %cf0 = constant 0.0 : f32
   affine.for %i0 = 0 to 6 {
     affine.for %i1 = 0 to 8 {
-      store %cf0, %a[%i0, %i1] : memref<6x8xf32>
+      affine.store %cf0, %a[%i0, %i1] : memref<6x8xf32>
     }
   }
 
@@ -1786,15 +1611,15 @@
     affine.for %i3 = 0 to 6 {
       affine.for %i4 = 0 to 2 {
         affine.for %i5 = 0 to 8 {
-          %v0 = load %a[%i3, %i5] : memref<6x8xf32>
+          %v0 = affine.load %a[%i3, %i5] : memref<6x8xf32>
           %v1 = addf %v0, %v0 : f32
-          store %v1, %a[%i3, %i5] : memref<6x8xf32>
+          affine.store %v1, %a[%i3, %i5] : memref<6x8xf32>
         }
       }
     }
   }
 
-  // The dependence between the load and store is carried on loops '%i2' and
+  // The dependence between the load and affine.store is carried on loops '%i2' and
   // '%i4', and cannot be fused with loop '%i0' without violating this
   // dependence.
   // Once loop '%i2' is interchanged with loop '%i3', and again with loop
@@ -1803,18 +1628,12 @@
 
   // CHECK:       affine.for %i0 = 0 to 6 {
   // CHECK-NEXT:    affine.for %i1 = 0 to 8 {
-  // CHECK-NEXT:      %1 = affine.apply [[MAP0]](%i0, %i1, %i0, %i1)
-  // CHECK-NEXT:      %2 = affine.apply [[MAP1]](%i0, %i1, %i0, %i1)
-  // CHECK-NEXT:      store %cst, %0[%1, %2] : memref<1x1xf32>
+  // CHECK-NEXT:      affine.store %cst, %0[0, 0] : memref<1x1xf32>
   // CHECK-NEXT:      affine.for %i2 = 0 to 4 {
   // CHECK-NEXT:        affine.for %i3 = 0 to 2 {
-  // CHECK-NEXT:          %3 = affine.apply [[MAP0]](%i0, %i1, %i0, %i1)
-  // CHECK-NEXT:          %4 = affine.apply [[MAP1]](%i0, %i1, %i0, %i1)
-  // CHECK-NEXT:          %5 = load %0[%3, %4] : memref<1x1xf32>
-  // CHECK-NEXT:          %6 = addf %5, %5 : f32
-  // CHECK-NEXT:          %7 = affine.apply [[MAP0]](%i0, %i1, %i0, %i1)
-  // CHECK-NEXT:          %8 = affine.apply [[MAP1]](%i0, %i1, %i0, %i1)
-  // CHECK-NEXT:          store %6, %0[%7, %8] : memref<1x1xf32>
+  // CHECK-NEXT:          %1 = affine.load %0[0, 0] : memref<1x1xf32>
+  // CHECK-NEXT:          %2 = addf %1, %1 : f32
+  // CHECK-NEXT:          affine.store %2, %0[0, 0] : memref<1x1xf32>
   // CHECK-NEXT:        }
   // CHECK-NEXT:      }
   // CHECK-NEXT:    }
@@ -1828,19 +1647,19 @@
 func @should_fuse_live_out_writer(%arg0 : memref<10xf32>) -> memref<10xf32> {
   %cst = constant 0.000000e+00 : f32
   affine.for %i0 = 0 to 10 {
-    store %cst, %arg0[%i0] : memref<10xf32>
+    affine.store %cst, %arg0[%i0] : memref<10xf32>
   }
   affine.for %i1 = 0 to 10 {
-    %1 = load %arg0[%i1] : memref<10xf32>
-    store %1, %arg0[%i1] : memref<10xf32>
+    %1 = affine.load %arg0[%i1] : memref<10xf32>
+    affine.store %1, %arg0[%i1] : memref<10xf32>
   }
   return %arg0 : memref<10xf32>
 
   // CHECK:       %cst = constant 0.000000e+00 : f32
   // CHECK-NEXT:  affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:    store %cst, %arg0[%i0] : memref<10xf32>
-  // CHECK-NEXT:    %0 = load %arg0[%i0] : memref<10xf32>
-  // CHECK-NEXT:    store %0, %arg0[%i0] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %arg0[%i0] : memref<10xf32>
+  // CHECK-NEXT:    %0 = affine.load %arg0[%i0] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %0, %arg0[%i0] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT:  return %arg0 : memref<10xf32>
 }
@@ -1858,7 +1677,7 @@
 func @slice_tile(%arg0: memref<128x8xf32>, %arg1: memref<32x8xf32>, %0 : f32) -> memref<32x8xf32> {
   affine.for %i0 = 0 to 32 {
     affine.for %i1 = 0 to 8 {
-      store %0, %arg1[%i0, %i1] : memref<32x8xf32>
+      affine.store %0, %arg1[%i0, %i1] : memref<32x8xf32>
     }
   }
   affine.for %i = 0 to 2 {
@@ -1866,14 +1685,14 @@
       affine.for %k = 0 to 8 {
         affine.for %kk = 0 to 16 {
           %1 = affine.apply #map(%k, %kk)
-          %2 = load %arg0[%1, %j] : memref<128x8xf32>
+          %2 = affine.load %arg0[%1, %j] : memref<128x8xf32>
           %3 = "foo"(%2) : (f32) -> f32
         }
         affine.for %ii = 0 to 16 {
           %6 = affine.apply #map(%i, %ii)
-          %7 = load %arg1[%6, %j] : memref<32x8xf32>
+          %7 = affine.load %arg1[%6, %j] : memref<32x8xf32>
           %8 = addf %7, %7 : f32
-          store %8, %arg1[%6, %j] : memref<32x8xf32>
+          affine.store %8, %arg1[%6, %j] : memref<32x8xf32>
         }
       }
     }
@@ -1883,19 +1702,19 @@
 // CHECK:       affine.for %i0 = 0 to 2 {
 // CHECK-NEXT:    affine.for %i1 = 0 to 8 {
 // CHECK-NEXT:      affine.for %i2 = [[MAP_LB]](%i0) to [[MAP_UB]](%i0) {
-// CHECK-NEXT:        store %arg2, %arg1[%i2, %i1] : memref<32x8xf32>
+// CHECK-NEXT:        affine.store %arg2, %arg1[%i2, %i1] : memref<32x8xf32>
 // CHECK-NEXT:      }
 // CHECK-NEXT:      affine.for %i3 = 0 to 8 {
 // CHECK-NEXT:        affine.for %i4 = 0 to 16 {
 // CHECK-NEXT:          %0 = affine.apply #map{{[0-9]+}}(%i3, %i4)
-// CHECK-NEXT:          %1 = load %arg0[%0, %i1] : memref<128x8xf32>
+// CHECK-NEXT:          %1 = affine.load %arg0[%0, %i1] : memref<128x8xf32>
 // CHECK-NEXT:          %2 = "foo"(%1) : (f32) -> f32
 // CHECK-NEXT:        }
 // CHECK-NEXT:        affine.for %i5 = 0 to 16 {
 // CHECK-NEXT:          %3 = affine.apply #map{{[0-9]+}}(%i0, %i5)
-// CHECK-NEXT:          %4 = load %arg1[%3, %i1] : memref<32x8xf32>
+// CHECK-NEXT:          %4 = affine.load %arg1[%3, %i1] : memref<32x8xf32>
 // CHECK-NEXT:          %5 = addf %4, %4 : f32
-// CHECK-NEXT:          store %5, %arg1[%3, %i1] : memref<32x8xf32>
+// CHECK-NEXT:          affine.store %5, %arg1[%3, %i1] : memref<32x8xf32>
 // CHECK-NEXT:        }
 // CHECK-NEXT:      }
 // CHECK-NEXT:    }
@@ -1918,14 +1737,14 @@
         %a0 = affine.apply (d0) -> (d0) (%i0)
         %a1 = affine.apply (d0) -> (d0) (%i0)
         %a2 = affine.apply (d0, d1) -> (d0 - d1) (%a0, %a1)
-        store %cf7, %a[%a2] : memref<10xf32>
+        affine.store %cf7, %a[%a2] : memref<10xf32>
       }
     }
   }
   affine.for %i3 = 0 to 10 {
     affine.for %i4 = 0 to 10 {
       affine.for %i5 = 0 to 10 {
-        %v0 = load %a[%c0] : memref<10xf32>
+        %v0 = affine.load %a[%c0] : memref<10xf32>
       }
     }
   }
@@ -1936,14 +1755,14 @@
 // CHECK-NEXT:         %2 = affine.apply #map0(%i0)
 // CHECK-NEXT:         %3 = affine.apply #map0(%i0)
 // CHECK-NEXT:         %4 = affine.apply #map1(%2, %3)
-// CHECK-NEXT:         store %cst, %0[%4] : memref<10xf32>
+// CHECK-NEXT:         affine.store %cst, %0[%4] : memref<10xf32>
 // CHECK-NEXT:       }
 // CHECK-NEXT:     }
 // CHECK-NEXT:   }
 // CHECK-NEXT:   affine.for %i3 = 0 to 10 {
 // CHECK-NEXT:     affine.for %i4 = 0 to 10 {
 // CHECK-NEXT:       affine.for %i5 = 0 to 10 {
-// CHECK-NEXT:         %5 = load %0[%c0] : memref<10xf32>
+// CHECK-NEXT:         %5 = affine.load %0[%c0] : memref<10xf32>
 // CHECK-NEXT:       }
 // CHECK-NEXT:     }
 // CHECK-NEXT:   }
@@ -1951,8 +1770,6 @@
 }
 
 // -----
-// CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0, d1, d2, d3) -> (-d0 + d2)
-// CHECK-DAG: [[MAP1:#map[0-9]+]] = (d0, d1, d2, d3) -> (-d1 + d3)
 
 func @should_fuse_init_loops_siblings_then_shared_producer(%arg0: memref<10x10xf32>, %arg1: memref<10x10xf32>) {
   %0 = alloc() : memref<10x10xf32>
@@ -1961,33 +1778,33 @@
   %cst_1 = constant 7.000000e+00 : f32
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
-      store %cst_1, %0[%i0, %i1] : memref<10x10xf32>
+      affine.store %cst_1, %0[%i0, %i1] : memref<10x10xf32>
     }
   }
   affine.for %i2 = 0 to 3 {
     affine.for %i3 = 0 to 3 {
-      store %cst, %arg0[%i2, %i3] : memref<10x10xf32>
+      affine.store %cst, %arg0[%i2, %i3] : memref<10x10xf32>
     }
   }
   affine.for %i4 = 0 to 3 {
     affine.for %i5 = 0 to 3 {
-      %1 = load %0[%i4, %i5] : memref<10x10xf32>
-      %2 = load %arg0[%i4, %i5] : memref<10x10xf32>
+      %1 = affine.load %0[%i4, %i5] : memref<10x10xf32>
+      %2 = affine.load %arg0[%i4, %i5] : memref<10x10xf32>
       %3 = mulf %1, %2 : f32
-      store %3, %arg0[%i4, %i5] : memref<10x10xf32>
+      affine.store %3, %arg0[%i4, %i5] : memref<10x10xf32>
     }
   }
   affine.for %i6 = 0 to 3 {
     affine.for %i7 = 0 to 3 {
-      store %cst_0, %arg1[%i6, %i7] : memref<10x10xf32>
+      affine.store %cst_0, %arg1[%i6, %i7] : memref<10x10xf32>
     }
   }
   affine.for %i8 = 0 to 3 {
     affine.for %i9 = 0 to 3 {
-      %4 = load %0[%i8, %i9] : memref<10x10xf32>
-      %5 = load %arg1[%i8, %i9] : memref<10x10xf32>
+      %4 = affine.load %0[%i8, %i9] : memref<10x10xf32>
+      %5 = affine.load %arg1[%i8, %i9] : memref<10x10xf32>
       %6 = addf %4, %5 : f32
-      store %6, %arg1[%i8, %i9] : memref<10x10xf32>
+      affine.store %6, %arg1[%i8, %i9] : memref<10x10xf32>
     }
   }
 
@@ -2001,23 +1818,17 @@
 
 // CHECK:        affine.for %i0 = 0 to 3 {
 // CHECK-NEXT:     affine.for %i1 = 0 to 3 {
-// CHECK-NEXT:       %1 = affine.apply [[MAP0]](%i0, %i1, %i0, %i1)
-// CHECK-NEXT:       %2 = affine.apply [[MAP1]](%i0, %i1, %i0, %i1)
-// CHECK-NEXT:       store %cst_1, %0[%1, %2] : memref<1x1xf32>
-// CHECK-NEXT:       store %cst, %arg0[%i0, %i1] : memref<10x10xf32>
-// CHECK-NEXT:       %3 = affine.apply [[MAP0]](%i0, %i1, %i0, %i1)
-// CHECK-NEXT:       %4 = affine.apply [[MAP1]](%i0, %i1, %i0, %i1)
-// CHECK-NEXT:       %5 = load %0[%3, %4] : memref<1x1xf32>
-// CHECK-NEXT:       %6 = load %arg0[%i0, %i1] : memref<10x10xf32>
-// CHECK-NEXT:       %7 = mulf %5, %6 : f32
-// CHECK-NEXT:       store %7, %arg0[%i0, %i1] : memref<10x10xf32>
-// CHECK-NEXT:       store %cst_0, %arg1[%i0, %i1] : memref<10x10xf32>
-// CHECK-NEXT:      %8 = affine.apply [[MAP0]](%i0, %i1, %i0, %i1)
-// CHECK-NEXT:       %9 = affine.apply [[MAP1]](%i0, %i1, %i0, %i1)
-// CHECK-NEXT:       %10 = load %0[%8, %9] : memref<1x1xf32>
-// CHECK-NEXT:       %11 = load %arg1[%i0, %i1] : memref<10x10xf32>
-// CHECK-NEXT:       %12 = addf %10, %11 : f32
-// CHECK-NEXT:       store %12, %arg1[%i0, %i1] : memref<10x10xf32>
+// CHECK-NEXT:       affine.store %cst_1, %0[0, 0] : memref<1x1xf32>
+// CHECK-NEXT:       affine.store %cst, %arg0[%i0, %i1] : memref<10x10xf32>
+// CHECK-NEXT:       %1 = affine.load %0[0, 0] : memref<1x1xf32>
+// CHECK-NEXT:       %2 = affine.load %arg0[%i0, %i1] : memref<10x10xf32>
+// CHECK-NEXT:       %3 = mulf %1, %2 : f32
+// CHECK-NEXT:       affine.store %3, %arg0[%i0, %i1] : memref<10x10xf32>
+// CHECK-NEXT:       affine.store %cst_0, %arg1[%i0, %i1] : memref<10x10xf32>
+// CHECK-NEXT:       %4 = affine.load %0[0, 0] : memref<1x1xf32>
+// CHECK-NEXT:       %5 = affine.load %arg1[%i0, %i1] : memref<10x10xf32>
+// CHECK-NEXT:       %6 = addf %4, %5 : f32
+// CHECK-NEXT:       affine.store %6, %arg1[%i0, %i1] : memref<10x10xf32>
 // CHECK-NEXT:     }
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return
@@ -2026,8 +1837,6 @@
 }
 
 // -----
-// CHECK-DAG: [[MAP2:#map[0-9]+]] = (d0, d1, d2) -> (d1)
-// CHECK-DAG: [[MAP3:#map[0-9]+]] = (d0, d1, d2) -> (-d0 + d2)
 
 func @two_matrix_vector_products() {
   %in_matrix = alloc() : memref<10x10xf32>
@@ -2040,57 +1849,51 @@
   // Populate input matrix.
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
-      store %cf7, %in_matrix[%i0, %i1] : memref<10x10xf32>
+      affine.store %cf7, %in_matrix[%i0, %i1] : memref<10x10xf32>
     }
   }
   // out_vec0 = in_matrix x in_vec0
   affine.for %i2 = 0 to 10 {
     affine.for %i3 = 0 to 10 {
-      %v0 = load %in_matrix[%i2, %i3] : memref<10x10xf32>
-      %v1 = load %in_vec0[%i3] : memref<10xf32>
+      %v0 = affine.load %in_matrix[%i2, %i3] : memref<10x10xf32>
+      %v1 = affine.load %in_vec0[%i3] : memref<10xf32>
       %v2 = mulf %v0, %v1 : f32
-      %v3 = load %out_vec0[%i3] : memref<10xf32>
+      %v3 = affine.load %out_vec0[%i3] : memref<10xf32>
       %v4 = addf %v2, %v3 : f32
-      store %v4, %out_vec0[%i3] : memref<10xf32>
+      affine.store %v4, %out_vec0[%i3] : memref<10xf32>
     }
   }
   // out_vec1 = in_matrix x in_vec1
   affine.for %i4 = 0 to 10 {
     affine.for %i5 = 0 to 10 {
-      %v5 = load %in_matrix[%i4, %i5] : memref<10x10xf32>
-      %v6 = load %in_vec1[%i5] : memref<10xf32>
+      %v5 = affine.load %in_matrix[%i4, %i5] : memref<10x10xf32>
+      %v6 = affine.load %in_vec1[%i5] : memref<10xf32>
       %v7 = mulf %v5, %v6 : f32
-      %v8 = load %out_vec1[%i5] : memref<10xf32>
+      %v8 = affine.load %out_vec1[%i5] : memref<10xf32>
       %v9 = addf %v7, %v8 : f32
-      store %v9, %out_vec1[%i5] : memref<10xf32>
+      affine.store %v9, %out_vec1[%i5] : memref<10xf32>
     }
   }
 
 // CHECK:        affine.for %i0 = 0 to 10 {
 // CHECK-NEXT:     affine.for %i1 = 0 to 10 {
-// CHECK-NEXT:       %5 = affine.apply [[MAP2]](%i0, %i1, %i0)
-// CHECK-NEXT:       %6 = affine.apply [[MAP3]](%i0, %i1, %i0)
-// CHECK-NEXT:       store %cst, %0[%5, %6] : memref<10x1xf32>
+// CHECK-NEXT:       affine.store %cst, %0[%i1, 0] : memref<10x1xf32>
 // CHECK-NEXT:     }
 // CHECK-NEXT:     affine.for %i2 = 0 to 10 {
-// CHECK-NEXT:       %7 = affine.apply [[MAP2]](%i0, %i2, %i0)
-// CHECK-NEXT:       %8 = affine.apply [[MAP3]](%i0, %i2, %i0)
-// CHECK-NEXT:       %9 = load %0[%7, %8] : memref<10x1xf32>
-// CHECK-NEXT:       %10 = load %1[%i0] : memref<10xf32>
-// CHECK-NEXT:       %11 = mulf %9, %10 : f32
-// CHECK-NEXT:       %12 = load %3[%i0] : memref<10xf32>
-// CHECK-NEXT:       %13 = addf %11, %12 : f32
-// CHECK-NEXT:       store %13, %3[%i0] : memref<10xf32>
+// CHECK-NEXT:       %5 = affine.load %0[%i2, 0] : memref<10x1xf32>
+// CHECK-NEXT:       %6 = affine.load %1[%i0] : memref<10xf32>
+// CHECK-NEXT:       %7 = mulf %5, %6 : f32
+// CHECK-NEXT:       %8 = affine.load %3[%i0] : memref<10xf32>
+// CHECK-NEXT:       %9 = addf %7, %8 : f32
+// CHECK-NEXT:       affine.store %9, %3[%i0] : memref<10xf32>
 // CHECK-NEXT:     }
 // CHECK-NEXT:     affine.for %i3 = 0 to 10 {
-// CHECK-NEXT:       %14 = affine.apply [[MAP2]](%i0, %i3, %i0)
-// CHECK-NEXT:       %15 = affine.apply [[MAP3]](%i0, %i3, %i0)
-// CHECK-NEXT:       %16 = load %0[%14, %15] : memref<10x1xf32>
-// CHECK-NEXT:       %17 = load %2[%i0] : memref<10xf32>
-// CHECK-NEXT:       %18 = mulf %16, %17 : f32
-// CHECK-NEXT:       %19 = load %4[%i0] : memref<10xf32>
-// CHECK-NEXT:       %20 = addf %18, %19 : f32
-// CHECK-NEXT:       store %20, %4[%i0] : memref<10xf32>
+// CHECK-NEXT:       %10 = affine.load %0[%i3, 0] : memref<10x1xf32>
+// CHECK-NEXT:       %11 = affine.load %2[%i0] : memref<10xf32>
+// CHECK-NEXT:       %12 = mulf %10, %11 : f32
+// CHECK-NEXT:       %13 = affine.load %4[%i0] : memref<10xf32>
+// CHECK-NEXT:       %14 = addf %12, %13 : f32
+// CHECK-NEXT:       affine.store %14, %4[%i0] : memref<10xf32>
 // CHECK-NEXT:     }
 // CHECK-NEXT:   }
 // CHECK-NEXT:   return
@@ -2098,20 +1901,18 @@
 }
 
 // -----
-// CHECK-DAG: [[MAP3:#map[0-9]+]] = (d0, d1, d2) -> (-d0 + d1)
-// CHECK-DAG: [[MAP4:#map[0-9]+]] = (d0, d1, d2) -> (d2)
 
 func @should_not_slice_past_slice_barrier() {
   %0 = alloc() : memref<100x16xf32>
   affine.for %i0 = 0 to 100 {
     affine.for %i1 = 0 to 16 {
       %1 = "op1"() : () -> f32
-      store %1, %0[%i0, %i1] : memref<100x16xf32>
+      affine.store %1, %0[%i0, %i1] : memref<100x16xf32>
     } {slice_fusion_barrier = true}
   }
   affine.for %i2 = 0 to 100 {
     affine.for %i3 = 0 to 16 {
-      %2 = load %0[%i2, %i3] : memref<100x16xf32>
+      %2 = affine.load %0[%i2, %i3] : memref<100x16xf32>
       "op2"(%2) : (f32) -> ()
     }
   }
@@ -2120,15 +1921,11 @@
 // CHECK:        affine.for %i0 = 0 to 100 {
 // CHECK-NEXT:     affine.for %i1 = 0 to 16 {
 // CHECK-NEXT:       %1 = "op1"() : () -> f32
-// CHECK-NEXT:       %2 = affine.apply [[MAP3]](%i0, %i0, %i1)
-// CHECK-NEXT:       %3 = affine.apply [[MAP4]](%i0, %i0, %i1)
-// CHECK-NEXT:       store %1, %0[%2, %3] : memref<1x16xf32>
+// CHECK-NEXT:       affine.store %1, %0[0, %i1] : memref<1x16xf32>
 // CHECK-NEXT:     } {slice_fusion_barrier = true}
 // CHECK-NEXT:     affine.for %i2 = 0 to 16 {
-// CHECK-NEXT:       %4 = affine.apply [[MAP3]](%i0, %i0, %i2)
-// CHECK-NEXT:       %5 = affine.apply [[MAP4]](%i0, %i0, %i2)
-// CHECK-NEXT:       %6 = load %0[%4, %5] : memref<1x16xf32>
-// CHECK-NEXT:       "op2"(%6) : (f32) -> ()
+// CHECK-NEXT:       %2 = affine.load %0[0, %i2] : memref<1x16xf32>
+// CHECK-NEXT:       "op2"(%2) : (f32) -> ()
 // CHECK-NEXT:     }
 // CHECK-NEXT:   }
   return
@@ -2144,7 +1941,7 @@
     affine.for %i3 = 0 to 4 {
       affine.for %i5 = 0 to 16 {
         %7 = affine.apply #map0(%i2, %i5)
-        store %2, %1[%7, %i3] : memref<144x4xf32>
+        affine.store %2, %1[%7, %i3] : memref<144x4xf32>
       }
     }
   }
@@ -2153,7 +1950,7 @@
       affine.for %i8 = 0 to 4 {
         affine.for %i10 = 0 to 16 {
           %10 = affine.apply #map0(%i6, %i10)
-          %11 = load %1[%10, %i8] : memref<144x4xf32>
+          %11 = affine.load %1[%10, %i8] : memref<144x4xf32>
         }
       }
     }
@@ -2161,8 +1958,6 @@
   return
 }
 // MAXIMAL:      #map0 = (d0, d1) -> (d0 * 16 + d1)
-// MAXIMAL-NEXT: #map1 = (d0, d1, d2, d3, d4) -> (d0 * -16 - d1 + d3)
-// MAXIMAL-NEXT: #map2 = (d0, d1, d2, d3, d4) -> (-d2 + d4)
 // MAXIMAL-LABEL: func @fuse_across_dim_mismatch
 // MAXIMAL:        %0 = alloc() : memref<1x1xf32>
 // MAXIMAL:        affine.for %i0 = 0 to 9 {
@@ -2170,13 +1965,9 @@
 // MAXIMAL-NEXT:      affine.for %i2 = 0 to 4 {
 // MAXIMAL-NEXT:        affine.for %i3 = 0 to 16 {
 // MAXIMAL-NEXT:          %1 = affine.apply #map0(%i0, %i3)
-// MAXIMAL-NEXT:          %2 = affine.apply #map1(%i0, %i3, %i2, %1, %i2)
-// MAXIMAL-NEXT:          %3 = affine.apply #map2(%i0, %i3, %i2, %1, %i2)
-// MAXIMAL-NEXT:          store %cst, %0[%2, %3] : memref<1x1xf32>
-// MAXIMAL-NEXT:          %4 = affine.apply #map0(%i0, %i3)
-// MAXIMAL-NEXT:          %5 = affine.apply #map1(%i0, %i3, %i2, %4, %i2)
-// MAXIMAL-NEXT:          %6 = affine.apply #map2(%i0, %i3, %i2, %4, %i2)
-// MAXIMAL-NEXT:          %7 = load %0[%5, %6] : memref<1x1xf32>
+// MAXIMAL-NEXT:          affine.store %cst, %0[0, 0] : memref<1x1xf32>
+// MAXIMAL-NEXT:          %2 = affine.apply #map0(%i0, %i3)
+// MAXIMAL-NEXT:          %3 = affine.load %0[0, 0] : memref<1x1xf32>
 // MAXIMAL-NEXT:        }
 // MAXIMAL-NEXT:      }
 // MAXIMAL-NEXT:    }
@@ -2204,20 +1995,20 @@
       %6 = affine.apply #map5(%i0, %i1)
       %7 = affine.apply #map6(%i0, %i1)
       %8 = affine.apply #map7(%i0, %i1)
-      %9 = load %0[%4, %5, %7, %8, %6, %c0] : memref<2x2x3x3x16x1xf32>
-      store %9, %1[%i0, %i1] : memref<64x9xf32>
+      %9 = affine.load %0[%4, %5, %7, %8, %6, %c0] : memref<2x2x3x3x16x1xf32>
+      affine.store %9, %1[%i0, %i1] : memref<64x9xf32>
     }
   }
   affine.for %i2 = 0 to 9 {
     affine.for %i3 = 0 to 4 {
       affine.for %i4 = 0 to 16 {
         %10 = affine.apply #map10(%i3, %i4)
-        %11 = load %1[%10, %i2] : memref<64x9xf32>
+        %11 = affine.load %1[%10, %i2] : memref<64x9xf32>
       }
       affine.for %i5 = 0 to 16 {
         %13 = "bar"() : () -> f32
         %14 = affine.apply #map11(%i2, %i5)
-        store %13, %2[%14, %i3] : memref<144x4xf32>
+        affine.store %13, %2[%14, %i3] : memref<144x4xf32>
       }
     }
   }
@@ -2226,7 +2017,7 @@
       affine.for %i8 = 0 to 4 {
         affine.for %i9 = 0 to 16 {
           %15 = affine.apply #map12(%i8, %i9)
-          %16 = load %1[%15, %i7] : memref<64x9xf32>
+          %16 = affine.load %1[%15, %i7] : memref<64x9xf32>
         }
       }
     }
@@ -2238,8 +2029,6 @@
 // MAXIMAL-DAG: [[MAP2:#map[0-9]+]] = (d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) floordiv 9) floordiv 8)
 // MAXIMAL-DAG: [[MAP3:#map[0-9]+]] = (d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) floordiv 3)
 // MAXIMAL-DAG: [[MAP4:#map[0-9]+]] = (d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) mod 3)
-// MAXIMAL-DAG: [[MAP5:#map[0-9]+]] = (d0, d1, d2) -> (d1)
-// MAXIMAL-DAG: [[MAP6:#map[0-9]+]] = (d0, d1, d2) -> (-d0 + d2)
 // MAXIMAL-DAG: [[MAP7:#map[0-9]+]] = (d0, d1) -> (d0 * 16 + d1)
 // MAXIMAL-DAG: [[MAP8:#map[0-9]+]] = (d0, d1) -> (d0 * 16 - d1 + 15)
 // MAXIMAL-LABEL: func @fuse_across_varying_dims_complex
@@ -2257,35 +2046,28 @@
 // MAXIMAL-NEXT:            %5 = affine.apply [[MAP2]](%i4, %i0)
 // MAXIMAL-NEXT:            %6 = affine.apply [[MAP3]](%i4, %i0)
 // MAXIMAL-NEXT:            %7 = affine.apply [[MAP4]](%i4, %i0)
-// MAXIMAL-NEXT:            %8 = load %1[%3, %4, %6, %7, %5, %c0] : memref<2x2x3x3x16x1xf32>
-// MAXIMAL-NEXT:            %9 = affine.apply [[MAP5]](%i0, %i4, %i0)
-// MAXIMAL-NEXT:            %10 = affine.apply [[MAP6]](%i0, %i4, %i0)
-// MAXIMAL-NEXT:            store %8, %0[%9, %10] : memref<64x1xf32>
+// MAXIMAL-NEXT:            %8 = affine.load %1[%3, %4, %6, %7, %5, %c0] : memref<2x2x3x3x16x1xf32>
+// MAXIMAL-NEXT:            affine.store %8, %0[%i4, 0] : memref<64x1xf32>
 // MAXIMAL-NEXT:          }
 // MAXIMAL-NEXT:          affine.for %i5 = 0 to 4 {
 // MAXIMAL-NEXT:            affine.for %i6 = 0 to 16 {
-// MAXIMAL-NEXT:              %11 = affine.apply [[MAP7]](%i5, %i6)
-// MAXIMAL-NEXT:              %12 = affine.apply [[MAP5]](%i0, %11, %i0)
-// MAXIMAL-NEXT:              %13 = affine.apply [[MAP6]](%i0, %11, %i0)
-// MAXIMAL-NEXT:              %14 = load %0[%12, %13] : memref<64x1xf32>
+// MAXIMAL-NEXT:              %9 = affine.apply [[MAP7]](%i5, %i6)
+// MAXIMAL-NEXT:              %10 = affine.load %0[%i5 * 16 + %i6, 0] : memref<64x1xf32>
 // MAXIMAL-NEXT:            }
 // MAXIMAL-NEXT:            affine.for %i7 = 0 to 16 {
-// MAXIMAL-NEXT:              %15 = "bar"() : () -> f32
-// MAXIMAL-NEXT:              %16 = affine.apply [[MAP7]](%i0, %i7)
-// MAXIMAL-NEXT:              store %15, %2[%16, %i5] : memref<144x4xf32>
+// MAXIMAL-NEXT:              %11 = "bar"() : () -> f32
+// MAXIMAL-NEXT:              %12 = affine.apply [[MAP7]](%i0, %i7)
+// MAXIMAL-NEXT:              affine.store %11, %2[%12, %i5] : memref<144x4xf32>
 // MAXIMAL-NEXT:            }
 // MAXIMAL-NEXT:          }
-// MAXIMAL-NEXT:          %17 = affine.apply [[MAP8]](%i2, %i3)
-// MAXIMAL-NEXT:          %18 = affine.apply [[MAP5]](%i0, %17, %i0)
-// MAXIMAL-NEXT:          %19 = affine.apply [[MAP6]](%i0, %17, %i0)
-// MAXIMAL-NEXT:          %20 = load %0[%18, %19] : memref<64x1xf32>
+// MAXIMAL-NEXT:          %13 = affine.apply [[MAP8]](%i2, %i3)
+// MAXIMAL-NEXT:          %14 = affine.load %0[%i2 * 16 - %i3 + 15, 0] : memref<64x1xf32>
 // MAXIMAL-NEXT:        }
 // MAXIMAL-NEXT:      }
 // MAXIMAL-NEXT:    }
 // MAXIMAL-NEXT:  }
 
 // -----
-// CHECK-DAG: [[MAP3:#map[0-9]+]] = (d0) -> (d0 - 10)
 
 func @should_fuse_with_slice_union() {
   %a = alloc() : memref<100xf32>
@@ -2293,13 +2075,13 @@
   %cf0 = constant 0.0 : f32
 
   affine.for %i0 = 0 to 100 {
-    store %cf0, %a[%i0]: memref<100xf32>
+    affine.store %cf0, %a[%i0]: memref<100xf32>
   }
 
   affine.for %i1 = 10 to 20 {
-    %v0 = load %a[%i1]: memref<100xf32>
+    %v0 = affine.load %a[%i1]: memref<100xf32>
     affine.for %i2 = 15 to 25 {
-      %v1 = load %a[%i2]: memref<100xf32>
+      %v1 = affine.load %a[%i2]: memref<100xf32>
     }
   }
   // The union of two slice bounds (calculated between the store and each of
@@ -2309,14 +2091,11 @@
   // the fused loops based on the union calculation.
 // CHECK:      affine.for %i0 = 10 to 20 {
 // CHECK-NEXT:   affine.for %i1 = 10 to 25 {
-// CHECK-NEXT:     %1 = affine.apply [[MAP3]](%i1)
-// CHECK-NEXT:     store %cst, %0[%1] : memref<15xf32>
+// CHECK-NEXT:     affine.store %cst, %0[%i1 - 10] : memref<15xf32>
 // CHECK-NEXT:   }
-// CHECK-NEXT:   %2 = affine.apply [[MAP3]](%i0)
-// CHECK-NEXT:   %3 = load %0[%2] : memref<15xf32>
+// CHECK-NEXT:   %1 = affine.load %0[%i0 - 10] : memref<15xf32>
 // CHECK-NEXT:   affine.for %i2 = 15 to 25 {
-// CHECK-NEXT:     %4 = affine.apply [[MAP3]](%i2)
-// CHECK-NEXT:     %5 = load %0[%4] : memref<15xf32>
+// CHECK-NEXT:     %2 = affine.load %0[%i2 - 10] : memref<15xf32>
 // CHECK-NEXT:   }
 // CHECK-NEXT: }
 // CHECK-NEXT: return
@@ -2328,21 +2107,21 @@
 func @affine_add_mm_fused(%arg0: memref<1024x1024xf32>, %arg1: memref<1024x1024xf32>, %arg2: memref<1024x1024xf32>, %arg3: memref<1024x1024xf32>) {
   affine.for %i2 = 0 to 1024 {
     affine.for %i3 = 0 to 1024 {
-      %0 = load %arg3[%i2, %i3] : memref<1024x1024xf32>
-      %1 = load %arg2[%i2, %i3] : memref<1024x1024xf32>
+      %0 = affine.load %arg3[%i2, %i3] : memref<1024x1024xf32>
+      %1 = affine.load %arg2[%i2, %i3] : memref<1024x1024xf32>
       %2 = addf %1, %0 : f32
-      store %2, %arg2[%i2, %i3] : memref<1024x1024xf32>
+      affine.store %2, %arg2[%i2, %i3] : memref<1024x1024xf32>
     }
   }
   affine.for %i4 = 0 to 1024 {
     affine.for %i5 = 0 to 1024 {
       affine.for %i6 = 0 to 1024 {
-        %3 = load %arg1[%i6, %i5] : memref<1024x1024xf32>
-        %4 = load %arg0[%i4, %i6] : memref<1024x1024xf32>
+        %3 = affine.load %arg1[%i6, %i5] : memref<1024x1024xf32>
+        %4 = affine.load %arg0[%i4, %i6] : memref<1024x1024xf32>
         %5 = mulf %4, %3 : f32
-        %6 = load %arg2[%i4, %i5] : memref<1024x1024xf32>
+        %6 = affine.load %arg2[%i4, %i5] : memref<1024x1024xf32>
         %7 = addf %6, %5 : f32
-        store %7, %arg2[%i4, %i5] : memref<1024x1024xf32>
+        affine.store %7, %arg2[%i4, %i5] : memref<1024x1024xf32>
       }
     }
   }
@@ -2350,17 +2129,17 @@
   // dependence between load/store on '%arg2', carried on reduction loop %i6.
   // CHECK:       affine.for %i0 = 0 to 1024 {
   // CHECK-NEXT:    affine.for %i1 = 0 to 1024 {
-  // CHECK-NEXT:      %0 = load %arg3[%i0, %i1] : memref<1024x1024xf32>
-  // CHECK-NEXT:      %1 = load %arg2[%i0, %i1] : memref<1024x1024xf32>
+  // CHECK-NEXT:      %0 = affine.load %arg3[%i0, %i1] : memref<1024x1024xf32>
+  // CHECK-NEXT:      %1 = affine.load %arg2[%i0, %i1] : memref<1024x1024xf32>
   // CHECK-NEXT:      %2 = addf %1, %0 : f32
-  // CHECK-NEXT:      store %2, %arg2[%i0, %i1] : memref<1024x1024xf32>
+  // CHECK-NEXT:      affine.store %2, %arg2[%i0, %i1] : memref<1024x1024xf32>
   // CHECK-NEXT:      affine.for %i2 = 0 to 1024 {
-  // CHECK-NEXT:        %3 = load %arg1[%i2, %i1] : memref<1024x1024xf32>
-  // CHECK-NEXT:        %4 = load %arg0[%i0, %i2] : memref<1024x1024xf32>
+  // CHECK-NEXT:        %3 = affine.load %arg1[%i2, %i1] : memref<1024x1024xf32>
+  // CHECK-NEXT:        %4 = affine.load %arg0[%i0, %i2] : memref<1024x1024xf32>
   // CHECK-NEXT:        %5 = mulf %4, %3 : f32
-  // CHECK-NEXT:        %6 = load %arg2[%i0, %i1] : memref<1024x1024xf32>
+  // CHECK-NEXT:        %6 = affine.load %arg2[%i0, %i1] : memref<1024x1024xf32>
   // CHECK-NEXT:        %7 = addf %6, %5 : f32
-  // CHECK-NEXT:        store %7, %arg2[%i0, %i1] : memref<1024x1024xf32>
+  // CHECK-NEXT:        affine.store %7, %arg2[%i0, %i1] : memref<1024x1024xf32>
   // CHECK-NEXT:      }
   // CHECK-NEXT:    }
   // CHECK-NEXT:  }
@@ -2373,35 +2152,35 @@
   %cst = constant 0.000000e+00 : f32
   affine.for %i0 = 0 to 1024 {
     affine.for %i1 = 0 to 1024 {
-      store %cst, %arg2[%i0, %i1] : memref<1024x1024xf32>
+      affine.store %cst, %arg2[%i0, %i1] : memref<1024x1024xf32>
     }
   }
   affine.for %i2 = 0 to 1024 {
     affine.for %i3 = 0 to 1024 {
-      store %cst, %arg4[%i2, %i3] : memref<1024x1024xf32>
+      affine.store %cst, %arg4[%i2, %i3] : memref<1024x1024xf32>
     }
   }
   affine.for %i4 = 0 to 1024 {
     affine.for %i5 = 0 to 1024 {
       affine.for %i6 = 0 to 1024 {
-        %0 = load %arg1[%i6, %i5] : memref<1024x1024xf32>
-        %1 = load %arg0[%i4, %i6] : memref<1024x1024xf32>
+        %0 = affine.load %arg1[%i6, %i5] : memref<1024x1024xf32>
+        %1 = affine.load %arg0[%i4, %i6] : memref<1024x1024xf32>
         %2 = mulf %1, %0 : f32
-        %3 = load %arg2[%i4, %i5] : memref<1024x1024xf32>
+        %3 = affine.load %arg2[%i4, %i5] : memref<1024x1024xf32>
         %4 = addf %3, %2 : f32
-        store %4, %arg2[%i4, %i5] : memref<1024x1024xf32>
+        affine.store %4, %arg2[%i4, %i5] : memref<1024x1024xf32>
       }
     }
   }
   affine.for %i7 = 0 to 1024 {
     affine.for %i8 = 0 to 1024 {
       affine.for %i9 = 0 to 1024 {
-        %5 = load %arg1[%i9, %i8] : memref<1024x1024xf32>
-        %6 = load %arg0[%i7, %i9] : memref<1024x1024xf32>
+        %5 = affine.load %arg1[%i9, %i8] : memref<1024x1024xf32>
+        %6 = affine.load %arg0[%i7, %i9] : memref<1024x1024xf32>
         %7 = mulf %6, %5 : f32
-        %8 = load %arg4[%i7, %i8] : memref<1024x1024xf32>
+        %8 = affine.load %arg4[%i7, %i8] : memref<1024x1024xf32>
         %9 = addf %8, %7 : f32
-        store %9, %arg4[%i7, %i8] : memref<1024x1024xf32>
+        affine.store %9, %arg4[%i7, %i8] : memref<1024x1024xf32>
       }
     }
   }
@@ -2411,25 +2190,25 @@
 
   // CHECK:        affine.for %i0 = 0 to 1024 {
   // CHECK-NEXT:     affine.for %i1 = 0 to 1024 {
-  // CHECK-NEXT:       store %cst, %arg4[%i0, %i1] : memref<1024x1024xf32>
+  // CHECK-NEXT:       affine.store %cst, %arg4[%i0, %i1] : memref<1024x1024xf32>
   // CHECK-NEXT:       affine.for %i2 = 0 to 1024 {
-  // CHECK-NEXT:         %0 = load %arg1[%i2, %i1] : memref<1024x1024xf32>
-  // CHECK-NEXT:         %1 = load %arg0[%i0, %i2] : memref<1024x1024xf32>
+  // CHECK-NEXT:         %0 = affine.load %arg1[%i2, %i1] : memref<1024x1024xf32>
+  // CHECK-NEXT:         %1 = affine.load %arg0[%i0, %i2] : memref<1024x1024xf32>
   // CHECK-NEXT:         %2 = mulf %1, %0 : f32
-  // CHECK-NEXT:         %3 = load %arg4[%i0, %i1] : memref<1024x1024xf32>
+  // CHECK-NEXT:         %3 = affine.load %arg4[%i0, %i1] : memref<1024x1024xf32>
   // CHECK-NEXT:         %4 = addf %3, %2 : f32
-  // CHECK-NEXT:         store %4, %arg4[%i0, %i1] : memref<1024x1024xf32>
+  // CHECK-NEXT:         affine.store %4, %arg4[%i0, %i1] : memref<1024x1024xf32>
   // CHECK-NEXT:       }
   // CHECK-NEXT:     }
   // CHECK-NEXT:     affine.for %i3 = 0 to 1024 {
-  // CHECK-NEXT:       store %cst, %arg2[%i0, %i3] : memref<1024x1024xf32>
+  // CHECK-NEXT:       affine.store %cst, %arg2[%i0, %i3] : memref<1024x1024xf32>
   // CHECK-NEXT:       affine.for %i4 = 0 to 1024 {
-  // CHECK-NEXT:         %5 = load %arg1[%i4, %i3] : memref<1024x1024xf32>
-  // CHECK-NEXT:         %6 = load %arg0[%i0, %i4] : memref<1024x1024xf32>
+  // CHECK-NEXT:         %5 = affine.load %arg1[%i4, %i3] : memref<1024x1024xf32>
+  // CHECK-NEXT:         %6 = affine.load %arg0[%i0, %i4] : memref<1024x1024xf32>
   // CHECK-NEXT:         %7 = mulf %6, %5 : f32
-  // CHECK-NEXT:         %8 = load %arg2[%i0, %i3] : memref<1024x1024xf32>
+  // CHECK-NEXT:         %8 = affine.load %arg2[%i0, %i3] : memref<1024x1024xf32>
   // CHECK-NEXT:         %9 = addf %8, %7 : f32
-  // CHECK-NEXT:         store %9, %arg2[%i0, %i3] : memref<1024x1024xf32>
+  // CHECK-NEXT:         affine.store %9, %arg2[%i0, %i3] : memref<1024x1024xf32>
   // CHECK-NEXT:       }
   // CHECK-NEXT:     }
   // CHECK-NEXT:   }
@@ -2443,24 +2222,24 @@
   affine.for %i0 = 0 to 1024 {
     affine.for %i1 = 0 to 1024 {
       affine.for %i2 = 0 to 1024 {
-        %0 = load %arg1[%i2, %i1] : memref<1024x1024xf32>
-        %1 = load %arg0[%i0, %i2] : memref<1024x1024xf32>
+        %0 = affine.load %arg1[%i2, %i1] : memref<1024x1024xf32>
+        %1 = affine.load %arg0[%i0, %i2] : memref<1024x1024xf32>
         %2 = mulf %1, %0 : f32
-        %3 = load %arg2[%i0, %i1] : memref<1024x1024xf32>
+        %3 = affine.load %arg2[%i0, %i1] : memref<1024x1024xf32>
         %4 = addf %3, %2 : f32
-        store %4, %arg2[%i0, %i1] : memref<1024x1024xf32>
+        affine.store %4, %arg2[%i0, %i1] : memref<1024x1024xf32>
       }
     }
   }
   affine.for %i3 = 0 to 1024 {
     affine.for %i4 = 0 to 1024 {
       affine.for %i5 = 0 to 1024 {
-        %5 = load %arg3[%i5, %i4] : memref<1024x1024xf32>
-        %6 = load %arg2[%i3, %i5] : memref<1024x1024xf32>
+        %5 = affine.load %arg3[%i5, %i4] : memref<1024x1024xf32>
+        %6 = affine.load %arg2[%i3, %i5] : memref<1024x1024xf32>
         %7 = mulf %6, %5 : f32
-        %8 = load %arg4[%i3, %i4] : memref<1024x1024xf32>
+        %8 = affine.load %arg4[%i3, %i4] : memref<1024x1024xf32>
         %9 = addf %8, %7 : f32
-        store %9, %arg4[%i3, %i4] : memref<1024x1024xf32>
+        affine.store %9, %arg4[%i3, %i4] : memref<1024x1024xf32>
       }
     }
   }
@@ -2468,22 +2247,22 @@
   // CHECK:  affine.for %i0 = 0 to 1024 {
   // CHECK-NEXT:     affine.for %i1 = 0 to 1024 {
   // CHECK-NEXT:       affine.for %i2 = 0 to 1024 {
-  // CHECK-NEXT:         %0 = load %arg1[%i2, %i1] : memref<1024x1024xf32>
-  // CHECK-NEXT:         %1 = load %arg0[%i0, %i2] : memref<1024x1024xf32>
+  // CHECK-NEXT:         %0 = affine.load %arg1[%i2, %i1] : memref<1024x1024xf32>
+  // CHECK-NEXT:         %1 = affine.load %arg0[%i0, %i2] : memref<1024x1024xf32>
   // CHECK-NEXT:         %2 = mulf %1, %0 : f32
-  // CHECK-NEXT:         %3 = load %arg2[%i0, %i1] : memref<1024x1024xf32>
+  // CHECK-NEXT:         %3 = affine.load %arg2[%i0, %i1] : memref<1024x1024xf32>
   // CHECK-NEXT:         %4 = addf %3, %2 : f32
-  // CHECK-NEXT:         store %4, %arg2[%i0, %i1] : memref<1024x1024xf32>
+  // CHECK-NEXT:         affine.store %4, %arg2[%i0, %i1] : memref<1024x1024xf32>
   // CHECK-NEXT:       }
   // CHECK-NEXT:     }
   // CHECK-NEXT:     affine.for %i3 = 0 to 1024 {
   // CHECK-NEXT:       affine.for %i4 = 0 to 1024 {
-  // CHECK-NEXT:         %5 = load %arg3[%i4, %i3] : memref<1024x1024xf32>
-  // CHECK-NEXT:         %6 = load %arg2[%i0, %i4] : memref<1024x1024xf32>
+  // CHECK-NEXT:         %5 = affine.load %arg3[%i4, %i3] : memref<1024x1024xf32>
+  // CHECK-NEXT:         %6 = affine.load %arg2[%i0, %i4] : memref<1024x1024xf32>
   // CHECK-NEXT:         %7 = mulf %6, %5 : f32
-  // CHECK-NEXT:         %8 = load %arg4[%i0, %i3] : memref<1024x1024xf32>
+  // CHECK-NEXT:         %8 = affine.load %arg4[%i0, %i3] : memref<1024x1024xf32>
   // CHECK-NEXT:         %9 = addf %8, %7 : f32
-  // CHECK-NEXT:         store %9, %arg4[%i0, %i3] : memref<1024x1024xf32>
+  // CHECK-NEXT:         affine.store %9, %arg4[%i0, %i3] : memref<1024x1024xf32>
   // CHECK-NEXT:       }
   // CHECK-NEXT:     }
   // CHECK-NEXT:   }
diff --git a/mlir/test/Transforms/loop-invariant-code-motion.mlir b/mlir/test/Transforms/loop-invariant-code-motion.mlir
index 4173386..0493ed1 100644
--- a/mlir/test/Transforms/loop-invariant-code-motion.mlir
+++ b/mlir/test/Transforms/loop-invariant-code-motion.mlir
@@ -8,7 +8,7 @@
   affine.for %i0 = 0 to 10 {
     %v0 = addf %cf7, %cf8 : f32
     affine.for %i1 = 0 to 10 {
-      store %v0, %m[%i0] : memref<10xf32>
+      affine.store %v0, %m[%i0] : memref<10xf32>
     }
   }
 
@@ -17,7 +17,7 @@
   // CHECK-NEXT: %cst_0 = constant 8.000000e+00 : f32
   // CHECK-NEXT: %1 = addf %cst, %cst_0 : f32
   // CHECK-NEXT: affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: affine.store %1, %0[%i0] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return
   return
@@ -31,14 +31,14 @@
   %m = alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
       %t0 = affine.apply (d1) -> (d1 + 1)(%i0)
-      store %cf7, %m[%t0] : memref<10xf32>
+      affine.store %cf7, %m[%t0] : memref<10xf32>
   }
   return %m : memref<10xf32>
 // CHECK:       %cst = constant 7.000000e+00 : f32
 // CHECK-NEXT:  %0 = alloc() : memref<10xf32>
 // CHECK-NEXT:  affine.for %i0 = 0 to 10 {
-// CHECK-NEXT:      %1 = affine.apply #map2(%i0)
-// CHECK-NEXT:      store %cst, %0[%1] : memref<10xf32>
+// CHECK-NEXT:      %1 = affine.apply #map3(%i0)
+// CHECK-NEXT:      affine.store %cst, %0[%1] : memref<10xf32>
 // CHECK-NEXT:  }
 // CHECK-NEXT:  return %0 : memref<10xf32>
 }
@@ -66,19 +66,19 @@
   %m1 = alloc() : memref<10xf32>
   %m2 = alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
-    %v0 = load %m1[%i0] : memref<10xf32>
-    %v1 = load %m2[%i0] : memref<10xf32>
+    %v0 = affine.load %m1[%i0] : memref<10xf32>
+    %v1 = affine.load %m2[%i0] : memref<10xf32>
     %v2 = addf %v0, %v1 : f32
-    store %v2, %m1[%i0] : memref<10xf32>
+    affine.store %v2, %m1[%i0] : memref<10xf32>
   }
 
   // CHECK: %0 = alloc() : memref<10xf32>
   // CHECK-NEXT: %1 = alloc() : memref<10xf32>
   // CHECK-NEXT: affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT: %2 = load %0[%i0] : memref<10xf32>
-  // CHECK-NEXT: %3 = load %1[%i0] : memref<10xf32>
+  // CHECK-NEXT: %2 = affine.load %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: %3 = affine.load %1[%i0] : memref<10xf32>
   // CHECK-NEXT: %4 = addf %2, %3 : f32
-  // CHECK-NEXT: store %4, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: affine.store %4, %0[%i0] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return
   return
@@ -93,7 +93,7 @@
     %t0 = affine.apply (d1) -> (d1 + 1)(%i0)
     affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %t0) {
         %cf9 = addf %cf8, %cf8 : f32
-        store %cf9, %m[%i0] : memref<10xf32>
+        affine.store %cf9, %m[%i0] : memref<10xf32>
 
     }
   }
@@ -101,10 +101,10 @@
   // CHECK: %0 = alloc() : memref<10xf32>
   // CHECK-NEXT: %cst = constant 8.000000e+00 : f32
   // CHECK-NEXT: affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT: %1 = affine.apply #map2(%i0)
+  // CHECK-NEXT: %1 = affine.apply #map3(%i0)
   // CHECK-NEXT: affine.if #set0(%i0, %1) {
   // CHECK-NEXT: %2 = addf %cst, %cst : f32
-  // CHECK-NEXT: store %2, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: affine.store %2, %0[%i0] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: }
   // CHECK-NEXT: return
@@ -122,8 +122,8 @@
     %v0 = addf %cf7, %cf8 : f32
     affine.for %i1 = 0 to 10 {
       %v1 = addf %cf7, %cf7 : f32
-      store %v1, %m[%i1] : memref<10xf32>
-      store %v0, %m[%i0] : memref<10xf32>
+      affine.store %v1, %m[%i1] : memref<10xf32>
+      affine.store %v0, %m[%i0] : memref<10xf32>
     }
   }
 
@@ -135,8 +135,8 @@
   // CHECK-NEXT: affine.for %i0 = 0 to 10 {
   
   // CHECK-NEXT: affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:   store %2, %0[%i1] : memref<10xf32>
-  // CHECK-NEXT:   store %1, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT:   affine.store %2, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT:   affine.store %1, %0[%i0] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT: }
   // CHECK-NEXT: return
@@ -152,8 +152,8 @@
     %v0 = addf %cf7, %cf8 : f32
     affine.for %i1 = 0 to 10 {
       %v1 = addf %cf7, %cf7 : f32
-      store %v0, %m[%i0] : memref<10xf32>
-      store %v1, %m[%i1] : memref<10xf32>
+      affine.store %v0, %m[%i0] : memref<10xf32>
+      affine.store %v1, %m[%i1] : memref<10xf32>
     }
   }
 
@@ -164,8 +164,8 @@
   // CHECK-NEXT: %2 = addf %cst, %cst : f32
   // CHECK-NEXT: affine.for %i0 = 0 to 10 {
   // CHECK-NEXT:   affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:     store %1, %0[%i0] : memref<10xf32> 
-  // CHECK-NEXT:     store %2, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT:     affine.store %1, %0[%i0] : memref<10xf32> 
+  // CHECK-NEXT:     affine.store %2, %0[%i1] : memref<10xf32>
   // CHECK-NEXT:    }
   // CHECK-NEXT: }
   // CHECK-NEXT: return
@@ -181,8 +181,8 @@
     %v0 = addf %cf7, %cf8 : f32
     affine.for %i1 = 0 to 10 {
       %v1 = addf %cf7, %cf7 : f32
-      store %v0, %m[%i1] : memref<10xf32>
-      %v2 = load %m[%i0] : memref<10xf32>
+      affine.store %v0, %m[%i1] : memref<10xf32>
+      %v2 = affine.load %m[%i0] : memref<10xf32>
     }
   }
 
@@ -193,8 +193,8 @@
   // CHECK-NEXT: %2 = addf %cst, %cst : f32
   // CHECK-NEXT: affine.for %i0 = 0 to 10 {
   // CHECK-NEXT: affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:   store %1, %0[%i1] : memref<10xf32>
-  // CHECK-NEXT:   %3 = load %0[%i0] : memref<10xf32> 
+  // CHECK-NEXT:   affine.store %1, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT:   %3 = affine.load %0[%i0] : memref<10xf32> 
   // CHECK-NEXT:  }
   // CHECK-NEXT: }
   // CHECK-NEXT: return
@@ -210,8 +210,8 @@
     %v0 = addf %cf7, %cf8 : f32
     affine.for %i1 = 0 to 10 {
       %v1 = addf %cf7, %cf7 : f32
-      %v3 = load %m[%i1] : memref<10xf32>
-      %v2 = load %m[%i0] : memref<10xf32>
+      %v3 = affine.load %m[%i1] : memref<10xf32>
+      %v2 = affine.load %m[%i0] : memref<10xf32>
     }
   }
 
@@ -221,9 +221,9 @@
   // CHECK-NEXT: %1 = addf %cst, %cst_0 : f32
   // CHECK-NEXT: %2 = addf %cst, %cst : f32
   // CHECK-NEXT: affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT: %3 = load %0[%i0] : memref<10xf32> 
+  // CHECK-NEXT: %3 = affine.load %0[%i0] : memref<10xf32> 
   // CHECK-NEXT: affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT: %4 = load %0[%i1] : memref<10xf32> 
+  // CHECK-NEXT: %4 = affine.load %0[%i1] : memref<10xf32> 
   // CHECK-NEXT:  }
   // CHECK-NEXT: }
   // CHECK-NEXT: return
@@ -237,7 +237,7 @@
     affine.for %i1 = 0 to 10 {
       affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
           %cf9 = addf %cf8, %cf8 : f32
-          store %cf9, %m[%i0] : memref<10xf32>
+          affine.store %cf9, %m[%i0] : memref<10xf32>
 
       }
     }
@@ -248,7 +248,7 @@
   // CHECK-NEXT: affine.for %i0 = 0 to 10 {
   // CHECK-NEXT: affine.if #set0(%i0, %i0) {
   // CHECK-NEXT: %1 = addf %cst, %cst : f32
-  // CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: affine.store %1, %0[%i0] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: }
   // CHECK-NEXT: return
@@ -263,7 +263,7 @@
     affine.for %i1 = 0 to 10 {
       affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
           %cf9 = addf %cf8, %cf8 : f32
-          store %cf9, %m[%i1] : memref<10xf32>
+          affine.store %cf9, %m[%i1] : memref<10xf32>
 
       }
     }
@@ -275,7 +275,7 @@
   // CHECK-NEXT: affine.for %i1 = 0 to 10 {
   // CHECK-NEXT: affine.if #set0(%i0, %i0) {
   // CHECK-NEXT: %1 = addf %cst, %cst : f32
-  // CHECK-NEXT: store %1, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT: affine.store %1, %0[%i1] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: }
   // CHECK-NEXT: }
@@ -291,9 +291,9 @@
     affine.for %i1 = 0 to 10 {
       affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
           %cf9 = addf %cf8, %cf8 : f32
-          store %cf9, %m[%i0] : memref<10xf32>
+          affine.store %cf9, %m[%i0] : memref<10xf32>
           affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
-            store %cf9, %m[%i1] : memref<10xf32>
+            affine.store %cf9, %m[%i1] : memref<10xf32>
           }
       }
     }
@@ -305,9 +305,9 @@
   // CHECK-NEXT: affine.for %i1 = 0 to 10 {
   // CHECK-NEXT: affine.if #set0(%i0, %i0) {
   // CHECK-NEXT: %1 = addf %cst, %cst : f32
-  // CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: affine.store %1, %0[%i0] : memref<10xf32>
   // CHECK-NEXT: affine.if #set0(%i0, %i0) {
-  // CHECK-NEXT: store %1, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT: affine.store %1, %0[%i1] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: }
   // CHECK-NEXT: }
@@ -324,11 +324,11 @@
     affine.for %i1 = 0 to 10 {
       affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
           %cf9 = addf %cf8, %cf8 : f32
-          store %cf9, %m[%i0] : memref<10xf32>
+          affine.store %cf9, %m[%i0] : memref<10xf32>
           affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
-            store %cf9, %m[%i0] : memref<10xf32>
+            affine.store %cf9, %m[%i0] : memref<10xf32>
           } else {
-            store %cf9, %m[%i1] : memref<10xf32>
+            affine.store %cf9, %m[%i1] : memref<10xf32>
           }
       }
     }
@@ -340,11 +340,11 @@
   // CHECK-NEXT: affine.for %i1 = 0 to 10 {
   // CHECK-NEXT: affine.if #set0(%i0, %i0) {
   // CHECK-NEXT: %1 = addf %cst, %cst : f32
-  // CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: affine.store %1, %0[%i0] : memref<10xf32>
   // CHECK-NEXT: affine.if #set0(%i0, %i0) {
-  // CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: affine.store %1, %0[%i0] : memref<10xf32>
   // CHECK-NEXT: } else {
-  // CHECK-NEXT: store %1, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT: affine.store %1, %0[%i1] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: }
   // CHECK-NEXT: }
@@ -362,11 +362,11 @@
     affine.for %i1 = 0 to 10 {
       affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
           %cf9 = addf %cf8, %cf8 : f32
-          %tload1 = load %m[%i0] : memref<10xf32>
+          %tload1 = affine.load %m[%i0] : memref<10xf32>
           affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
-            store %cf9, %m2[%i0] : memref<10xf32>
+            affine.store %cf9, %m2[%i0] : memref<10xf32>
           } else {
-            %tload2 = load %m[%i0] : memref<10xf32>
+            %tload2 = affine.load %m[%i0] : memref<10xf32>
           }
       }
     }
@@ -378,11 +378,11 @@
   // CHECK-NEXT: affine.for %i0 = 0 to 10 {
   // CHECK-NEXT: affine.if #set0(%i0, %i0) {
   // CHECK-NEXT: %2 = addf %cst, %cst : f32
-  // CHECK-NEXT: %3 = load %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: %3 = affine.load %0[%i0] : memref<10xf32>
   // CHECK-NEXT: affine.if #set0(%i0, %i0) {
-  // CHECK-NEXT: store %2, %1[%i0] : memref<10xf32>
+  // CHECK-NEXT: affine.store %2, %1[%i0] : memref<10xf32>
   // CHECK-NEXT: } else {
-  // CHECK-NEXT: %4 = load %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: %4 = affine.load %0[%i0] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: }
   // CHECK-NEXT: }
@@ -399,9 +399,9 @@
     affine.for %i1 = 0 to 10 {
       affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
           %cf9 = addf %cf8, %cf8 : f32
-          %v1 = load %m[%i0] : memref<10xf32>
+          %v1 = affine.load %m[%i0] : memref<10xf32>
           affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
-            %v2 = load %m[%i0] : memref<10xf32>
+            %v2 = affine.load %m[%i0] : memref<10xf32>
           }
       }
     }
@@ -412,9 +412,9 @@
   // CHECK-NEXT: affine.for %i0 = 0 to 10 {
   // CHECK-NEXT: affine.if #set0(%i0, %i0) {
   // CHECK-NEXT: %1 = addf %cst, %cst : f32
-  // CHECK-NEXT: %2 = load %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: %2 = affine.load %0[%i0] : memref<10xf32>
   // CHECK-NEXT: affine.if #set0(%i0, %i0) {
-  // CHECK-NEXT: %3 = load %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: %3 = affine.load %0[%i0] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: }
   // CHECK-NEXT: }
@@ -430,9 +430,9 @@
     affine.for %i1 = 0 to 10 {
       affine.if (d0, d1) : (d1 - d0 >= 0) (%i0, %i0) {
           %cf9 = addf %cf8, %cf8 : f32
-          store %cf9, %m[%i0] : memref<10xf32>
+          affine.store %cf9, %m[%i0] : memref<10xf32>
           affine.for %i2 = 0 to 10 {
-            store %cf9, %m[%i2] : memref<10xf32>
+            affine.store %cf9, %m[%i2] : memref<10xf32>
           }
       }
     }
@@ -444,9 +444,9 @@
   // CHECK-NEXT: affine.for %i1 = 0 to 10 {
   // CHECK-NEXT: affine.if #set0(%i0, %i0) {
   // CHECK-NEXT: %1 = addf %cst, %cst : f32
-  // CHECK-NEXT: store %1, %0[%i0] : memref<10xf32>
+  // CHECK-NEXT: affine.store %1, %0[%i0] : memref<10xf32>
   // CHECK-NEXT: affine.for %i2 = 0 to 10 {
-  // CHECK-NEXT: store %1, %0[%i2] : memref<10xf32>
+  // CHECK-NEXT: affine.store %1, %0[%i2] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: }
   // CHECK-NEXT: }
@@ -462,16 +462,16 @@
   %m2 = alloc() : memref<100xf32>
   affine.for %i0 = 0 to 5 {
     %c0 = constant 0 : index
-    %v = load %m2[%c0] : memref<100xf32>
-    store %v, %m[%i0] : memref<100xf32>
+    %v = affine.load %m2[%c0] : memref<100xf32>
+    affine.store %v, %m[%i0] : memref<100xf32>
   }
 
   // CHECK: %0 = alloc() : memref<100xf32>
   // CHECK-NEXT: %1 = alloc() : memref<100xf32>
   // CHECK-NEXT: %c0 = constant 0 : index
-  // CHECK-NEXT: %2 = load %1[%c0] : memref<100xf32>
+  // CHECK-NEXT: %2 = affine.load %1[%c0] : memref<100xf32>
   // CHECK-NEXT: affine.for %i0 = 0 to 5 {
-  // CHECK-NEXT:  store %2, %0[%i0] : memref<100xf32>
+  // CHECK-NEXT:  affine.store %2, %0[%i0] : memref<100xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return
 
@@ -484,9 +484,9 @@
   %cst = constant 8.0 : f32
   %c0 = constant 0 : index
    affine.for %i0 = 0 to 10 {
-    %v0 = load %m[%c0] : memref<10xf32>
+    %v0 = affine.load %m[%c0] : memref<10xf32>
     affine.for %i1 = 0 to 10 {
-      store %cst, %m[%i1] : memref<10xf32>
+      affine.store %cst, %m[%i1] : memref<10xf32>
     }
   }
 
@@ -494,9 +494,9 @@
   // CHECK-NEXT: %cst = constant 8.000000e+00 : f32
   // CHECK-NEXT: %c0 = constant 0 : index
   // CHECK-NEXT: affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:  %1 = load %0[%c0] : memref<10xf32>
+  // CHECK-NEXT:  %1 = affine.load %0[%c0] : memref<10xf32>
   // CHECK-NEXT:   affine.for %i1 = 0 to 10 {
-  // CHECK-NEXT:    store %cst, %0[%i1] : memref<10xf32>
+  // CHECK-NEXT:    affine.store %cst, %0[%i1] : memref<10xf32>
   // CHECK-NEXT:  }
   // CHECK-NEXT: }
   // CHECK-NEXT: return
@@ -510,9 +510,9 @@
   %cst = constant 8.0 : f32
   %c0 = constant 0 : index
    affine.for %i0 = 0 to 10 {
-     store %cst, %m[%c0] : memref<10xf32>
+     affine.store %cst, %m[%c0] : memref<10xf32>
       affine.for %i1 = 0 to 10 {
-        %v0 = load %m[%i0] : memref<10xf32>
+        %v0 = affine.load %m[%i0] : memref<10xf32>
     }
   }
 
@@ -520,8 +520,8 @@
   // CHECK-NEXT: %cst = constant 8.000000e+00 : f32
   // CHECK-NEXT: %c0 = constant 0 : index
   // CHECK-NEXT: affine.for %i0 = 0 to 10 {
-  // CHECK-NEXT:   store %cst, %0[%c0] : memref<10xf32>
-  // CHECK-NEXT:   %1 = load %0[%i0] : memref<10xf32>
+  // CHECK-NEXT:   affine.store %cst, %0[%c0] : memref<10xf32>
+  // CHECK-NEXT:   %1 = affine.load %0[%i0] : memref<10xf32>
   // CHECK-NEXT: }
   // CHECK-NEXT: return
 
diff --git a/mlir/test/Transforms/loop-tiling.mlir b/mlir/test/Transforms/loop-tiling.mlir
index b57ee2d..bf1b0ac 100644
--- a/mlir/test/Transforms/loop-tiling.mlir
+++ b/mlir/test/Transforms/loop-tiling.mlir
@@ -87,12 +87,12 @@
   affine.for %i = 0 to 256 {
     affine.for %j = 0 to 256 {
       affine.for %k = 0 to 250 {
-        %l = load %arg0[%i, %k] : memref<256x256xvector<64xf32>>
-        %r = load %arg1[%k, %j] : memref<256x256xvector<64xf32>>
-        %o = load %arg2[%i, %j] : memref<256x256xvector<64xf32>>
+        %l = affine.load %arg0[%i, %k] : memref<256x256xvector<64xf32>>
+        %r = affine.load %arg1[%k, %j] : memref<256x256xvector<64xf32>>
+        %o = affine.load %arg2[%i, %j] : memref<256x256xvector<64xf32>>
         %m = mulf %l, %r : vector<64xf32>
         %a = addf %o, %m : vector<64xf32>
-        store %a, %arg2[%i, %j] : memref<256x256xvector<64xf32>>
+        affine.store %a, %arg2[%i, %j] : memref<256x256xvector<64xf32>>
       }
     }
   }
@@ -112,14 +112,14 @@
   %0 = dim %arg0, 0 : memref<?x?xf32>
   affine.for %i0 = 0 to %0 {
     affine.for %i1 = 0 to %0 {
-      store %cst, %arg2[%i0, %i1] : memref<?x?xf32>
+      affine.store %cst, %arg2[%i0, %i1] : memref<?x?xf32>
       affine.for %i2 = 0 to %0 {
-        %1 = load %arg0[%i0, %i2] : memref<?x?xf32>
-        %2 = load %arg1[%i2, %i1] : memref<?x?xf32>
+        %1 = affine.load %arg0[%i0, %i2] : memref<?x?xf32>
+        %2 = affine.load %arg1[%i2, %i1] : memref<?x?xf32>
         %3 = mulf %1, %2 : f32
-        %4 = load %arg2[%i0, %i1] : memref<?x?xf32>
+        %4 = affine.load %arg2[%i0, %i1] : memref<?x?xf32>
         %5 = addf %4, %3 : f32
-        store %5, %arg2[%i0, %i1] : memref<?x?xf32>
+        affine.store %5, %arg2[%i0, %i1] : memref<?x?xf32>
       }
     }
   }
@@ -129,16 +129,16 @@
 // CHECK:       %0 = dim %arg0, 0 : memref<?x?xf32>
 // CHECK-NEXT:  affine.for %i0 = 0 to %0 step 32 {
 // CHECK-NEXT:    affine.for %i1 = 0 to %0 step 32 {
-// CHECK-NEXT:      affine.for %i2 = #map2(%i0) to min [[UBMAP]](%i0)[%0] {
-// CHECK-NEXT:        affine.for %i3 = #map2(%i1) to min [[UBMAP]](%i1)[%0] {
-// CHECK-NEXT:          store %cst, %arg2[%i2, %i3] : memref<?x?xf32>
+// CHECK-NEXT:      affine.for %i2 = #map3(%i0) to min [[UBMAP]](%i0)[%0] {
+// CHECK-NEXT:        affine.for %i3 = #map3(%i1) to min [[UBMAP]](%i1)[%0] {
+// CHECK-NEXT:          affine.store %cst, %arg2[%i2, %i3] : memref<?x?xf32>
 // CHECK-NEXT:          affine.for %i4 = 0 to %0 {
-// CHECK-NEXT:            %1 = load %arg0[%i2, %i4] : memref<?x?xf32>
-// CHECK-NEXT:            %2 = load %arg1[%i4, %i3] : memref<?x?xf32>
+// CHECK-NEXT:            %1 = affine.load %arg0[%i2, %i4] : memref<?x?xf32>
+// CHECK-NEXT:            %2 = affine.load %arg1[%i4, %i3] : memref<?x?xf32>
 // CHECK-NEXT:            %3 = mulf %1, %2 : f32
-// CHECK-NEXT:            %4 = load %arg2[%i2, %i3] : memref<?x?xf32>
+// CHECK-NEXT:            %4 = affine.load %arg2[%i2, %i3] : memref<?x?xf32>
 // CHECK-NEXT:            %5 = addf %4, %3 : f32
-// CHECK-NEXT:            store %5, %arg2[%i2, %i3] : memref<?x?xf32>
+// CHECK-NEXT:            affine.store %5, %arg2[%i2, %i3] : memref<?x?xf32>
 // CHECK-NEXT:          }
 // CHECK-NEXT:        }
 // CHECK-NEXT:      }
@@ -155,7 +155,7 @@
 func @tile_with_loop_upper_bounds_in_two_symbols(%arg0: memref<?xf32>, %limit: index) {
   %dim0 = dim %arg0, 0 : memref<?xf32>
   affine.for %i0 = 0 to ()[s0, s1] -> (s0 + s1) ()[%dim0, %limit] {
-    %v0 = load %arg0[%i0] : memref<?xf32>
+    %v0 = affine.load %arg0[%i0] : memref<?xf32>
   }
   return
 }
@@ -163,7 +163,7 @@
 // CHECK:       %0 = dim %arg0, 0 : memref<?xf32>
 // CHECK-NEXT:  affine.for %i0 = 0 to [[MAP1]]()[%0, %arg1] step 32 {
 // CHECK-NEXT:    affine.for %i1 = [[MAP0]](%i0) to min [[UBMAP]](%i0)[%0, %arg1] {
-// CHECK-NEXT:      %1 = load %arg0[%i1] : memref<?xf32>
+// CHECK-NEXT:      %1 = affine.load %arg0[%i1] : memref<?xf32>
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
 
@@ -173,12 +173,12 @@
     -> memref<196608x1xf32> {
   affine.for %i1 = 0 to 196608 {
     affine.for %i3 = 0 to 1 {
-      %4 = load %arg0[%i1, %i3] : memref<196608x1xf32>
-      store %4, %arg1[%i1, %i3] : memref<196608x1xf32>
+      %4 = affine.load %arg0[%i1, %i3] : memref<196608x1xf32>
+      affine.store %4, %arg1[%i1, %i3] : memref<196608x1xf32>
     }
   }
   return %arg1 : memref<196608x1xf32>
 }
 
-// CHECK: %0 = load %arg0[%i2, %i3] : memref<196608x1xf32>
+// CHECK: %0 = affine.load %arg0[%i2, %i3] : memref<196608x1xf32>
 
diff --git a/mlir/test/Transforms/memref-bound-check.mlir b/mlir/test/Transforms/memref-bound-check.mlir
index cbad0ae..b83e9c9 100644
--- a/mlir/test/Transforms/memref-bound-check.mlir
+++ b/mlir/test/Transforms/memref-bound-check.mlir
@@ -16,24 +16,24 @@
       %idx0 = affine.apply (d0, d1) -> (d0)(%i, %j)
       %idx1 = affine.apply (d0, d1) -> (d1)(%i, %j)
       // Out of bound access.
-      %x  = load %A[%idx0, %idx1] : memref<9 x 9 x i32>
-      // expected-error@-1 {{'std.load' op memref out of upper bound access along dimension #1}}
-      // expected-error@-2 {{'std.load' op memref out of lower bound access along dimension #1}}
-      // expected-error@-3 {{'std.load' op memref out of upper bound access along dimension #2}}
-      // expected-error@-4 {{'std.load' op memref out of lower bound access along dimension #2}}
+      %x  = affine.load %A[%idx0, %idx1] : memref<9 x 9 x i32>
+      // expected-error@-1 {{'affine.load' op memref out of upper bound access along dimension #1}}
+      // expected-error@-2 {{'affine.load' op memref out of lower bound access along dimension #1}}
+      // expected-error@-3 {{'affine.load' op memref out of upper bound access along dimension #2}}
+      // expected-error@-4 {{'affine.load' op memref out of lower bound access along dimension #2}}
       // This will access 0 to 110 - hence an overflow.
       %idy = affine.apply (d0, d1) -> (10*d0 - d1 + 19)(%i, %j)
-      %y = load %B[%idy] : memref<111 x i32>
+      %y = affine.load %B[%idy] : memref<111 x i32>
     }
   }
 
   affine.for %k = 0 to 10 {
       // In bound.
-      %u = load %B[%zero] : memref<111 x i32>
+      %u = affine.load %B[%zero] : memref<111 x i32>
       // Out of bounds.
-      %v = load %B[%sym] : memref<111 x i32> // expected-error {{'std.load' op memref out of upper bound access along dimension #1}}
+      %v = affine.load %B[%sym] : memref<111 x i32> // expected-error {{'affine.load' op memref out of upper bound access along dimension #1}}
       // Out of bounds.
-      store %v, %B[%minusone] : memref<111 x i32>  // expected-error {{'std.store' op memref out of lower bound access along dimension #1}}
+      affine.store %v, %B[%minusone] : memref<111 x i32>  // expected-error {{'affine.store' op memref out of lower bound access along dimension #1}}
   }
   return
 }
@@ -48,14 +48,14 @@
       %idx0 = affine.apply (d0, d1, d2) -> (d0 mod 128 + 1)(%i, %j, %j)
       %idx1 = affine.apply (d0, d1, d2) -> (d1 floordiv 4 + 1)(%i, %j, %j)
       %idx2 = affine.apply (d0, d1, d2) -> (d2 ceildiv 4)(%i, %j, %j)
-      %x  = load %A[%idx0, %idx1, %idx2] : memref<128 x 64 x 64 x i32>
-      // expected-error@-1 {{'std.load' op memref out of upper bound access along dimension #1}}
-      // expected-error@-2 {{'std.load' op memref out of upper bound access along dimension #2}}
-      // expected-error@-3 {{'std.load' op memref out of upper bound access along dimension #3}}
+      %x  = affine.load %A[%idx0, %idx1, %idx2] : memref<128 x 64 x 64 x i32>
+      // expected-error@-1 {{'affine.load' op memref out of upper bound access along dimension #1}}
+      // expected-error@-2 {{'affine.load' op memref out of upper bound access along dimension #2}}
+      // expected-error@-3 {{'affine.load' op memref out of upper bound access along dimension #3}}
       %idy0 = affine.apply (d0, d1, d2) -> (d0 mod 128)(%i, %j, %j)
       %idy1 = affine.apply (d0, d1, d2) -> (d1 floordiv 4)(%i, %j, %j)
       %idy2 = affine.apply (d0, d1, d2) -> (d2 ceildiv 4 - 1)(%i, %j, %j)
-      store %x, %A[%idy0, %idy1, %idy2] : memref<128 x 64 x 64 x i32> // expected-error {{'std.store' op memref out of lower bound access along dimension #3}}
+      affine.store %x, %A[%idy0, %idy1, %idy2] : memref<128 x 64 x 64 x i32> // expected-error {{'affine.store' op memref out of lower bound access along dimension #3}}
     } // CHECK }
   } // CHECK }
   return
@@ -72,16 +72,16 @@
     affine.for %j = 0 to 256 {
       // All of these accesses are in bound; check that no errors are emitted.
       // CHECK: %3 = affine.apply {{#map.*}}(%i0, %i1)
-      // CHECK-NEXT: %4 = load %0[%3, %c0] : memref<257x256xi32>
+      // CHECK-NEXT: %4 = affine.load %0[%3, %c0] : memref<257x256xi32>
       // CHECK-NEXT: %5 = affine.apply {{#map.*}}(%i0, %i0)
-      // CHECK-NEXT: %6 = load %2[%5] : memref<1xi32>
+      // CHECK-NEXT: %6 = affine.load %2[%5] : memref<1xi32>
       %idx0 = affine.apply (d0, d1) -> ( 64 * (d0 ceildiv 64))(%i, %j)
       // Without GCDTightenInequalities(), the upper bound on the region
       // accessed along first memref dimension would have come out as d0 <= 318
       // (instead of d0 <= 256), and led to a false positive out of bounds.
-      %x  = load %A[%idx0, %zero] : memref<257 x 256 x i32>
+      %x  = affine.load %A[%idx0, %zero] : memref<257 x 256 x i32>
       %idy = affine.apply (d0, d1) -> (d0 floordiv 256)(%i, %i)
-      %y  = load %B[%idy] : memref<1 x i32>
+      %y  = affine.load %B[%idy] : memref<1 x i32>
     } // CHECK-NEXT }
   }
   return
@@ -97,14 +97,14 @@
       %idx0 = affine.apply (d0, d1, d2) -> (d0 mod 128 + 1)(%i, %j, %j)
       %idx1 = affine.apply (d0, d1, d2) -> (d1 floordiv 4 + 1)(%i, %j, %j)
       %idx2 = affine.apply (d0, d1, d2) -> (d2 ceildiv 4)(%i, %j, %j)
-      %x  = load %A[%idx0, %idx1, %idx2] : memref<128 x 64 x 64 x i32>
-      // expected-error@-1 {{'std.load' op memref out of upper bound access along dimension #1}}
-      // expected-error@-2 {{'std.load' op memref out of upper bound access along dimension #2}}
-      // expected-error@-3 {{'std.load' op memref out of upper bound access along dimension #3}}
+      %x  = affine.load %A[%idx0, %idx1, %idx2] : memref<128 x 64 x 64 x i32>
+      // expected-error@-1 {{'affine.load' op memref out of upper bound access along dimension #1}}
+      // expected-error@-2 {{'affine.load' op memref out of upper bound access along dimension #2}}
+      // expected-error@-3 {{'affine.load' op memref out of upper bound access along dimension #3}}
       %idy0 = affine.apply (d0, d1, d2) -> (d0 mod 128)(%i, %j, %j)
       %idy1 = affine.apply (d0, d1, d2) -> (d1 floordiv 4)(%i, %j, %j)
       %idy2 = affine.apply (d0, d1, d2) -> (d2 ceildiv 4 - 1)(%i, %j, %j)
-      store %x, %A[%idy0, %idy1, %idy2] : memref<128 x 64 x 64 x i32> // expected-error {{'std.store' op memref out of lower bound access along dimension #3}}
+      affine.store %x, %A[%idy0, %idy1, %idy2] : memref<128 x 64 x 64 x i32> // expected-error {{'affine.store' op memref out of lower bound access along dimension #3}}
     }
   }
   return
@@ -118,7 +118,7 @@
     affine.for %j = 0 to 256 {
       %idx0 = affine.apply (d0, d1) -> ((d0 mod 1024) floordiv 4)(%i, %j)
       %idx1 = affine.apply (d0, d1) -> ((((d1 mod 128) mod 32) ceildiv 4) * 32)(%i, %j)
-      load %A[%idx0, %idx1] : memref<256 x 256 x i32> // expected-error {{'std.load' op memref out of upper bound access along dimension #2}}
+      affine.load %A[%idx0, %idx1] : memref<256 x 256 x i32> // expected-error {{'affine.load' op memref out of upper bound access along dimension #2}}
     }
   }
   return
@@ -129,7 +129,7 @@
   %B = alloc() : memref<10 x i32>
   affine.for %i = 0 to 10 {
     %idx = affine.apply (d0)[s0] -> (d0 * s0)(%i)[%N]
-    %y = load %B[%idx] : memref<10 x i32>
+    %y = affine.load %B[%idx] : memref<10 x i32>
     // expected-error@-1 {{getMemRefRegion: compose affine map failed}}
   }
   return
@@ -141,7 +141,7 @@
   affine.for %ii = 0 to 64 {
       %idx0 = affine.apply (d0) -> ((d0 mod 147456) floordiv 1152) (%ii)
       %idx1 = affine.apply (d0) -> (((d0 mod 147456) mod 1152) floordiv 384) (%ii)
-      %v = load %A[%idx0, %idx1] : memref<2x2xi32>
+      %v = affine.load %A[%idx0, %idx1] : memref<2x2xi32>
   }
   return
 }
@@ -169,7 +169,7 @@
       %a15 = affine.apply (d0) ->
         ((((((d0 mod 294912) mod 147456) mod 1152) mod 384) mod 128)
           floordiv 128) (%a0)
-      %v0 = load %in[%a10, %a11, %a13, %a14, %a12, %a15]
+      %v0 = affine.load %in[%a10, %a11, %a13, %a14, %a12, %a15]
         : memref<2x2x3x3x16x1xi32>
     }
   }
@@ -180,7 +180,7 @@
 func @zero_d_memref(%arg0: memref<i32>) {
   %c0 = constant 0 : i32
   // A 0-d memref always has in-bound accesses!
-  store %c0, %arg0[] : memref<i32>
+  affine.store %c0, %arg0[] : memref<i32>
   return
 }
 
@@ -191,7 +191,7 @@
 
   affine.for %i0 = 10 to 11 {
     %idy = affine.apply (d0) ->  (100 * d0 floordiv 1000) (%i0)
-    store %c9, %in[%idy] : memref<1xi32> // expected-error {{'std.store' op memref out of upper bound access along dimension #1}}
+    affine.store %c9, %in[%idy] : memref<1xi32> // expected-error {{'affine.store' op memref out of upper bound access along dimension #1}}
   }
   return
 }
@@ -214,7 +214,7 @@
       %2 = affine.apply #map3(%i0, %i1)
       %3 = affine.apply #map4(%i0, %i1)
       %4 = affine.apply #map5(%i0, %i1)
-      %5 = load %arg0[%2, %c0, %4, %c0] : memref<4x4x16x1xf32>
+      %5 = affine.load %arg0[%2, %c0, %4, %c0] : memref<4x4x16x1xf32>
     }
   }
   return
@@ -232,9 +232,9 @@
   %1 = alloc() : memref<6 x f32>
   affine.for %i0 = 0 to 4096 {
     affine.for %i1 = #map0(%i0) to #map1(%i0) {
-      load %0[%i1] : memref<7 x f32>
-      load %1[%i1] : memref<6 x f32>
-      // expected-error@-1 {{'std.load' op memref out of upper bound access along dimension #1}}
+      affine.load %0[%i1] : memref<7 x f32>
+      affine.load %1[%i1] : memref<6 x f32>
+      // expected-error@-1 {{'affine.load' op memref out of upper bound access along dimension #1}}
     }
   }
   return
@@ -254,13 +254,13 @@
   %N = constant 2048 : index
   affine.for %i0 = 0 to 4096 {
     affine.for %i1 = #map0(%i0) to #map1(%i0) {
-      load %0[%i1] : memref<1027 x f32>
-      load %1[%i1] : memref<1026 x f32>
-      // expected-error@-1 {{'std.load' op memref out of upper bound access along dimension #1}}
+      affine.load %0[%i1] : memref<1027 x f32>
+      affine.load %1[%i1] : memref<1026 x f32>
+      // expected-error@-1 {{'affine.load' op memref out of upper bound access along dimension #1}}
     }
     affine.for %i2 = 0 to #map2(%N) {
       // Within bounds.
-      %v = load %2[%i2] : memref<4096 x f32>
+      %v = affine.load %2[%i2] : memref<4096 x f32>
     }
   }
   return
@@ -279,7 +279,7 @@
   affine.for %i0 = 4 to 1028 step 4 {
     %i1 = affine.apply (d0) -> (d0 - 4) (%i0)
     affine.for %i2 = #map_lb(%i1) to #map_ub(%i1) {
-        %0 = load %arg0[%i2] : memref<1024xf32>
+        %0 = affine.load %arg0[%i2] : memref<1024xf32>
     }
   }
   return
diff --git a/mlir/test/Transforms/memref-dataflow-opt.mlir b/mlir/test/Transforms/memref-dataflow-opt.mlir
index 979f486..764d524 100644
--- a/mlir/test/Transforms/memref-dataflow-opt.mlir
+++ b/mlir/test/Transforms/memref-dataflow-opt.mlir
@@ -11,8 +11,8 @@
   %cf7 = constant 7.0 : f32
   %m = alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
-    store %cf7, %m[%i0] : memref<10xf32>
-    %v0 = load %m[%i0] : memref<10xf32>
+    affine.store %cf7, %m[%i0] : memref<10xf32>
+    %v0 = affine.load %m[%i0] : memref<10xf32>
     %v1 = addf %v0, %v0 : f32
   }
   return
@@ -31,13 +31,13 @@
   %cf9 = constant 9.0 : f32
   %m = alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
-    store %cf7, %m[%i0] : memref<10xf32>
-    %v0 = load %m[%i0] : memref<10xf32>
+    affine.store %cf7, %m[%i0] : memref<10xf32>
+    %v0 = affine.load %m[%i0] : memref<10xf32>
     %v1 = addf %v0, %v0 : f32
-    store %cf8, %m[%i0] : memref<10xf32>
-    store %cf9, %m[%i0] : memref<10xf32>
-    %v2 = load %m[%i0] : memref<10xf32>
-    %v3 = load %m[%i0] : memref<10xf32>
+    affine.store %cf8, %m[%i0] : memref<10xf32>
+    affine.store %cf9, %m[%i0] : memref<10xf32>
+    %v2 = affine.load %m[%i0] : memref<10xf32>
+    %v3 = affine.load %m[%i0] : memref<10xf32>
     %v4 = mulf %v2, %v3 : f32
   }
   return
@@ -65,9 +65,9 @@
       %t1 = affine.apply (d0, d1) -> (d0)(%i0, %i1)
       %idx0 = affine.apply (d0, d1) -> (d1) (%t0, %t1)
       %idx1 = affine.apply (d0, d1) -> (d0 - 1) (%t0, %t1)
-      store %cf7, %m[%idx0, %idx1] : memref<10x10xf32>
-      // CHECK-NOT: load %{{[0-9]+}}
-      %v0 = load %m[%i0, %i1] : memref<10x10xf32>
+      affine.store %cf7, %m[%idx0, %idx1] : memref<10x10xf32>
+      // CHECK-NOT: affine.load %{{[0-9]+}}
+      %v0 = affine.load %m[%i0, %i1] : memref<10x10xf32>
       %v1 = addf %v0, %v0 : f32
     }
   }
@@ -81,7 +81,7 @@
 // CHECK-NEXT:      %2 = affine.apply [[MAP1]](%i0, %i1)
 // CHECK-NEXT:      %3 = affine.apply [[MAP2]](%1, %2)
 // CHECK-NEXT:      %4 = affine.apply [[MAP3]](%1, %2)
-// CHECK-NEXT:      store %cst, %0[%3, %4] : memref<10x10xf32>
+// CHECK-NEXT:      affine.store %cst, %0[%3, %4] : memref<10x10xf32>
 // CHECK-NEXT:      %5 = addf %cst, %cst : f32
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
@@ -93,9 +93,9 @@
   %cf7 = constant 7.0 : f32
   %m = alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
-    store %cf7, %m[%i0] : memref<10xf32>
+    affine.store %cf7, %m[%i0] : memref<10xf32>
     affine.for %i1 = 0 to %N {
-      %v0 = load %m[%i0] : memref<10xf32>
+      %v0 = affine.load %m[%i0] : memref<10xf32>
       %v1 = addf %v0, %v0 : f32
     }
   }
@@ -118,13 +118,13 @@
   %cf8 = constant 8.0 : f32
   %m = alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
-    store %cf7, %m[%i0] : memref<10xf32>
+    affine.store %cf7, %m[%i0] : memref<10xf32>
     affine.for %i1 = 0 to %N {
-      store %cf8, %m[%i1] : memref<10xf32>
+      affine.store %cf8, %m[%i1] : memref<10xf32>
     }
     affine.for %i2 = 0 to %N {
-      // CHECK: %{{[0-9]+}} = load %0[%i0] : memref<10xf32>
-      %v0 = load %m[%i0] : memref<10xf32>
+      // CHECK: %{{[0-9]+}} = affine.load %0[%i0] : memref<10xf32>
+      %v0 = affine.load %m[%i0] : memref<10xf32>
       %v1 = addf %v0, %v0 : f32
     }
   }
@@ -139,12 +139,12 @@
   %cf9 = constant 9.0 : f32
   %m = alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
-    store %cf7, %m[%i0] : memref<10xf32>
+    affine.store %cf7, %m[%i0] : memref<10xf32>
     affine.for %i1 = 0 to %N {
-      // CHECK: %{{[0-9]+}} = load %0[%i0] : memref<10xf32>
-      %v0 = load %m[%i0] : memref<10xf32>
+      // CHECK: %{{[0-9]+}} = affine.load %0[%i0] : memref<10xf32>
+      %v0 = affine.load %m[%i0] : memref<10xf32>
       %v1 = addf %v0, %v0 : f32
-      store %cf9, %m[%i0] : memref<10xf32>
+      affine.store %cf9, %m[%i0] : memref<10xf32>
     }
   }
   return
@@ -160,17 +160,17 @@
   %cf10 = constant 10.0 : f32
   %m = alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
-    store %cf7, %m[%i0] : memref<10xf32>
+    affine.store %cf7, %m[%i0] : memref<10xf32>
     affine.for %i1 = 0 to %N {
-      store %cf8, %m[%i1] : memref<10xf32>
+      affine.store %cf8, %m[%i1] : memref<10xf32>
     }
     affine.for %i2 = 0 to %N {
-      store %cf9, %m[%i2] : memref<10xf32>
+      affine.store %cf9, %m[%i2] : memref<10xf32>
     }
-    store %cf10, %m[%i0] : memref<10xf32>
+    affine.store %cf10, %m[%i0] : memref<10xf32>
     affine.for %i3 = 0 to %N {
-      // CHECK-NOT: %{{[0-9]+}} = load
-      %v0 = load %m[%i0] : memref<10xf32>
+      // CHECK-NOT: %{{[0-9]+}} = affine.load
+      %v0 = affine.load %m[%i0] : memref<10xf32>
       %v1 = addf %v0, %v0 : f32
     }
   }
@@ -183,11 +183,11 @@
   %cf7 = constant 7.0 : f32
   %m = alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
-    store %cf7, %m[%i0] : memref<10xf32>
+    affine.store %cf7, %m[%i0] : memref<10xf32>
     affine.for %i1 = 0 to 10 {
       affine.for %i2 = 0 to 10 {
-        // CHECK: load %{{[0-9]+}}
-        %v0 = load %m[%i2] : memref<10xf32>
+        // CHECK: affine.load %{{[0-9]+}}
+        %v0 = affine.load %m[%i2] : memref<10xf32>
         %v1 = addf %v0, %v0 : f32
       }
     }
@@ -201,12 +201,12 @@
   %cf7 = constant 7.0 : f32
   %c0 = constant 0 : index
   %m = alloc() : memref<10xf32>
-  store %cf7, %m[%c0] : memref<10xf32>
+  affine.store %cf7, %m[%c0] : memref<10xf32>
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
       affine.for %i2 = 0 to 10 {
-        // CHECK-NOT: load %{{[0-9]}}+
-        %v0 = load %m[%c0] : memref<10xf32>
+        // CHECK-NOT: affine.load %{{[0-9]}}+
+        %v0 = affine.load %m[%c0] : memref<10xf32>
         %v1 = addf %v0, %v0 : f32
       }
     }
@@ -224,26 +224,26 @@
   %c1 = constant 1 : index
   %m = alloc() : memref<10xf32>
   affine.for %i0 = 0 to 10 {
-    store %cf7, %m[%i0] : memref<10xf32>
+    affine.store %cf7, %m[%i0] : memref<10xf32>
     affine.for %i1 = 0 to %N {
-      %v0 = load %m[%i0] : memref<10xf32>
+      %v0 = affine.load %m[%i0] : memref<10xf32>
       %v1 = addf %v0, %v0 : f32
       %idx = affine.apply (d0) -> (d0 + 1) (%i0)
-      store %cf9, %m[%idx] : memref<10xf32>
+      affine.store %cf9, %m[%idx] : memref<10xf32>
     }
   }
   // Due to this load, the memref isn't optimized away.
-  %v3 = load %m[%c1] : memref<10xf32>
+  %v3 = affine.load %m[%c1] : memref<10xf32>
   return %v3 : f32
 // CHECK:       %0 = alloc() : memref<10xf32>
 // CHECK-NEXT:  affine.for %i0 = 0 to 10 {
-// CHECK-NEXT:    store %cst, %0[%i0] : memref<10xf32>
+// CHECK-NEXT:    affine.store %cst, %0[%i0] : memref<10xf32>
 // CHECK-NEXT:    affine.for %i1 = 0 to %arg0 {
 // CHECK-NEXT:      %1 = addf %cst, %cst : f32
 // CHECK-NEXT:      %2 = affine.apply [[MAP4]](%i0)
-// CHECK-NEXT:      store %cst_0, %0[%2] : memref<10xf32>
+// CHECK-NEXT:      affine.store %cst_0, %0[%2] : memref<10xf32>
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
-// CHECK-NEXT:  %3 = load %0[%c1] : memref<10xf32>
+// CHECK-NEXT:  %3 = affine.load %0[%c1] : memref<10xf32>
 // CHECK-NEXT:  return %3 : f32
 }
diff --git a/mlir/test/Transforms/memref-dependence-check.mlir b/mlir/test/Transforms/memref-dependence-check.mlir
index 84acac4..3efc134 100644
--- a/mlir/test/Transforms/memref-dependence-check.mlir
+++ b/mlir/test/Transforms/memref-dependence-check.mlir
@@ -14,14 +14,14 @@
   // and thus the store "may" conditionally execute before the load.
   affine.if #set0(%c0) {
     affine.for %i0 = 0 to 10 {
-      store %cf7, %m[%i0] : memref<10xf32>
+      affine.store %cf7, %m[%i0] : memref<10xf32>
       // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
       // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 0 to 1 at depth 1 = true}}
     }
   }
   affine.for %i1 = 0 to 10 {
-    %v0 = load %m[%i1] : memref<10xf32>
+    %v0 = affine.load %m[%i1] : memref<10xf32>
     // expected-remark@-1 {{dependence from 1 to 1 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 1 to 1 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 1 to 0 at depth 1 = false}}
@@ -38,13 +38,13 @@
   // There is a dependence from 0 to 1 at depth 1 (common surrounding loops 0)
   // because the first loop with the store dominates the second loop.
   affine.for %i0 = 0 to 10 {
-    store %cst, %0[%i0] : memref<10xf32>
+    affine.store %cst, %0[%i0] : memref<10xf32>
     // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 0 to 1 at depth 1 = true}}
   }
   affine.for %i1 = 0 to 10 {
-    %1 = load %0[%i1] : memref<10xf32>
+    %1 = affine.load %0[%i1] : memref<10xf32>
     // expected-remark@-1 {{dependence from 1 to 1 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 1 to 1 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 1 to 0 at depth 1 = false}}
@@ -59,10 +59,10 @@
   %m.b = alloc() : memref<100xf32>
   %c0 = constant 0 : index
   %c1 = constant 1.0 : f32
-  store %c1, %m.a[%c0] : memref<100xf32>
+  affine.store %c1, %m.a[%c0] : memref<100xf32>
   // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 0 to 1 at depth 1 = false}}
-  %v0 = load %m.b[%c0] : memref<100xf32>
+  %v0 = affine.load %m.b[%c0] : memref<100xf32>
   // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 1 to 1 at depth 1 = false}}
   return
@@ -75,10 +75,10 @@
   %c0 = constant 0 : index
   %c1 = constant 1 : index
   %c7 = constant 7.0 : f32
-  store %c7, %m[%c0] : memref<100xf32>
+  affine.store %c7, %m[%c0] : memref<100xf32>
   // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 0 to 1 at depth 1 = false}}
-  %v0 = load %m[%c1] : memref<100xf32>
+  %v0 = affine.load %m[%c1] : memref<100xf32>
   // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 1 to 1 at depth 1 = false}}
   return
@@ -91,10 +91,10 @@
   %c0 = constant 0 : index
   %c1 = constant 1 : index
   %c7 = constant 7.0 : f32
-  %v0 = load %m[%c1] : memref<100xf32>
+  %v0 = affine.load %m[%c1] : memref<100xf32>
   // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 0 to 1 at depth 1 = false}}
-  store %c7, %m[%c0] : memref<100xf32>
+  affine.store %c7, %m[%c0] : memref<100xf32>
   // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 1 to 1 at depth 1 = false}}
   return
@@ -106,10 +106,10 @@
   %m = alloc() : memref<100xf32>
   %c11 = constant 11 : index
   %c7 = constant 7.0 : f32
-  store %c7, %m[%c11] : memref<100xf32>
+  affine.store %c7, %m[%c11] : memref<100xf32>
   // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 0 to 1 at depth 1 = true}}
-  %v0 = load %m[%c11] : memref<100xf32>
+  %v0 = affine.load %m[%c11] : memref<100xf32>
   // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 1 to 1 at depth 1 = false}}
   return
@@ -121,10 +121,10 @@
   %m = alloc() : memref<100xf32>
   %c11 = constant 11 : index
   %c7 = constant 7.0 : f32
-  %v0 = load %m[%c11] : memref<100xf32>
+  %v0 = affine.load %m[%c11] : memref<100xf32>
   // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 0 to 1 at depth 1 = false}}
-  %v1 = load %m[%c11] : memref<100xf32>
+  %v1 = affine.load %m[%c11] : memref<100xf32>
   // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 1 to 1 at depth 1 = false}}
   return
@@ -135,10 +135,10 @@
 func @store_load_same_symbol(%arg0: index) {
   %m = alloc() : memref<100xf32>
   %c7 = constant 7.0 : f32
-  store %c7, %m[%arg0] : memref<100xf32>
+  affine.store %c7, %m[%arg0] : memref<100xf32>
   // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 0 to 1 at depth 1 = true}}
-  %v0 = load %m[%arg0] : memref<100xf32>
+  %v0 = affine.load %m[%arg0] : memref<100xf32>
   // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 1 to 1 at depth 1 = false}}
   return
@@ -149,10 +149,10 @@
 func @store_load_different_symbols(%arg0: index, %arg1: index) {
   %m = alloc() : memref<100xf32>
   %c7 = constant 7.0 : f32
-  store %c7, %m[%arg0] : memref<100xf32>
+  affine.store %c7, %m[%arg0] : memref<100xf32>
   // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 0 to 1 at depth 1 = true}}
-  %v0 = load %m[%arg1] : memref<100xf32>
+  %v0 = affine.load %m[%arg1] : memref<100xf32>
   // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 1 to 1 at depth 1 = false}}
   return
@@ -165,11 +165,11 @@
   %c1 = constant 1 : index
   %c8 = constant 8.0 : f32
   %a0 = affine.apply (d0) -> (d0) (%c1)
-  store %c8, %m[%a0] : memref<100xf32>
+  affine.store %c8, %m[%a0] : memref<100xf32>
   // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 0 to 1 at depth 1 = false}}
   %a1 = affine.apply (d0) -> (d0 + 1) (%c1)
-  %v0 = load %m[%a1] : memref<100xf32>
+  %v0 = affine.load %m[%a1] : memref<100xf32>
   // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 1 to 1 at depth 1 = false}}
   return
@@ -183,11 +183,11 @@
   %c9 = constant 9 : index
   %c11 = constant 11 : index
   %a0 = affine.apply (d0) -> (d0 + 1) (%c9)
-  store %c7, %m[%a0] : memref<100xf32>
+  affine.store %c7, %m[%a0] : memref<100xf32>
   // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 0 to 1 at depth 1 = true}}
   %a1 = affine.apply (d0) -> (d0 - 1) (%c11)
-  %v0 = load %m[%a1] : memref<100xf32>
+  %v0 = affine.load %m[%a1] : memref<100xf32>
   // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 1 to 1 at depth 1 = false}}
   return
@@ -199,11 +199,11 @@
   %m = alloc() : memref<100xf32>
   %c7 = constant 7.0 : f32
   %a0 = affine.apply (d0) -> (d0) (%arg0)
-  store %c7, %m[%a0] : memref<100xf32>
+  affine.store %c7, %m[%a0] : memref<100xf32>
   // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 0 to 1 at depth 1 = true}}
   %a1 = affine.apply (d0) -> (d0) (%arg0)
-  %v0 = load %m[%a1] : memref<100xf32>
+  %v0 = affine.load %m[%a1] : memref<100xf32>
   // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 1 to 1 at depth 1 = false}}
   return
@@ -215,11 +215,11 @@
   %m = alloc() : memref<100xf32>
   %c7 = constant 7.0 : f32
   %a0 = affine.apply (d0) -> (d0) (%arg0)
-  store %c7, %m[%a0] : memref<100xf32>
+  affine.store %c7, %m[%a0] : memref<100xf32>
   // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 0 to 1 at depth 1 = false}}
   %a1 = affine.apply (d0) -> (d0 + 1) (%arg0)
-  %v0 = load %m[%a1] : memref<100xf32>
+  %v0 = affine.load %m[%a1] : memref<100xf32>
   // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
   // expected-remark@-2 {{dependence from 1 to 1 at depth 1 = false}}
   return
@@ -233,13 +233,13 @@
   %c10 = constant 10 : index
   affine.for %i0 = 0 to 10 {
     %a0 = affine.apply (d0) -> (d0) (%i0)
-    store %c7, %m[%a0] : memref<100xf32>
+    affine.store %c7, %m[%a0] : memref<100xf32>
     // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 0 to 1 at depth 1 = false}}
     // expected-remark@-4 {{dependence from 0 to 1 at depth 2 = false}}
     %a1 = affine.apply (d0) -> (d0) (%c10)
-    %v0 = load %m[%a1] : memref<100xf32>
+    %v0 = affine.load %m[%a1] : memref<100xf32>
     // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 1 to 1 at depth 1 = false}}
@@ -256,13 +256,13 @@
   %c10 = constant 10 : index
   affine.for %i0 = 0 to %arg1 {
     %a0 = affine.apply (d0) -> (d0) (%arg0)
-    store %c7, %m[%a0] : memref<100xf32>
+    affine.store %c7, %m[%a0] : memref<100xf32>
     // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = [1, +inf]}}
     // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 0 to 1 at depth 1 = [1, +inf]}}
     // expected-remark@-4 {{dependence from 0 to 1 at depth 2 = true}}
     %a1 = affine.apply (d0) -> (d0) (%arg0)
-    %v0 = load %m[%a1] : memref<100xf32>
+    %v0 = affine.load %m[%a1] : memref<100xf32>
     // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = [1, +inf]}}
     // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 1 to 1 at depth 1 = false}}
@@ -282,7 +282,7 @@
     // For dependence from 0 to 1, we do not have a loop carried dependence
     // because only the final write in the loop accesses the same element as the
     // load, so this dependence appears only at depth 2 (loop independent).
-    store %c7, %m[%a0] : memref<100xf32>
+    affine.store %c7, %m[%a0] : memref<100xf32>
     // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 0 to 1 at depth 1 = false}}
@@ -290,7 +290,7 @@
     %a1 = affine.apply (d0) -> (d0 - 1) (%c10)
     // For dependence from 1 to 0, we have write-after-read (WAR) dependences
     // for all loads in the loop to the store on the last iteration.
-    %v0 = load %m[%a1] : memref<100xf32>
+    %v0 = affine.load %m[%a1] : memref<100xf32>
     // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = [1, 9]}}
     // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 1 to 1 at depth 1 = false}}
@@ -307,13 +307,13 @@
   %c0 = constant 0 : index
   affine.for %i0 = 1 to 11 {
     %a0 = affine.apply (d0) -> (d0) (%i0)
-    store %c7, %m[%a0] : memref<100xf32>
+    affine.store %c7, %m[%a0] : memref<100xf32>
     // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 0 to 1 at depth 1 = false}}
     // expected-remark@-4 {{dependence from 0 to 1 at depth 2 = false}}
     %a1 = affine.apply (d0) -> (d0) (%c0)
-    %v0 = load %m[%a1] : memref<100xf32>
+    %v0 = affine.load %m[%a1] : memref<100xf32>
     // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 1 to 1 at depth 1 = false}}
@@ -333,13 +333,13 @@
     // Dependence from 0 to 1 at depth 1 is a range because all loads at
     // constant index zero are reads after first store at index zero during
     // first iteration of the loop.
-    store %c7, %m[%a0] : memref<100xf32>
+    affine.store %c7, %m[%a0] : memref<100xf32>
     // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 0 to 1 at depth 1 = [1, 9]}}
     // expected-remark@-4 {{dependence from 0 to 1 at depth 2 = true}}
     %a1 = affine.apply (d0) -> (d0 + 1) (%c0)
-    %v0 = load %m[%a1] : memref<100xf32>
+    %v0 = affine.load %m[%a1] : memref<100xf32>
     // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 1 to 1 at depth 1 = false}}
@@ -355,13 +355,13 @@
   %c7 = constant 7.0 : f32
   affine.for %i0 = 1 to 11 {
     %a0 = affine.apply (d0) -> (d0 + 3) (%i0)
-    store %c7, %m[%a0] : memref<100xf32>
+    affine.store %c7, %m[%a0] : memref<100xf32>
     // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 0 to 1 at depth 1 = [3, 3]}}
     // expected-remark@-4 {{dependence from 0 to 1 at depth 2 = false}}
     %a1 = affine.apply (d0) -> (d0) (%i0)
-    %v0 = load %m[%a1] : memref<100xf32>
+    %v0 = affine.load %m[%a1] : memref<100xf32>
     // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 1 to 1 at depth 1 = false}}
@@ -377,13 +377,13 @@
   %c7 = constant 7.0 : f32
   affine.for %i0 = 2 to 11 {
     %a0 = affine.apply (d0) -> (d0) (%i0)
-    store %c7, %m[%a0] : memref<100xf32>
+    affine.store %c7, %m[%a0] : memref<100xf32>
     // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 0 to 1 at depth 1 = [2, 2]}}
     // expected-remark@-4 {{dependence from 0 to 1 at depth 2 = false}}
     %a1 = affine.apply (d0) -> (d0 - 2) (%i0)
-    %v0 = load %m[%a1] : memref<100xf32>
+    %v0 = affine.load %m[%a1] : memref<100xf32>
     // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 1 to 1 at depth 1 = false}}
@@ -402,7 +402,7 @@
       // Dependence from access 0 to 1 is loop independent at depth = 3.
       %a00 = affine.apply (d0, d1) -> (d0) (%i0, %i1)
       %a01 = affine.apply (d0, d1) -> (d1) (%i0, %i1)
-      store %c7, %m[%a00, %a01] : memref<10x10xf32>
+      affine.store %c7, %m[%a00, %a01] : memref<10x10xf32>
       // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
       // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 0 to 0 at depth 3 = false}}
@@ -411,7 +411,7 @@
       // expected-remark@-6 {{dependence from 0 to 1 at depth 3 = true}}
       %a10 = affine.apply (d0, d1) -> (d0) (%i0, %i1)
       %a11 = affine.apply (d0, d1) -> (d1) (%i0, %i1)
-      %v0 = load %m[%a10, %a11] : memref<10x10xf32>
+      %v0 = affine.load %m[%a10, %a11] : memref<10x10xf32>
       // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
       // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 1 to 0 at depth 3 = false}}
@@ -433,7 +433,7 @@
       // Dependence from access 0 to 1 is loop carried at depth 1.
       %a00 = affine.apply (d0, d1) -> (d0) (%i0, %i1)
       %a01 = affine.apply (d0, d1) -> (d1) (%i0, %i1)
-      store %c7, %m[%a00, %a01] : memref<10x10xf32>
+      affine.store %c7, %m[%a00, %a01] : memref<10x10xf32>
       // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
       // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 0 to 0 at depth 3 = false}}
@@ -442,7 +442,7 @@
       // expected-remark@-6 {{dependence from 0 to 1 at depth 3 = false}}
       %a10 = affine.apply (d0, d1) -> (d0 - 2) (%i0, %i1)
       %a11 = affine.apply (d0, d1) -> (d1) (%i0, %i1)
-      %v0 = load %m[%a10, %a11] : memref<10x10xf32>
+      %v0 = affine.load %m[%a10, %a11] : memref<10x10xf32>
       // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
       // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 1 to 0 at depth 3 = false}}
@@ -464,7 +464,7 @@
       // Dependence from access 0 to 1 is loop carried at depth 2.
       %a00 = affine.apply (d0, d1) -> (d0) (%i0, %i1)
       %a01 = affine.apply (d0, d1) -> (d1) (%i0, %i1)
-      store %c7, %m[%a00, %a01] : memref<10x10xf32>
+      affine.store %c7, %m[%a00, %a01] : memref<10x10xf32>
       // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
       // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 0 to 0 at depth 3 = false}}
@@ -473,7 +473,7 @@
       // expected-remark@-6 {{dependence from 0 to 1 at depth 3 = false}}
       %a10 = affine.apply (d0, d1) -> (d0) (%i0, %i1)
       %a11 = affine.apply (d0, d1) -> (d1 - 3) (%i0, %i1)
-      %v0 = load %m[%a10, %a11] : memref<10x10xf32>
+      %v0 = affine.load %m[%a10, %a11] : memref<10x10xf32>
       // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
       // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 1 to 0 at depth 3 = false}}
@@ -495,7 +495,7 @@
     affine.for %i1 = 0 to 10 {
       %a00 = affine.apply (d0, d1) -> (d0) (%i0, %i1)
       %a01 = affine.apply (d0, d1) -> (d1) (%i0, %i1)
-      store %c7, %m[%a00, %a01] : memref<10x10xf32>
+      affine.store %c7, %m[%a00, %a01] : memref<10x10xf32>
       // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
       // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 0 to 0 at depth 3 = false}}
@@ -505,7 +505,7 @@
     affine.for %i2 = 0 to 9 {
       %a10 = affine.apply (d0, d1) -> (d0) (%i0, %i2)
       %a11 = affine.apply (d0, d1) -> (d1) (%i0, %i2)
-      %v0 = load %m[%a10, %a11] : memref<10x10xf32>
+      %v0 = affine.load %m[%a10, %a11] : memref<10x10xf32>
       // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
       // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 1 to 1 at depth 1 = false}}
@@ -527,7 +527,7 @@
   // *) loop-carried dependence from access 3 to 0 at depth 1.
   affine.for %i0 = 0 to 9 {
     %a0 = affine.apply (d0) -> (d0) (%i0)
-    %v0 = load %m.a[%a0] : memref<100xf32>
+    %v0 = affine.load %m.a[%a0] : memref<100xf32>
     // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 0 to 1 at depth 1 = false}}
@@ -537,7 +537,7 @@
     // expected-remark@-7 {{dependence from 0 to 3 at depth 1 = false}}
     // expected-remark@-8 {{dependence from 0 to 3 at depth 2 = false}}
     %a1 = affine.apply (d0) -> (d0) (%i0)
-    store %v0, %m.b[%a1] : memref<100xf32>
+    affine.store %v0, %m.b[%a1] : memref<100xf32>
     // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 1 to 1 at depth 1 = false}}
@@ -547,7 +547,7 @@
     // expected-remark@-7 {{dependence from 1 to 3 at depth 1 = false}}
     // expected-remark@-8 {{dependence from 1 to 3 at depth 2 = false}}
     %a2 = affine.apply (d0) -> (d0) (%i0)
-    %v1 = load %m.b[%a2] : memref<100xf32>
+    %v1 = affine.load %m.b[%a2] : memref<100xf32>
     // expected-remark@-1 {{dependence from 2 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 2 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 2 to 1 at depth 1 = false}}
@@ -557,7 +557,7 @@
     // expected-remark@-7 {{dependence from 2 to 3 at depth 1 = false}}
     // expected-remark@-8 {{dependence from 2 to 3 at depth 2 = false}}
     %a3 = affine.apply (d0) -> (d0 + 1) (%i0)
-    store %v1, %m.a[%a3] : memref<100xf32>
+    affine.store %v1, %m.a[%a3] : memref<100xf32>
     // expected-remark@-1 {{dependence from 3 to 0 at depth 1 = [1, 1]}}
     // expected-remark@-2 {{dependence from 3 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 3 to 1 at depth 1 = false}}
@@ -579,7 +579,7 @@
     affine.for %i1 = 0 to %arg1 {
       %a00 = affine.apply (d0, d1) -> (d0 - 1) (%i0, %i1)
       %a01 = affine.apply (d0, d1) -> (d1 + 1) (%i0, %i1)
-      %v0 = load %m[%a00, %a01] : memref<10x10xf32>
+      %v0 = affine.load %m[%a00, %a01] : memref<10x10xf32>
       // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
       // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 0 to 0 at depth 3 = false}}
@@ -588,7 +588,7 @@
       // expected-remark@-6 {{dependence from 0 to 1 at depth 3 = false}}
       %a10 = affine.apply (d0, d1) -> (d0) (%i0, %i1)
       %a11 = affine.apply (d0, d1) -> (d1) (%i0, %i1)
-      store %c7, %m[%a10, %a11] : memref<10x10xf32>
+      affine.store %c7, %m[%a10, %a11] : memref<10x10xf32>
       // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = [1, 1][-1, -1]}}
       // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 1 to 0 at depth 3 = false}}
@@ -608,7 +608,7 @@
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = 0 to 10 {
       %a0 = affine.apply (d0) -> (d0 + 1) (%i1)
-      %v0 = load %m[%a0] : memref<100xf32>
+      %v0 = affine.load %m[%a0] : memref<100xf32>
       // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
       // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 0 to 0 at depth 3 = false}}
@@ -616,7 +616,7 @@
       // expected-remark@-5 {{dependence from 0 to 1 at depth 2 = [0, 0][1, 1]}}
       // expected-remark@-6 {{dependence from 0 to 1 at depth 3 = false}}
       %a1 = affine.apply (d0) -> (d0) (%i1)
-      store %c7, %m[%a1] : memref<100xf32>
+      affine.store %c7, %m[%a1] : memref<100xf32>
       // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = [1, 9][-1, -1]}}
       // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 1 to 0 at depth 3 = false}}
@@ -637,13 +637,13 @@
     %a0 = affine.apply (d0) -> (d0 mod 2) (%i0)
     // Results are conservative here since we currently don't have a way to
     // represent strided sets in FlatAffineConstraints.
-    %v0 = load %m[%a0] : memref<100xf32>
+    %v0 = affine.load %m[%a0] : memref<100xf32>
     // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 0 to 1 at depth 1 = [1, 9]}}
     // expected-remark@-4 {{dependence from 0 to 1 at depth 2 = false}}
     %a1 = affine.apply (d0) -> ( (d0 + 1) mod 2) (%i0)
-    store %c7, %m[%a1] : memref<100xf32>
+    affine.store %c7, %m[%a1] : memref<100xf32>
     // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = [1, 9]}}
     // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 1 to 1 at depth 1 = [2, 9]}}
@@ -660,7 +660,7 @@
 
   affine.for %i0 = 0 to 128 {
     affine.for %i1 = 0 to 8 {
-      store %c7, %0[%i0, %i1] : memref<100x100xf32>
+      affine.store %c7, %0[%i0, %i1] : memref<100x100xf32>
       // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
       // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 0 to 0 at depth 3 = false}}
@@ -672,7 +672,7 @@
       affine.for %i4 = 0 to 8 {
         affine.for %i5 = 0 to 16 {
           %8 = affine.apply (d0, d1) -> (d0 * 16 + d1)(%i4, %i5)
-          %9 = load %0[%8, %i3] : memref<100x100xf32>
+          %9 = affine.load %0[%8, %i3] : memref<100x100xf32>
           // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
           // expected-remark@-2 {{dependence from 1 to 1 at depth 1 = false}}
           // expected-remark@-3 {{dependence from 1 to 1 at depth 2 = false}}
@@ -699,7 +699,7 @@
         %idx0 = affine.apply (d0, d1, d2) -> (d0 floordiv 4) (%i0, %i1, %i2)
         %idx1 = affine.apply (d0, d1, d2) -> (d1 mod 2) (%i0, %i1, %i2)
         %idx2 = affine.apply (d0, d1, d2) -> (d2 floordiv 4) (%i0, %i1, %i2)
-        store %c0, %M[%idx0, %idx1, %idx2] : memref<2 x 2 x 2 x i32>
+        affine.store %c0, %M[%idx0, %idx1, %idx2] : memref<2 x 2 x 2 x i32>
         // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = [1, 3][-7, 7][-3, 3]}}
         // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = [0, 0][2, 7][-3, 3]}}
         // expected-remark@-3 {{dependence from 0 to 0 at depth 3 = [0, 0][0, 0][1, 3]}}
@@ -725,7 +725,7 @@
         affine.for %i3 = 0 to 3 {
           affine.for %i4 = 0 to 16 {
             affine.for %i5 = 0 to 1 {
-              store %val, %in[%i0, %i1, %i2, %i3, %i4, %i5] : memref<2x2x3x3x16x1xi32>
+              affine.store %val, %in[%i0, %i1, %i2, %i3, %i4, %i5] : memref<2x2x3x3x16x1xi32>
 // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
 // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
 // expected-remark@-3 {{dependence from 0 to 0 at depth 3 = false}}
@@ -758,7 +758,7 @@
       %a15 = affine.apply (d0) ->
         ((((((d0 mod 294912) mod 147456) mod 1152) mod 384) mod 128)
           floordiv 128) (%a0)
-      %v0 = load %in[%a10, %a11, %a13, %a14, %a12, %a15] : memref<2x2x3x3x16x1xi32>
+      %v0 = affine.load %in[%a10, %a11, %a13, %a14, %a12, %a15] : memref<2x2x3x3x16x1xi32>
 // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
 // expected-remark@-2 {{dependence from 1 to 1 at depth 1 = false}}
 // expected-remark@-3 {{dependence from 1 to 1 at depth 2 = false}}
@@ -768,7 +768,7 @@
 // expected-remark@-7 {{dependence from 1 to 2 at depth 3 = false}}
 // TODO(andydavis): the dep tester shouldn't be printing out these messages
 // below; they are redundant.
-      store %v0, %out[%ii, %jj] : memref<64x9xi32>
+      affine.store %v0, %out[%ii, %jj] : memref<64x9xi32>
 // expected-remark@-1 {{dependence from 2 to 0 at depth 1 = false}}
 // expected-remark@-2 {{dependence from 2 to 1 at depth 1 = false}}
 // expected-remark@-3 {{dependence from 2 to 1 at depth 2 = false}}
@@ -791,12 +791,12 @@
   %0 = alloc() : memref<10xf32>
   %cf0 = constant 0.0 : f32
   affine.for %i0 = 0 to 8 step 2 {
-    store %cf0, %0[%i0] : memref<10xf32>
+    affine.store %cf0, %0[%i0] : memref<10xf32>
     // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 0 to 1 at depth 1 = false}}
     // expected-remark@-4 {{dependence from 0 to 1 at depth 2 = true}}
-    %v0 = load %0[%i0] : memref<10xf32>
+    %v0 = affine.load %0[%i0] : memref<10xf32>
     // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 1 to 1 at depth 1 = false}}
@@ -814,12 +814,12 @@
   %cf0 = constant 0.0 : f32
   affine.for %i0 = 0 to 8 step 2 {
     %a0 = affine.apply (d0) -> (d0 + 1)(%i0)
-    store %cf0, %0[%a0] : memref<10xf32>
+    affine.store %cf0, %0[%a0] : memref<10xf32>
     // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 0 to 1 at depth 1 = false}}
     // expected-remark@-4 {{dependence from 0 to 1 at depth 2 = false}}
-    %v0 = load %0[%i0] : memref<10xf32>
+    %v0 = affine.load %0[%i0] : memref<10xf32>
     // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 1 to 1 at depth 1 = false}}
@@ -830,19 +830,19 @@
 
 // -----
 
-// Store op accesses memref elements at offset causing loop-carried dependence.
+// Affine.Store op accesses memref elements at offset causing loop-carried dependence.
 // CHECK-LABEL: func @strided_loop_with_loop_carried_dependence_at_depth1
 func @strided_loop_with_loop_carried_dependence_at_depth1() {
   %0 = alloc() : memref<10xf32>
   %cf0 = constant 0.0 : f32
   affine.for %i0 = 0 to 8 step 2 {
     %a0 = affine.apply (d0) -> (d0 + 4)(%i0)
-    store %cf0, %0[%a0] : memref<10xf32>
+    affine.store %cf0, %0[%a0] : memref<10xf32>
     // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 0 to 1 at depth 1 = [4, 4]}}
     // expected-remark@-4 {{dependence from 0 to 1 at depth 2 = false}}
-    %v0 = load %0[%i0] : memref<10xf32>
+    %v0 = affine.load %0[%i0] : memref<10xf32>
     // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 1 to 1 at depth 1 = false}}
@@ -861,13 +861,13 @@
   %cst = constant 7.000000e+00 : f32
   affine.for %i0 = 0 to 10 {
     %a0 = affine.apply (d0) -> (d0 - 1)(%i0)
-    store %cst, %0[%a0] : memref<100xf32>
+    affine.store %cst, %0[%a0] : memref<100xf32>
     // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 0 to 1 at depth 1 = false}}
     // expected-remark@-4 {{dependence from 0 to 1 at depth 2 = false}}
     affine.for %i1 = (d0) -> (d0)(%i0) to (d0) -> (d0 + 1)(%i0) {
-      %1 = load %0[%i1] : memref<100xf32>
+      %1 = affine.load %0[%i1] : memref<100xf32>
       // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = [1, 1]}}
       // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 1 to 1 at depth 1 = false}}
@@ -888,7 +888,7 @@
   %cst = constant 7.000000e+00 : f32
   affine.for %i0 = 0 to 10 {
     affine.for %i1 = (d0) -> (d0)(%i0) to (d0) -> (d0 + 1)(%i0) {
-      store %cst, %0[%i1] : memref<100xf32>
+      affine.store %cst, %0[%i1] : memref<100xf32>
       // expected-remark@-1 {{dependence from 0 to 0 at depth 1 = false}}
       // expected-remark@-2 {{dependence from 0 to 0 at depth 2 = false}}
       // expected-remark@-3 {{dependence from 0 to 0 at depth 3 = false}}
@@ -896,7 +896,7 @@
       // expected-remark@-5 {{dependence from 0 to 1 at depth 2 = false}}
     }
     %a0 = affine.apply (d0) -> (d0 - 2)(%i0)
-    %1 = load %0[%a0] : memref<100xf32>
+    %1 = affine.load %0[%a0] : memref<100xf32>
     // expected-remark@-1 {{dependence from 1 to 0 at depth 1 = false}}
     // expected-remark@-2 {{dependence from 1 to 0 at depth 2 = false}}
     // expected-remark@-3 {{dependence from 1 to 1 at depth 1 = false}}
diff --git a/mlir/test/Transforms/parallelism-detection.mlir b/mlir/test/Transforms/parallelism-detection.mlir
index 6ea6cb5..c6aa4ba 100644
--- a/mlir/test/Transforms/parallelism-detection.mlir
+++ b/mlir/test/Transforms/parallelism-detection.mlir
@@ -10,12 +10,12 @@
     affine.for %j = 0 to %N {
       // expected-remark@-1 {{parallel loop}}
       affine.for %k = 0 to %N {
-        %5 = load %0[%i, %k] : memref<1024x1024xvector<64xf32>>
-        %6 = load %1[%k, %j] : memref<1024x1024xvector<64xf32>>
-        %7 = load %2[%i, %j] : memref<1024x1024xvector<64xf32>>
+        %5 = affine.load %0[%i, %k] : memref<1024x1024xvector<64xf32>>
+        %6 = affine.load %1[%k, %j] : memref<1024x1024xvector<64xf32>>
+        %7 = affine.load %2[%i, %j] : memref<1024x1024xvector<64xf32>>
         %8 = mulf %5, %6 : vector<64xf32>
         %9 = addf %7, %8 : vector<64xf32>
-        store %9, %2[%i, %j] : memref<1024x1024xvector<64xf32>>
+        affine.store %9, %2[%i, %j] : memref<1024x1024xvector<64xf32>>
       }
     }
   }
diff --git a/mlir/test/Transforms/pipeline-data-transfer.mlir b/mlir/test/Transforms/pipeline-data-transfer.mlir
index 30e6be8..6708282 100644
--- a/mlir/test/Transforms/pipeline-data-transfer.mlir
+++ b/mlir/test/Transforms/pipeline-data-transfer.mlir
@@ -1,8 +1,8 @@
-// RUN: mlir-opt %s -affine-pipeline-data-transfer | FileCheck %s
+// RUN: mlir-opt %s -split-input-file -affine-pipeline-data-transfer | FileCheck %s
+
+// -----
 
 // CHECK-DAG: [[MOD_2:#map[0-9]+]] = (d0) -> (d0 mod 2)
-// CHECK-DAG: [[FLOOR_MOD_2:#map[0-9]+]] = (d0) -> ((d0 floordiv 4) mod 2)
-// CHECK-DAG: [[REMAP_SHIFT_MINUS_4:#map[0-9]+]] = (d0) -> (d0 - 4)
 // CHECK-DAG: [[MAP_MINUS_1:#map[0-9]+]] = (d0) -> (d0 - 1)
 
 // CHECK-LABEL: func @loop_nest_dma() {
@@ -17,11 +17,11 @@
   %num_elts = constant 128 : index
 
   affine.for %i = 0 to 8 {
-    dma_start %A[%i], %Ah[%i], %num_elts, %tag[%zero] : memref<256 x f32>, memref<32 x f32, 1>, memref<1 x f32>
-    dma_wait %tag[%zero], %num_elts : memref<1 x f32>
-    %v = load %Ah[%i] : memref<32 x f32, (d0) -> (d0), 1>
+    affine.dma_start %A[%i], %Ah[%i], %tag[%zero], %num_elts : memref<256 x f32>, memref<32 x f32, 1>, memref<1 x f32>
+    affine.dma_wait %tag[%zero], %num_elts : memref<1 x f32>
+    %v = affine.load %Ah[%i] : memref<32 x f32, (d0) -> (d0), 1>
     %r = "compute"(%v) : (f32) -> (f32)
-    store %r, %Ah[%i] : memref<32 x f32, (d0) -> (d0), 1>
+    affine.store %r, %Ah[%i] : memref<32 x f32, (d0) -> (d0), 1>
     affine.for %j = 0 to 128 {
       "do_more_compute"(%i, %j) : (index, index) -> ()
     }
@@ -31,39 +31,39 @@
 // CHECK:       %0 = alloc() : memref<256xf32>
 // CHECK:       %1 = alloc() : memref<2x32xf32, 1>
 // CHECK-NEXT:  %2 = alloc() : memref<2x1xf32>
-// CHECK-NEXT:  %3 = affine.apply [[MOD_2]](%c0)
-// CHECK-NEXT:  %4 = affine.apply [[MOD_2]](%c0)
-// CHECK-NEXT:  dma_start %0[%c0], %1[%3, %c0], %c128, %2[%4, %c0_0] : memref<256xf32>, memref<2x32xf32, 1>, memref<2x1xf32>
+// CHECK-NEXT:  affine.dma_start %0[%c0], %1[%c0 mod 2, %c0], %2[%c0 mod 2, symbol(%c0_0)], %c128 : memref<256xf32>, memref<2x32xf32, 1>, memref<2x1xf32>
 // CHECK-NEXT:  affine.for %i0 = 1 to 8 {
-// CHECK-NEXT:    %5 = affine.apply [[MOD_2]](%i0)
-// CHECK-NEXT:    %6 = affine.apply [[MOD_2]](%i0)
-// CHECK-NEXT:    dma_start %0[%i0], %1[%5, %i0], %c128, %2[%6, %c0_0] : memref<256xf32>, memref<2x32xf32, 1>, memref<2x1xf32>
-// CHECK-NEXT:    %7 = affine.apply [[MAP_MINUS_1]](%i0)
-// CHECK-NEXT:    %8 = affine.apply [[MOD_2]](%7)
-// CHECK-NEXT:    %9 = affine.apply [[MOD_2]](%7)
-// CHECK-NEXT:    dma_wait %2[%8, %c0_0], %c128 : memref<2x1xf32>
-// CHECK-NEXT:    %10 = load %1[%9, %7] : memref<2x32xf32, 1>
-// CHECK-NEXT:    %11 = "compute"(%10) : (f32) -> f32
-// CHECK-NEXT:    store %11, %1[%9, %7] : memref<2x32xf32, 1>
+// CHECK-NEXT:    affine.dma_start %0[%i0], %1[%i0 mod 2, %i0], %2[%i0 mod 2, symbol(%c0_0)], %c128 : memref<256xf32>, memref<2x32xf32, 1>, memref<2x1xf32>
+// CHECK-NEXT:    %3 = affine.apply [[MAP_MINUS_1]](%i0)
+// CHECK-NEXT:    %4 = affine.apply [[MOD_2]](%3)
+// CHECK-NEXT:    %5 = affine.apply [[MOD_2]](%3)
+// CHECK-NEXT:    affine.dma_wait %2[%3 mod 2, symbol(%c0_0)], %c128 : memref<2x1xf32>
+// CHECK-NEXT:    %6 = affine.load %1[%3 mod 2, %3] : memref<2x32xf32, 1>
+// CHECK-NEXT:    %7 = "compute"(%6) : (f32) -> f32
+// CHECK-NEXT:    affine.store %7, %1[%3 mod 2, %3] : memref<2x32xf32, 1>
 // CHECK-NEXT:    affine.for %i1 = 0 to 128 {
-// CHECK-NEXT:      "do_more_compute"(%7, %i1) : (index, index) -> ()
+// CHECK-NEXT:      "do_more_compute"(%3, %i1) : (index, index) -> ()
 // CHECK-NEXT:    }
 // CHECK-NEXT:  }
-// CHECK-NEXT:  %12 = affine.apply [[MAP_MINUS_1]](%c8)
-// CHECK-NEXT:  %13 = affine.apply [[MOD_2]](%12)
-// CHECK-NEXT:  %14 = affine.apply [[MOD_2]](%12)
-// CHECK-NEXT:  dma_wait %2[%13, %c0_0], %c128 : memref<2x1xf32>
-// CHECK-NEXT:  %15 = load %1[%14, %12] : memref<2x32xf32, 1>
-// CHECK-NEXT:  %16 = "compute"(%15) : (f32) -> f32
-// CHECK-NEXT:  store %16, %1[%14, %12] : memref<2x32xf32, 1>
+// CHECK-NEXT:  %8 = affine.apply [[MAP_MINUS_1]](%c8)
+// CHECK-NEXT:  %9 = affine.apply [[MOD_2]](%8)
+// CHECK-NEXT:  %10 = affine.apply [[MOD_2]](%8)
+// CHECK-NEXT:  affine.dma_wait %2[%8 mod 2, symbol(%c0_0)], %c128 : memref<2x1xf32>
+// CHECK-NEXT:  %11 = affine.load %1[%8 mod 2, %8] : memref<2x32xf32, 1>
+// CHECK-NEXT:  %12 = "compute"(%11) : (f32) -> f32
+// CHECK-NEXT:  affine.store %12, %1[%8 mod 2, %8] : memref<2x32xf32, 1>
 // CHECK-NEXT:  affine.for %i2 = 0 to 128 {
-// CHECK-NEXT:    "do_more_compute"(%12, %i2) : (index, index) -> ()
+// CHECK-NEXT:    "do_more_compute"(%8, %i2) : (index, index) -> ()
 // CHECK-NEXT:  }
 // CHECK-NEXT:  dealloc %2 : memref<2x1xf32>
 // CHECK-NEXT:  dealloc %1 : memref<2x32xf32, 1>
 // CHECK-NEXT:  return
 // CHECK-NEXT:}
 
+// -----
+
+// CHECK-DAG: [[FLOOR_MOD_2:#map[0-9]+]] = (d0) -> ((d0 floordiv 4) mod 2)
+// CHECK-DAG: [[REMAP_SHIFT_MINUS_4:#map[0-9]+]] = (d0) -> (d0 - 4)
 
 // CHECK-LABEL: @loop_step
 func @loop_step(%arg0: memref<512xf32>,
@@ -73,33 +73,31 @@
   affine.for %i0 = 0 to 512 step 4 {
     %1 = alloc() : memref<4xf32, 1>
     %2 = alloc() : memref<1xi32>
-    dma_start %arg0[%i0], %1[%c0], %c4, %2[%c0]
+    affine.dma_start %arg0[%i0], %1[%c0], %2[%c0], %c4,
               : memref<512xf32>, memref<4xf32, 1>, memref<1xi32>
-    dma_wait %2[%c0], %c4 : memref<1xi32>
+    affine.dma_wait %2[%c0], %c4 : memref<1xi32>
     "compute"(%i0) : (index) -> ()
   }
   return
 }
 // CHECK:        [[TAG:%[0-9]+]] = alloc() : memref<2x1xi32>
-// CHECK:        %2 = affine.apply [[FLOOR_MOD_2]](%c0)
-// CHECK:        %3 = affine.apply [[FLOOR_MOD_2]](%c0)
-// CHECK-NEXT:   dma_start %arg0[%c0], %0[%2, %c0_0], %c4, [[TAG]][%3, %c0_0] : memref<512xf32>, memref<2x4xf32, 1>, memref<2x1xi32>
+// CHECK-NEXT:   affine.dma_start %arg0[%c0], %0[(%c0 floordiv 4) mod 2, symbol(%c0_0)], [[TAG]][(%c0 floordiv 4) mod 2, symbol(%c0_0)], %c4 : memref<512xf32>, memref<2x4xf32, 1>, memref<2x1xi32>
 // CHECK-NEXT:   affine.for %i0 = 4 to 512 step 4 {
-// CHECK-NEXT:     %4 = affine.apply [[FLOOR_MOD_2]](%i0)
-// CHECK-NEXT:     %5 = affine.apply [[FLOOR_MOD_2]](%i0)
-// CHECK-NEXT:     dma_start %arg0[%i0], %0[%4, %c0_0], %c4, [[TAG]][%5, %c0_0] : memref<512xf32>, memref<2x4xf32, 1>, memref<2x1xi32>
-// CHECK-NEXT:     %6 = affine.apply [[REMAP_SHIFT_MINUS_4]](%i0)
-// CHECK-NEXT:     %7 = affine.apply [[FLOOR_MOD_2]](%6)
-// CHECK:          dma_wait [[TAG]][%7, %c0_0], %c4 : memref<2x1xi32>
-// CHECK-NEXT:     "compute"(%6) : (index) -> ()
+// CHECK-NEXT:     affine.dma_start %arg0[%i0], %0[(%i0 floordiv 4) mod 2, symbol(%c0_0)], [[TAG]][(%i0 floordiv 4) mod 2, symbol(%c0_0)], %c4 : memref<512xf32>, memref<2x4xf32, 1>, memref<2x1xi32>
+// CHECK-NEXT:     %2 = affine.apply [[REMAP_SHIFT_MINUS_4]](%i0)
+// CHECK-NEXT:     %3 = affine.apply [[FLOOR_MOD_2]](%2)
+// CHECK:          affine.dma_wait [[TAG]][(%2 floordiv 4) mod 2, symbol(%c0_0)], %c4 : memref<2x1xi32>
+// CHECK-NEXT:     "compute"(%2) : (index) -> ()
 // CHECK-NEXT:   }
 // CHECK-NEXT:   [[SHIFTED:%[0-9]+]] = affine.apply [[REMAP_SHIFT_MINUS_4]](%c512)
-// CHECK-NEXT:   %10 = affine.apply [[FLOOR_MOD_2]]([[SHIFTED]])
-// CHECK:        dma_wait [[TAG]][%10, %c0_0], %c4 : memref<2x1xi32>
-// CHECK-NEXT:   "compute"(%9) : (index) -> ()
+// CHECK-NEXT:   %6 = affine.apply [[FLOOR_MOD_2]]([[SHIFTED]])
+// CHECK:        affine.dma_wait [[TAG]][(%5 floordiv 4) mod 2, symbol(%c0_0)], %c4 : memref<2x1xi32>
+// CHECK-NEXT:   "compute"(%5) : (index) -> ()
 // CHECK:        return
 // CHECK-NEXT: }
 
+// -----
+
 #map0 = (d0, d1) -> (d0, d1)
 #map1 = (d0, d1) -> ((d0 * 2048 + d1 * 256) floordiv 32)
 #map2 = (d0) -> ((d0 * 2048) floordiv 32)
@@ -116,65 +114,65 @@
   // Prologue for DMA overlap on arg2.
   // CHECK-DAG: [[BUF_ARG2:%[0-9]+]] = alloc() : memref<2x64x4xvector<8xf32>, 2>
   // CHECK-DAG: [[TAG_ARG2:%[0-9]+]] = alloc() : memref<2x2xi32>
-  // CHECK: dma_start %arg2[
+  // CHECK: affine.dma_start %arg2[
   // CHECK: affine.for %i0 = 1 to 8 {
   affine.for %i0 = 0 to 8 {
     %6 = affine.apply #map2(%i0)
-    dma_start %arg2[%6, %c0], %2[%c0, %c0], %num_elts, %5[%c0] : memref<512x32xvector<8xf32>, #map0>, memref<64x4xvector<8xf32>, #map0, 2>, memref<2xi32>
-    dma_wait %5[%c0], %num_elts : memref<2xi32>
+    affine.dma_start %arg2[%6, %c0], %2[%c0, %c0], %5[%c0], %num_elts : memref<512x32xvector<8xf32>, #map0>, memref<64x4xvector<8xf32>, #map0, 2>, memref<2xi32>
+    affine.dma_wait %5[%c0], %num_elts : memref<2xi32>
     // Steady state for DMA overlap on arg2
-    // CHECK: dma_start %arg2[
-    // CHECK: dma_wait [[TAG_ARG2]]
+    // CHECK: affine.dma_start %arg2[
+    // CHECK: affine.dma_wait [[TAG_ARG2]]
     // Prologue for DMA overlap on arg0, arg1 nested within i0
     // CHECK: [[BUF_ARG0:%[0-9]+]] = alloc() : memref<2x64x4xvector<8xf32>, 2>
     // CHECK: [[BUF_ARG1:%[0-9]+]] = alloc() : memref<2x64x4xvector<8xf32>, 2>
     // CHECK: [[TAG_ARG0:%[0-9]+]] = alloc() : memref<2x2xi32>
     // CHECK: [[TAG_ARG1:%[0-9]+]] = alloc() : memref<2x2xi32>
-    // CHECK: dma_start %arg0[
-    // CHECK: dma_start %arg1[
+    // CHECK: affine.dma_start %arg0[
+    // CHECK: affine.dma_start %arg1[
     // CHECK-NEXT affine.for %i1 = 1 to 8 {
     affine.for %i1 = 0 to 8 {
       %7 = affine.apply #map1(%i0, %i1)
       %8 = affine.apply #map2(%i1)
-      dma_start %arg0[%7, %c0], %0[%c0, %c0], %num_elts, %3[%c0] : memref<512x32xvector<8xf32>, #map0>, memref<64x4xvector<8xf32>, #map0, 2>, memref<2xi32>
-      dma_start %arg1[%8, %c0], %1[%c0, %c0], %num_elts, %4[%c0] : memref<512x32xvector<8xf32>, #map0>, memref<64x4xvector<8xf32>, #map0, 2>, memref<2xi32>
-      dma_wait %3[%c0], %num_elts : memref<2xi32>
-      dma_wait %4[%c0], %num_elts : memref<2xi32>
+      affine.dma_start %arg0[%7, %c0], %0[%c0, %c0], %3[%c0], %num_elts : memref<512x32xvector<8xf32>, #map0>, memref<64x4xvector<8xf32>, #map0, 2>, memref<2xi32>
+      affine.dma_start %arg1[%8, %c0], %1[%c0, %c0], %4[%c0], %num_elts : memref<512x32xvector<8xf32>, #map0>, memref<64x4xvector<8xf32>, #map0, 2>, memref<2xi32>
+      affine.dma_wait %3[%c0], %num_elts : memref<2xi32>
+      affine.dma_wait %4[%c0], %num_elts : memref<2xi32>
       // Steady state for DMA overlap on arg0, arg1
-      // CHECK: dma_start %arg0[
-      // CHECK: dma_start %arg1[
-      // CHECK: dma_wait [[TAG_ARG0]]
-      // CHECK: dma_wait [[TAG_ARG1]]
+      // CHECK: affine.dma_start %arg0[
+      // CHECK: affine.dma_start %arg1[
+      // CHECK: affine.dma_wait [[TAG_ARG0]]
+      // CHECK: affine.dma_wait [[TAG_ARG1]]
       // CHECK-NEXT: affine.for %i2 = 0 to 4 {
       affine.for %i2 = 0 to 4 {
         "foo"() : () -> ()
       }
     }
     // epilogue for arg0, arg1
-    // CHECK: dma_wait [[TAG_ARG0]]
-    // CHECK: dma_wait [[TAG_ARG1]]
+    // CHECK: affine.dma_wait [[TAG_ARG0]]
+    // CHECK: affine.dma_wait [[TAG_ARG1]]
     // CHECK-DAG:    dealloc [[TAG_ARG1]] : memref<2x2xi32>
     // CHECK-DAG:    dealloc [[TAG_ARG0]] : memref<2x2xi32>
     // CHECK-DAG:    dealloc [[BUF_ARG1]] : memref<2x64x4xvector<8xf32>, 2>
     // CHECK-DAG:    dealloc [[BUF_ARG0]] : memref<2x64x4xvector<8xf32>, 2>
   // epilogue for DMA overlap on %arg2
-  // CHECK:  dma_wait [[TAG_ARG2]]
+  // CHECK:  affine.dma_wait [[TAG_ARG2]]
   // Within the epilogue for arg2's DMA, we have the DMAs on %arg1, %arg2 nested.
   // CHECK: [[BUF_ARG0_NESTED:%[0-9]+]] = alloc() : memref<2x64x4xvector<8xf32>, 2>
   // CHECK: [[BUF_ARG1_NESTED:%[0-9]+]] = alloc() : memref<2x64x4xvector<8xf32>, 2>
   // CHECK: [[TAG_ARG0_NESTED:%[0-9]+]] = alloc() : memref<2x2xi32>
   // CHECK: [[TAG_ARG1_NESTED:%[0-9]+]] = alloc() : memref<2x2xi32>
-  // CHECK:  dma_start %arg0[
-  // CHECK:  dma_start %arg1[
+  // CHECK:  affine.dma_start %arg0[
+  // CHECK:  affine.dma_start %arg1[
   // CHECK:  affine.for %i4 = 1 to 8 {
-  // CHECK:    dma_start %arg0[
-  // CHECK:    dma_start %arg1[
-  // CHECK:    dma_wait [[TAG_ARG0_NESTED]]
-  // CHECK:    dma_wait [[TAG_ARG1_NESTED]]
+  // CHECK:    affine.dma_start %arg0[
+  // CHECK:    affine.dma_start %arg1[
+  // CHECK:    affine.dma_wait [[TAG_ARG0_NESTED]]
+  // CHECK:    affine.dma_wait [[TAG_ARG1_NESTED]]
   // CHECK:    affine.for %i5 = 0 to 4 {
   // CHECK:      "foo"() : () -> ()
-  // CHECK:  dma_wait [[TAG_ARG0_NESTED]]
-  // CHECK:  dma_wait [[TAG_ARG1_NESTED]]
+  // CHECK:  affine.dma_wait [[TAG_ARG0_NESTED]]
+  // CHECK:  affine.dma_wait [[TAG_ARG1_NESTED]]
   // CHECK:  affine.for %i6 = 0 to 4 {
   }
   return
@@ -188,6 +186,9 @@
 // CHECK:     return
 }
 
+// -----
+#map2 = (d0) -> ((d0 * 2048) floordiv 32)
+
 // CHECK: func @loop_dma_dependent
 func @loop_dma_dependent(%arg2: memref<512x32xvector<8xf32>>) {
   %num_elts = constant 256 : index
@@ -201,19 +202,21 @@
 
   // The two DMAs below are dependent (incoming and outgoing on the same
   // memref) in the same iteration; so no pipelining here.
-  // CHECK-NOT: dma_start
+  // CHECK-NOT: affine.dma_start
   // CHECK: affine.for %i0 = 0 to 8 {
   affine.for %i0 = 0 to 8 {
     %6 = affine.apply #map2(%i0)
-    dma_start %arg2[%6, %c0], %2[%c0, %c0], %num_elts, %5[%c0] : memref<512x32xvector<8xf32>>, memref<64x4xvector<8xf32>, 2>, memref<2xi32>
-    dma_wait %5[%c0], %num_elts : memref<2xi32>
+    affine.dma_start %arg2[%6, %c0], %2[%c0, %c0], %5[%c0], %num_elts : memref<512x32xvector<8xf32>>, memref<64x4xvector<8xf32>, 2>, memref<2xi32>
+    affine.dma_wait %5[%c0], %num_elts : memref<2xi32>
 
-    dma_start %2[%c0, %c0], %arg2[%6, %c0], %num_elts, %5[%c0] : memref<64x4xvector<8xf32>, 2>, memref<512x32xvector<8xf32>>, memref<2xi32>
-    dma_wait %5[%c0], %num_elts : memref<2xi32>
+    affine.dma_start %2[%c0, %c0], %arg2[%6, %c0], %5[%c0], %num_elts : memref<64x4xvector<8xf32>, 2>, memref<512x32xvector<8xf32>>, memref<2xi32>
+    affine.dma_wait %5[%c0], %num_elts : memref<2xi32>
   } // CHECK: }
   return // CHECK: return
 }
 
+// -----
+
 // CHECK-LABEL: func @escaping_use
 func @escaping_use(%arg0: memref<512 x 32 x f32>) {
   %c32 = constant 32 : index
@@ -222,13 +225,13 @@
   %Av = alloc() : memref<32 x 32 x f32, 2>
   %tag = alloc() : memref<1 x i32>
 
-  // CHECK-NOT: dma_start
+  // CHECK-NOT: affine.dma_start
   // CHECK: affine.for %i0 = 0 to 16 {
   affine.for %kTT = 0 to 16 {
-    dma_start %arg0[%zero, %zero], %Av[%zero, %zero], %num_elt, %tag[%zero] :
+    affine.dma_start %arg0[%zero, %zero], %Av[%zero, %zero], %tag[%zero], %num_elt :
       memref<512 x 32 x f32>,
       memref<32 x 32 x f32, 2>, memref<1 x i32>
-    dma_wait %tag[%zero], %num_elt : memref<1 x i32>
+    affine.dma_wait %tag[%zero], %num_elt : memref<1 x i32>
     // escaping use; no DMA pipelining / double buffering will be done.
     "foo"(%Av) : (memref<32 x 32 x f32, 2>) -> ()
   }
@@ -238,6 +241,8 @@
 // CHECK:      return
 }
 
+// -----
+
 // CHECK-LABEL: func @live_out_use
 func @live_out_use(%arg0: memref<512 x 32 x f32>) -> f32 {
   %c32 = constant 32 : index
@@ -246,21 +251,23 @@
   %Av = alloc() : memref<32 x 32 x f32, 2>
   %tag = alloc() : memref<1 x i32>
 
-  // CHECK-NOT: dma_start
+  // CHECK-NOT: affine.dma_start
   // CHECK: affine.for %i0 = 0 to 16 {
   affine.for %kTT = 0 to 16 {
-    dma_start %arg0[%zero, %zero], %Av[%zero, %zero], %num_elt, %tag[%zero] :
+    affine.dma_start %arg0[%zero, %zero], %Av[%zero, %zero], %tag[%zero], %num_elt :
       memref<512 x 32 x f32>,
       memref<32 x 32 x f32, 2>, memref<1 x i32>
-    dma_wait %tag[%zero], %num_elt : memref<1 x i32>
+    affine.dma_wait %tag[%zero], %num_elt : memref<1 x i32>
   }
   // Use live out of 'affine.for' op; no DMA pipelining will be done.
-  %v = load %Av[%zero, %zero] : memref<32 x 32 x f32, 2>
+  %v = affine.load %Av[%zero, %zero] : memref<32 x 32 x f32, 2>
   return %v : f32
-// CHECK:      %{{[0-9]+}} = load %{{[0-9]+}}[%c0, %c0] : memref<32x32xf32, 2>
+// CHECK:      %{{[0-9]+}} = affine.load %{{[0-9]+}}[%c0, %c0] : memref<32x32xf32, 2>
 // CHECK:      return
 }
 
+// -----
+
 // CHECK-LABEL: func @dynamic_shape_dma_buffer
 func @dynamic_shape_dma_buffer(%arg0: memref<512 x 32 x f32>) {
   %c32 = constant 32 : index
@@ -275,22 +282,18 @@
 // CHECK-NEXT:  %1 = dim %0, 0 : memref<?x?xf32, 2>
 // CHECK-NEXT:  %2 = dim %0, 1 : memref<?x?xf32, 2>
 // CHECK-NEXT:  %3 = alloc(%1, %2) : memref<2x?x?xf32, 2>
-// CHECK:       %5 = affine.apply [[MOD_2]](%c0)
-// CHECK:       %6 = affine.apply [[MOD_2]](%c0)
-// CHECK:       dma_start %arg0[%c0_0, %c0_0], %3[%5, %c0_0, %c0_0], %c512, %4[%6, %c0_0]
+// CHECK:       affine.dma_start %arg0[%c0_0, %c0_0], %3[%c0 mod 2, symbol(%c0_0), symbol(%c0_0)], %4[%c0 mod 2, symbol(%c0_0)], %c512
   affine.for %kTT = 0 to 16 {
-    dma_start %arg0[%zero, %zero], %Av[%zero, %zero], %num_elt, %tag[%zero] :
+    affine.dma_start %arg0[%zero, %zero], %Av[%zero, %zero], %tag[%zero], %num_elt :
       memref<512 x 32 x f32>,
       memref<? x ? x f32, 2>, memref<1 x i32>
-    dma_wait %tag[%zero], %num_elt : memref<1 x i32>
+    affine.dma_wait %tag[%zero], %num_elt : memref<1 x i32>
   }
   return
 // CHECK-NEXT:  affine.for %i0 = 1 to 16 {
-// CHECK:         %7 = affine.apply [[MOD_2]](%i0)
-// CHECK:         %8 = affine.apply [[MOD_2]](%i0)
-// CHECK:         dma_start %arg0[%c0_0, %c0_0], %3[%7, %c0_0, %c0_0], %c512, %4[%8, %c0_0]
-// CHECK:         dma_wait %4[%10, %c0_0], %c512 : memref<2x1xi32>
+// CHECK:         affine.dma_start %arg0[%c0_0, %c0_0], %3[%i0 mod 2, symbol(%c0_0), symbol(%c0_0)], %4[%i0 mod 2, symbol(%c0_0)], %c512
+// CHECK:         affine.dma_wait %4[%5 mod 2, symbol(%c0_0)], %c512 : memref<2x1xi32>
 // CHECK:       }
-// CHECK:       dma_wait %4[%13, %c0_0], %c512 : memref<2x1xi32>
+// CHECK:       affine.dma_wait %4[%8 mod 2, symbol(%c0_0)], %c512 : memref<2x1xi32>
 // CHECK:       return
 }