Change Pass::getFunction() to return a pointer instead of a reference - NFC

- change this for consistency: everything else comparable takes or returns a
  Function pointer (the FuncBuilder ctor, Block/Value/Instruction::getFunction(),
  etc.); see the sketch below
- saves a whole bunch of &s everywhere
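
A minimal sketch of the before/after call sites (the pass name here is
hypothetical; it only uses APIs that appear in the hunks below):

    struct ExamplePass : public FunctionPass<ExamplePass> {
      void runOnFunction() {
        // Before: getFunction() returned a Function&, so call sites used '.'
        // or had to take the address explicitly:
        //   Function *f = &getFunction();
        //   getFunction().walk(...);
        // After: getFunction() returns a Function*, matching the FuncBuilder
        // ctor and Block/Value/Instruction::getFunction():
        Function *f = getFunction();
        FuncBuilder builder(f);
        f->walk([&](Instruction *inst) { (void)inst; });
      }
    };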

PiperOrigin-RevId: 236928761
diff --git a/mlir/lib/Transforms/CSE.cpp b/mlir/lib/Transforms/CSE.cpp
index 9812dec..f481896 100644
--- a/mlir/lib/Transforms/CSE.cpp
+++ b/mlir/lib/Transforms/CSE.cpp
@@ -218,7 +218,7 @@
 
 void CSE::runOnFunction() {
   simplifyBlockList(getAnalysisResult<DominanceInfo>(),
-                    getFunction().getBlockList());
+                    getFunction()->getBlockList());
 
   // If no operations were erased, then we mark all analyses as preserved.
   if (opsToErase.empty()) {
diff --git a/mlir/lib/Transforms/Canonicalizer.cpp b/mlir/lib/Transforms/Canonicalizer.cpp
index 17259bb..7724426 100644
--- a/mlir/lib/Transforms/Canonicalizer.cpp
+++ b/mlir/lib/Transforms/Canonicalizer.cpp
@@ -40,16 +40,16 @@
 
 void Canonicalizer::runOnFunction() {
   OwningRewritePatternList patterns;
-  auto &func = getFunction();
+  auto *func = getFunction();
 
   // TODO: Instead of adding all known patterns from the whole system lazily add
   // and cache the canonicalization patterns for ops we see in practice when
   // building the worklist.  For now, we just grab everything.
-  auto *context = func.getContext();
+  auto *context = func->getContext();
   for (auto *op : context->getRegisteredOperations())
     op->getCanonicalizationPatterns(patterns, context);
 
-  applyPatternsGreedily(&func, std::move(patterns));
+  applyPatternsGreedily(func, std::move(patterns));
 }
 
 /// Create a Canonicalizer pass.
diff --git a/mlir/lib/Transforms/ConstantFold.cpp b/mlir/lib/Transforms/ConstantFold.cpp
index d3da8c1..6bdb1bf 100644
--- a/mlir/lib/Transforms/ConstantFold.cpp
+++ b/mlir/lib/Transforms/ConstantFold.cpp
@@ -97,7 +97,7 @@
   existingConstants.clear();
   opInstsToErase.clear();
 
-  getFunction().walk([&](Instruction *inst) { foldInstruction(inst); });
+  getFunction()->walk([&](Instruction *inst) { foldInstruction(inst); });
 
   // At this point, these operations are dead, remove them.
   // TODO: This is assuming that all constant foldable operations have no
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index 91d5352..44b4d95 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -754,7 +754,7 @@
 }
 
 void DmaGeneration::runOnFunction() {
-  Function *f = &getFunction();
+  Function *f = getFunction();
   FuncBuilder topBuilder(f);
   zeroIndex = topBuilder.create<ConstantIndexOp>(f->getLoc(), 0);
 
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index 1e4e020..7466e49 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -2202,7 +2202,7 @@
   }
 
   MemRefDependenceGraph g;
-  if (g.init(&getFunction()))
+  if (g.init(getFunction()))
     GreedyFusion(&g, localBufSizeThreshold, fastMemorySpace).run();
 }
 
diff --git a/mlir/lib/Transforms/LoopTiling.cpp b/mlir/lib/Transforms/LoopTiling.cpp
index 4aebbc2..e58de3b 100644
--- a/mlir/lib/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Transforms/LoopTiling.cpp
@@ -255,7 +255,7 @@
 
 void LoopTiling::runOnFunction() {
   std::vector<SmallVector<OpPointer<AffineForOp>, 6>> bands;
-  getTileableBands(&getFunction(), &bands);
+  getTileableBands(getFunction(), &bands);
 
   for (auto &band : bands) {
     // Set up tile sizes; fill missing tile sizes at the end with default tile
diff --git a/mlir/lib/Transforms/LoopUnroll.cpp b/mlir/lib/Transforms/LoopUnroll.cpp
index 4cf65b4..40da2f6 100644
--- a/mlir/lib/Transforms/LoopUnroll.cpp
+++ b/mlir/lib/Transforms/LoopUnroll.cpp
@@ -129,11 +129,13 @@
     // Gathers all loops with trip count <= minTripCount. Do a post order walk
     // so that loops are gathered from innermost to outermost (or else unrolling
     // an outer one may delete gathered inner ones).
-    getFunction().walkPostOrder<AffineForOp>([&](OpPointer<AffineForOp> forOp) {
-      Optional<uint64_t> tripCount = getConstantTripCount(forOp);
-      if (tripCount.hasValue() && tripCount.getValue() <= clUnrollFullThreshold)
-        loops.push_back(forOp);
-    });
+    getFunction()->walkPostOrder<AffineForOp>(
+        [&](OpPointer<AffineForOp> forOp) {
+          Optional<uint64_t> tripCount = getConstantTripCount(forOp);
+          if (tripCount.hasValue() &&
+              tripCount.getValue() <= clUnrollFullThreshold)
+            loops.push_back(forOp);
+        });
     for (auto forOp : loops)
       loopUnrollFull(forOp);
     return;
@@ -143,7 +145,7 @@
                                 ? clUnrollNumRepetitions
                                 : 1;
   // If the call back is provided, we will recurse until no loops are found.
-  Function *func = &getFunction();
+  Function *func = getFunction();
   for (unsigned i = 0; i < numRepetitions || getUnrollFactor; i++) {
     InnermostLoopGatherer ilg;
     ilg.walkPostOrder(func);
diff --git a/mlir/lib/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
index 6d04b84..63fc451 100644
--- a/mlir/lib/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
@@ -91,7 +91,7 @@
   // Currently, just the outermost loop from the first loop nest is
   // unroll-and-jammed by this pass. However, runOnAffineForOp can be called on
   // any for operation.
-  auto &entryBlock = getFunction().front();
+  auto &entryBlock = getFunction()->front();
   if (auto forOp = entryBlock.front().dyn_cast<AffineForOp>())
     runOnAffineForOp(forOp);
 }
diff --git a/mlir/lib/Transforms/LowerAffine.cpp b/mlir/lib/Transforms/LowerAffine.cpp
index 3776403..31232ce 100644
--- a/mlir/lib/Transforms/LowerAffine.cpp
+++ b/mlir/lib/Transforms/LowerAffine.cpp
@@ -608,7 +608,7 @@
 
   // Collect all the For instructions as well as AffineIfOps and AffineApplyOps.
   // We do this as a prepass to avoid invalidating the walker with our rewrite.
-  getFunction().walk([&](Instruction *inst) {
+  getFunction()->walk([&](Instruction *inst) {
     if (inst->isa<AffineApplyOp>() || inst->isa<AffineForOp>() ||
         inst->isa<AffineIfOp>())
       instsToRewrite.push_back(inst);
diff --git a/mlir/lib/Transforms/LowerVectorTransfers.cpp b/mlir/lib/Transforms/LowerVectorTransfers.cpp
index e3ad429..9ac8583 100644
--- a/mlir/lib/Transforms/LowerVectorTransfers.cpp
+++ b/mlir/lib/Transforms/LowerVectorTransfers.cpp
@@ -426,7 +426,7 @@
 struct LowerVectorTransfersPass
     : public FunctionPass<LowerVectorTransfersPass> {
   void runOnFunction() {
-    Function *f = &getFunction();
+    Function *f = getFunction();
     applyMLPatternsGreedily<VectorTransferExpander<VectorTransferReadOp>,
                             VectorTransferExpander<VectorTransferWriteOp>>(f);
   }
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index 9f4027e..0d54ead 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -733,7 +733,7 @@
   NestedPatternContext mlContext;
 
   // TODO(ntv): Check to see if this supports arbitrary top-level code.
-  Function *f = &getFunction();
+  Function *f = getFunction();
   if (f->getBlocks().size() != 1)
     return;
 
diff --git a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
index 4fc5440..f48f909 100644
--- a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
+++ b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
@@ -211,8 +211,8 @@
 
 void MemRefDataFlowOpt::runOnFunction() {
   // Only supports single block functions at the moment.
-  Function &f = getFunction();
-  if (f.getBlocks().size() != 1) {
+  Function *f = getFunction();
+  if (f->getBlocks().size() != 1) {
     markAllAnalysesPreserved();
     return;
   }
@@ -224,7 +224,8 @@
   memrefsToErase.clear();
 
   // Walk all load's and perform load/store forwarding.
-  f.walk<LoadOp>([&](OpPointer<LoadOp> loadOp) { forwardStoreToLoad(loadOp); });
+  f->walk<LoadOp>(
+      [&](OpPointer<LoadOp> loadOp) { forwardStoreToLoad(loadOp); });
 
   // Erase all load op's whose results were replaced with store fwd'ed ones.
   for (auto *loadOp : loadOpsToErase) {
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index cdce523..08115ed 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -146,7 +146,7 @@
   // deleted and replaced by a prologue, a new steady-state loop and an
   // epilogue).
   forOps.clear();
-  getFunction().walkPostOrder<AffineForOp>(
+  getFunction()->walkPostOrder<AffineForOp>(
       [&](OpPointer<AffineForOp> forOp) { forOps.push_back(forOp); });
   for (auto forOp : forOps)
     runOnAffineForOp(forOp);
diff --git a/mlir/lib/Transforms/SimplifyAffineStructures.cpp b/mlir/lib/Transforms/SimplifyAffineStructures.cpp
index 3adcbe0..8a8e7af 100644
--- a/mlir/lib/Transforms/SimplifyAffineStructures.cpp
+++ b/mlir/lib/Transforms/SimplifyAffineStructures.cpp
@@ -58,7 +58,7 @@
 }
 
 void SimplifyAffineStructures::runOnFunction() {
-  getFunction().walk([&](Instruction *opInst) {
+  getFunction()->walk([&](Instruction *opInst) {
     for (auto attr : opInst->getAttrs()) {
       if (auto mapAttr = attr.second.dyn_cast<AffineMapAttr>()) {
         MutableAffineMap mMap(mapAttr.getValue());
diff --git a/mlir/lib/Transforms/StripDebugInfo.cpp b/mlir/lib/Transforms/StripDebugInfo.cpp
index 47244f9..f8f90c0 100644
--- a/mlir/lib/Transforms/StripDebugInfo.cpp
+++ b/mlir/lib/Transforms/StripDebugInfo.cpp
@@ -29,12 +29,12 @@
 } // end anonymous namespace
 
 void StripDebugInfo::runOnFunction() {
-  Function &func = getFunction();
-  UnknownLoc unknownLoc = UnknownLoc::get(func.getContext());
+  Function *func = getFunction();
+  UnknownLoc unknownLoc = UnknownLoc::get(func->getContext());
 
   // Strip the debug info from the function and its instructions.
-  func.setLoc(unknownLoc);
-  func.walk([&](Instruction *inst) { inst->setLoc(unknownLoc); });
+  func->setLoc(unknownLoc);
+  func->walk([&](Instruction *inst) { inst->setLoc(unknownLoc); });
 }
 
 /// Creates a pass to strip debug information from a function.
diff --git a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
index fa9c4bc..8fd1cac 100644
--- a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
+++ b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
@@ -264,7 +264,7 @@
   NestedPatternContext mlContext;
 
   // Only support single block functions at this point.
-  Function *f = &getFunction();
+  Function *f = getFunction();
   if (f->getBlocks().size() != 1)
     return;
 
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index d8e5714..b084b01 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -1262,7 +1262,7 @@
   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
   NestedPatternContext mlContext;
 
-  Function *f = &getFunction();
+  Function *f = getFunction();
   for (auto &pat : makePatterns()) {
     LLVM_DEBUG(dbgs() << "\n******************************************");
     LLVM_DEBUG(dbgs() << "\n******************************************");
diff --git a/mlir/lib/Transforms/ViewFunctionGraph.cpp b/mlir/lib/Transforms/ViewFunctionGraph.cpp
index b2dfe67..d77e96a 100644
--- a/mlir/lib/Transforms/ViewFunctionGraph.cpp
+++ b/mlir/lib/Transforms/ViewFunctionGraph.cpp
@@ -78,7 +78,7 @@
                const llvm::Twine &title = "")
       : os(os), shortNames(shortNames), title(title) {}
   void runOnFunction() {
-    mlir::writeGraph(os, &getFunction(), shortNames, title);
+    mlir::writeGraph(os, getFunction(), shortNames, title);
   }
 
 private: