GPU: Only allow the UI channel to preempt if all stubs are scheduled.

This is needed for uber-comp with --ui-prioritize-in-gpu-process. By itself, it does not change current preemption behavior.
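
For reference, a rough standalone sketch of the gating this patch adds
(simplified; this is not the Chromium implementation -- base::OneShotTimer,
gpu::PreemptionFlag and the pending-message bookkeeping are replaced with
plain stand-ins):

  enum PreemptionState {
    IDLE,                       // No pending messages.
    CHECKING,                   // Preempt if an IPC exceeds kPreemptWaitTimeMs.
    PREEMPTING,                 // Actively preempting; no stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,  // New: we want to preempt, but some stub is
                                // descheduled, so the flag stays cleared.
  };

  struct PreemptionGate {
    PreemptionState state = IDLE;
    bool a_stub_is_descheduled = false;
    bool preempting_flag = false;  // Stand-in for gpu::PreemptionFlag.

    // Mirrors SyncPointMessageFilter::UpdateStubSchedulingState(), which the
    // main thread posts to the IO thread whenever the channel flips between
    // "all stubs scheduled" and "some stub descheduled".
    void UpdateStubSchedulingState(bool any_descheduled) {
      a_stub_is_descheduled = any_descheduled;
      if (state == PREEMPTING && a_stub_is_descheduled) {
        // A descheduled stub cannot make progress, so stop asserting
        // preemption. The real code also saves the remaining preemption
        // budget (max_preemption_time_) for when preemption resumes.
        preempting_flag = false;
        state = WOULD_PREEMPT_DESCHEDULED;
      } else if (state == WOULD_PREEMPT_DESCHEDULED && !a_stub_is_descheduled) {
        // Every stub is runnable again; resume preempting other channels.
        preempting_flag = true;
        state = PREEMPTING;
      }
    }
  };

GpuChannel::StubSchedulingChanged() keeps num_stubs_descheduled_ on the main
thread and only posts this update when the "any stub descheduled" bit
actually changes.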

BUG=173650


Review URL: https://chromiumcodereview.appspot.com/12340118

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@185445 0039d316-1c4b-4281-b951-d872f2087c98
diff --git a/content/common/gpu/gpu_channel.cc b/content/common/gpu/gpu_channel.cc
index c07d15f..71ac691 100644
--- a/content/common/gpu/gpu_channel.cc
+++ b/content/common/gpu/gpu_channel.cc
@@ -157,7 +157,8 @@
         channel_(NULL),
         sync_point_manager_(sync_point_manager),
         message_loop_(message_loop),
-        messages_received_(0) {
+        messages_received_(0),
+        a_stub_is_descheduled_(false) {
   }
 
   virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
@@ -207,8 +208,16 @@
     UpdatePreemptionState();
   }
 
-  void SetPreemptingFlag(gpu::PreemptionFlag* preempting_flag) {
+  void SetPreemptingFlagAndSchedulingState(
+      gpu::PreemptionFlag* preempting_flag,
+      bool a_stub_is_descheduled) {
     preempting_flag_ = preempting_flag;
+    a_stub_is_descheduled_ = a_stub_is_descheduled;
+  }
+
+  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
+    a_stub_is_descheduled_ = a_stub_is_descheduled;
+    UpdatePreemptionState();
   }
 
  protected:
@@ -228,12 +237,18 @@
     // We can preempt whenever any IPC processing takes more than
     // kPreemptWaitTimeMs.
     CHECKING,
-    // We are currently preempting.
+    // We are currently preempting (i.e. no stub is descheduled).
     PREEMPTING,
+    // We would like to preempt, but some stub is descheduled.
+    WOULD_PREEMPT_DESCHEDULED,
   };
 
   PreemptionState preemption_state_;
 
+  // Maximum amount of time that we can spend in PREEMPTING.
+  // It is reset when we transition from IDLE to CHECKING.
+  base::TimeDelta max_preemption_time_;
+
   struct PendingMessage {
     uint64 message_number;
     base::TimeTicks time_received;
@@ -266,27 +281,50 @@
                     time_elapsed,
                 this, &SyncPointMessageFilter::UpdatePreemptionState);
           } else {
-            TransitionToPreempting();
+            if (a_stub_is_descheduled_)
+              TransitionToWouldPreemptDescheduled();
+            else
+              TransitionToPreempting();
           }
         }
         break;
       case PREEMPTING:
-        if (pending_messages_.empty()) {
-          TransitionToIdle();
-        } else {
-          base::TimeDelta time_elapsed =
-              base::TimeTicks::Now() - pending_messages_.front().time_received;
-          if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
-            TransitionToIdle();
-        }
+        // A TransitionToIdle() timer should always be running in this state.
+        DCHECK(timer_.IsRunning());
+        if (a_stub_is_descheduled_)
+          TransitionToWouldPreemptDescheduled();
+        else
+          TransitionToIdleIfCaughtUp();
+        break;
+      case WOULD_PREEMPT_DESCHEDULED:
+        // A TransitionToIdle() timer should never be running in this state.
+        DCHECK(!timer_.IsRunning());
+        if (!a_stub_is_descheduled_)
+          TransitionToPreempting();
+        else
+          TransitionToIdleIfCaughtUp();
         break;
       default:
         NOTREACHED();
     }
   }
 
+  void TransitionToIdleIfCaughtUp() {
+    DCHECK(preemption_state_ == PREEMPTING ||
+           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
+    if (pending_messages_.empty()) {
+      TransitionToIdle();
+    } else {
+      base::TimeDelta time_elapsed =
+          base::TimeTicks::Now() - pending_messages_.front().time_received;
+      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
+        TransitionToIdle();
+    }
+  }
+
   void TransitionToIdle() {
-    DCHECK_EQ(preemption_state_, PREEMPTING);
+    DCHECK(preemption_state_ == PREEMPTING ||
+           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
     // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
     timer_.Stop();
 
@@ -313,15 +351,19 @@
     DCHECK(!timer_.IsRunning());
 
     preemption_state_ = CHECKING;
+    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
     UpdatePreemptionState();
   }
 
   void TransitionToPreempting() {
-    DCHECK_EQ(preemption_state_, CHECKING);
+    DCHECK(preemption_state_ == CHECKING ||
+           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
+    DCHECK(!a_stub_is_descheduled_);
 
     // Stop any pending state update checks that we may have queued
     // while CHECKING.
-    timer_.Stop();
+    if (preemption_state_ == CHECKING)
+      timer_.Stop();
 
     preemption_state_ = PREEMPTING;
     preempting_flag_->Set();
@@ -329,12 +371,39 @@
 
     timer_.Start(
        FROM_HERE,
-       base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs),
+       max_preemption_time_,
        this, &SyncPointMessageFilter::TransitionToIdle);
 
     UpdatePreemptionState();
   }
 
+  void TransitionToWouldPreemptDescheduled() {
+    DCHECK(preemption_state_ == CHECKING ||
+           preemption_state_ == PREEMPTING);
+    DCHECK(a_stub_is_descheduled_);
+
+    if (preemption_state_ == CHECKING) {
+      // Stop any pending state update checks that we may have queued
+      // while CHECKING.
+      timer_.Stop();
+    } else {
+      // Stop any TransitionToIdle() timers that we may have queued
+      // while PREEMPTING.
+      timer_.Stop();
+      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
+      if (max_preemption_time_ < base::TimeDelta()) {
+        TransitionToIdle();
+        return;
+      }
+    }
+
+    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
+    preempting_flag_->Reset();
+    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
+
+    UpdatePreemptionState();
+  }
+
   static void InsertSyncPointOnMainThread(
       base::WeakPtr<GpuChannel>* gpu_channel,
       scoped_refptr<SyncPointManager> manager,
@@ -380,6 +449,8 @@
   uint64 messages_received_;
 
   base::OneShotTimer<SyncPointMessageFilter> timer_;
+
+  bool a_stub_is_descheduled_;
 };
 
 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
@@ -399,7 +470,8 @@
       handle_messages_scheduled_(false),
       processed_get_state_fast_(false),
       currently_processing_message_(NULL),
-      weak_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
+      weak_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+      num_stubs_descheduled_(0) {
   DCHECK(gpu_channel_manager);
   DCHECK(client_id);
 
@@ -545,6 +617,27 @@
   handle_messages_scheduled_ = true;
 }
 
+void GpuChannel::StubSchedulingChanged(bool scheduled) {
+  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
+  if (scheduled) {
+    num_stubs_descheduled_--;
+    OnScheduled();
+  } else {
+    num_stubs_descheduled_++;
+  }
+  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
+  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;
+
+  if (a_stub_is_descheduled != a_stub_was_descheduled) {
+    if (preempting_flag_.get()) {
+      io_message_loop_->PostTask(
+          FROM_HERE,
+          base::Bind(&SyncPointMessageFilter::UpdateStubSchedulingState,
+                     filter_, a_stub_is_descheduled));
+    }
+  }
+}
+
 void GpuChannel::CreateViewCommandBuffer(
     const gfx::GLSurfaceHandle& window,
     int32 surface_id,
@@ -649,8 +742,8 @@
     preempting_flag_ = new gpu::PreemptionFlag;
     io_message_loop_->PostTask(
         FROM_HERE,
-        base::Bind(&SyncPointMessageFilter::SetPreemptingFlag,
-                   filter_, preempting_flag_));
+        base::Bind(&SyncPointMessageFilter::SetPreemptingFlagAndSchedulingState,
+                   filter_, preempting_flag_, num_stubs_descheduled_ > 0));
   }
   return preempting_flag_.get();
 }
@@ -799,8 +892,11 @@
     stubs_.Remove(route_id);
     // In case the renderer is currently blocked waiting for a sync reply from
     // the stub, we need to make sure to reschedule the GpuChannel here.
-    if (need_reschedule)
-      OnScheduled();
+    if (need_reschedule) {
+      // This stub won't get a chance to reschedule, so update the count
+      // now.
+      StubSchedulingChanged(true);
+    }
   }
 }
 
diff --git a/content/common/gpu/gpu_channel.h b/content/common/gpu/gpu_channel.h
index 3cf2caf..4e87588c 100644
--- a/content/common/gpu/gpu_channel.h
+++ b/content/common/gpu/gpu_channel.h
@@ -102,6 +102,11 @@
   // deferred IPC messages are handled.
   void OnScheduled();
 
+  // This is called when a command buffer transitions between scheduled and
+  // descheduled states. When any stub is descheduled, we stop preempting
+  // other channels.
+  void StubSchedulingChanged(bool scheduled);
+
   void CreateViewCommandBuffer(
       const gfx::GLSurfaceHandle& window,
       int32 surface_id,
@@ -240,6 +245,8 @@
   scoped_refptr<SyncPointMessageFilter> filter_;
   scoped_refptr<base::MessageLoopProxy> io_message_loop_;
 
+  size_t num_stubs_descheduled_;
+
   DISALLOW_COPY_AND_ASSIGN(GpuChannel);
 };
 
diff --git a/content/common/gpu/gpu_command_buffer_stub.cc b/content/common/gpu/gpu_command_buffer_stub.cc
index 550c174..7e53996 100644
--- a/content/common/gpu/gpu_command_buffer_stub.cc
+++ b/content/common/gpu/gpu_command_buffer_stub.cc
@@ -265,13 +265,6 @@
   Send(new IPC::Message(message));
 }
 
-void GpuCommandBufferStub::OnReschedule() {
-  if (!IsScheduled())
-    return;
-
-  channel_->OnScheduled();
-}
-
 bool GpuCommandBufferStub::MakeCurrent() {
   if (decoder_->MakeCurrent())
     return true;
@@ -485,8 +478,9 @@
                  base::Unretained(scheduler_.get())));
   command_buffer_->SetParseErrorCallback(
       base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
-  scheduler_->SetScheduledCallback(
-      base::Bind(&GpuCommandBufferStub::OnReschedule, base::Unretained(this)));
+  scheduler_->SetSchedulingChangedCallback(
+      base::Bind(&GpuChannel::StubSchedulingChanged,
+                 base::Unretained(channel_)));
 
   if (watchdog_) {
     scheduler_->SetCommandProcessedCallback(
diff --git a/content/common/gpu/gpu_command_buffer_stub.h b/content/common/gpu/gpu_command_buffer_stub.h
index a30626a..09668e9 100644
--- a/content/common/gpu/gpu_command_buffer_stub.h
+++ b/content/common/gpu/gpu_command_buffer_stub.h
@@ -179,8 +179,6 @@
   void OnReceivedClientManagedMemoryStats(const GpuManagedMemoryStats& stats);
   void OnSetClientHasMemoryAllocationChangedCallback(bool has_callback);
 
-  void OnReschedule();
-
   void OnCommandProcessed();
   void OnParseError();