Attempting to re-land a small portion of this change... Simply add links from
lower layer pools to higher layer pools.

Revert 10006036 - Revert 130129 - Revert 129034 - Revert 127893 - Revert 127730 - Revert 127717 - Revert 118788 - Revert 113405 - Revert 113305 - Revert 113300 - Revert 112134 - Revert 112130 - Close idle connections / SPDY sessions when needed

Because each socket pool tracks its own idle connection state, a higher layer socket pool can hold onto a socket that actually lives in a lower layer socket pool. From the lower layer pool's perspective, the socket is being "actively" used; from the higher layer pool's perspective (including SpdySession, which is more of a connection manager), the connection is idle and can be closed if we have hit a limit.

Normally this isn't a big deal, except when we have a lot of idle SPDY connections and are connecting via a proxy, where the connection limits through the proxy server are low. We address this problem by allowing lower layer socket pools to tell higher layer socket pools to close a socket.
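Roughly, the mechanism looks like the following sketch (simplified; LowerLayerPool is a stand-in name used only for illustration, while the real registration set and traversal live in ClientSocketPoolBaseHelper and the pool subclasses in the patch below):

    #include <set>

    // Implemented by higher layer pools (SOCKS/SSL pools, SpdySession pool, ...).
    class LayeredPool {
     public:
      virtual ~LayeredPool() {}
      // Closes an idle connection held by the higher layer pool.
      // Returns true if one was closed.
      virtual bool CloseOneIdleConnection() = 0;
    };

    // Stand-in for the lower layer pool. Higher layer pools register themselves,
    // and when the lower layer has no idle sockets of its own it asks each
    // registered higher layer pool to give one up.
    class LowerLayerPool {
     public:
      void AddLayeredPool(LayeredPool* pool) { higher_layer_pools_.insert(pool); }
      void RemoveLayeredPool(LayeredPool* pool) { higher_layer_pools_.erase(pool); }

      bool CloseOneIdleConnectionInLayeredPool() {
        for (std::set<LayeredPool*>::const_iterator it =
                 higher_layer_pools_.begin();
             it != higher_layer_pools_.end(); ++it) {
          if ((*it)->CloseOneIdleConnection())
            return true;  // A higher layer gave up an idle connection.
        }
        return false;  // Nothing idle anywhere above this layer.
      }

     private:
      std::set<LayeredPool*> higher_layer_pools_;
    };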

Fixed ASAN test failures by removing .Times(1) and .Times(2) from the CloseMultipleIdleSocketsHeldByLayeredPoolWhenNeeded unittest (this removes the tests' reliance on the iteration order of the std::set in CloseOneIdleConnectionInLayeredPool). ASAN probably causes the memory allocator to place the pools at different addresses. The std::set is ordered by LayeredPool*, which is the address of the LayeredPool (willchan).
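Concretely (illustration only, not part of the patch), the registration set is keyed by pointer value, so iteration order follows wherever the allocator happened to place each pool:

    std::set<LayeredPool*> higher_layer_pools_;  // Ordered by LayeredPool* address.
    // Iterating this set visits pools in increasing address order. Those
    // addresses depend on the allocator (and differ under ASAN), so tests must
    // not assume which higher layer pool is asked to close a connection first.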

Added NET_EXPORT to the LayeredPool class definition to fix the Windows shared compile.

BUG=62364, 92244, 109876, 110368, 119847
TEST=


Review URL: http://codereview.chromium.org/10026024

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@131604 0039d316-1c4b-4281-b951-d872f2087c98
diff --git a/net/socket/client_socket_handle.cc b/net/socket/client_socket_handle.cc
index 8cc0e90..ba30151 100644
--- a/net/socket/client_socket_handle.cc
+++ b/net/socket/client_socket_handle.cc
@@ -17,6 +17,8 @@
 
 ClientSocketHandle::ClientSocketHandle()
     : is_initialized_(false),
+      pool_(NULL),
+      layered_pool_(NULL),
       is_reused_(false),
       ALLOW_THIS_IN_INITIALIZER_LIST(callback_(
           base::Bind(&ClientSocketHandle::OnIOComplete,
@@ -52,6 +54,10 @@
   group_name_.clear();
   is_reused_ = false;
   user_callback_.Reset();
+  if (layered_pool_) {
+    pool_->RemoveLayeredPool(layered_pool_);
+    layered_pool_ = NULL;
+  }
   pool_ = NULL;
   idle_time_ = base::TimeDelta();
   init_time_ = base::TimeTicks();
@@ -75,6 +81,28 @@
   return pool_->GetLoadState(group_name_, this);
 }
 
+bool ClientSocketHandle::IsPoolStalled() const {
+  return pool_->IsStalled();
+}
+
+void ClientSocketHandle::AddLayeredPool(LayeredPool* layered_pool) {
+  CHECK(layered_pool);
+  CHECK(!layered_pool_);
+  if (pool_) {
+    pool_->AddLayeredPool(layered_pool);
+    layered_pool_ = layered_pool;
+  }
+}
+
+void ClientSocketHandle::RemoveLayeredPool(LayeredPool* layered_pool) {
+  CHECK(layered_pool);
+  CHECK(layered_pool_);
+  if (pool_) {
+    pool_->RemoveLayeredPool(layered_pool);
+    layered_pool_ = NULL;
+  }
+}
+
 void ClientSocketHandle::OnIOComplete(int result) {
   CompletionCallback callback = user_callback_;
   user_callback_.Reset();
diff --git a/net/socket/client_socket_handle.h b/net/socket/client_socket_handle.h
index 0b9a6527..e2ac23f 100644
--- a/net/socket/client_socket_handle.h
+++ b/net/socket/client_socket_handle.h
@@ -92,6 +92,12 @@
   // initialized the ClientSocketHandle.
   LoadState GetLoadState() const;
 
+  bool IsPoolStalled() const;
+
+  void AddLayeredPool(LayeredPool* layered_pool);
+
+  void RemoveLayeredPool(LayeredPool* layered_pool);
+
   // Returns true when Init() has completed successfully.
   bool is_initialized() const { return is_initialized_; }
 
@@ -164,6 +170,7 @@
 
   bool is_initialized_;
   ClientSocketPool* pool_;
+  LayeredPool* layered_pool_;
   scoped_ptr<StreamSocket> socket_;
   std::string group_name_;
   bool is_reused_;
diff --git a/net/socket/client_socket_pool.cc b/net/socket/client_socket_pool.cc
index a1967efd..0eebd11 100644
--- a/net/socket/client_socket_pool.cc
+++ b/net/socket/client_socket_pool.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
diff --git a/net/socket/client_socket_pool.h b/net/socket/client_socket_pool.h
index 92f0c413..1c03ecf 100644
--- a/net/socket/client_socket_pool.h
+++ b/net/socket/client_socket_pool.h
@@ -29,6 +29,17 @@
 class ClientSocketPoolHistograms;
 class StreamSocket;
 
+// ClientSocketPools are layered. This defines an interface for lower level
+// socket pools to communicate with higher layer pools.
+class NET_EXPORT LayeredPool {
+ public:
+  virtual ~LayeredPool() {};
+
+  // Instructs the LayeredPool to close an idle connection. Returns true if
+  // one was closed.
+  virtual bool CloseOneIdleConnection() = 0;
+};
+
 // A ClientSocketPool is used to restrict the number of sockets open at a time.
 // It also maintains a list of idle persistent sockets.
 //
@@ -110,6 +121,10 @@
   // the pool.  Does not flush any pools wrapped by |this|.
   virtual void Flush() = 0;
 
+  // Returns true if there is currently a request blocked on the
+  // per-pool (not per-host) max socket limit.
+  virtual bool IsStalled() const = 0;
+
   // Called to close any idle connections held by the connection manager.
   virtual void CloseIdleSockets() = 0;
 
@@ -123,6 +138,12 @@
   virtual LoadState GetLoadState(const std::string& group_name,
                                  const ClientSocketHandle* handle) const = 0;
 
+  // Adds a LayeredPool on top of |this|.
+  virtual void AddLayeredPool(LayeredPool* layered_pool) = 0;
+
+  // Removes a LayeredPool from |this|.
+  virtual void RemoveLayeredPool(LayeredPool* layered_pool) = 0;
+
   // Retrieves information on the current state of the pool as a
   // DictionaryValue.  Caller takes possession of the returned value.
   // If |include_nested_pools| is true, the states of any nested
diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc
index d230ad4f..ab959cf0 100644
--- a/net/socket/client_socket_pool_base.cc
+++ b/net/socket/client_socket_pool_base.cc
@@ -207,6 +207,7 @@
   DCHECK(group_map_.empty());
   DCHECK(pending_callback_map_.empty());
   DCHECK_EQ(0, connecting_socket_count_);
+  CHECK(higher_layer_pools_.empty());
 
   NetworkChangeNotifier::RemoveIPAddressObserver(this);
 }
@@ -238,6 +239,18 @@
   return req;
 }
 
+void ClientSocketPoolBaseHelper::AddLayeredPool(LayeredPool* pool) {
+  CHECK(pool);
+  CHECK(!ContainsKey(higher_layer_pools_, pool));
+  higher_layer_pools_.insert(pool);
+}
+
+void ClientSocketPoolBaseHelper::RemoveLayeredPool(LayeredPool* pool) {
+  CHECK(pool);
+  CHECK(ContainsKey(higher_layer_pools_, pool));
+  higher_layer_pools_.erase(pool);
+}
+
 int ClientSocketPoolBaseHelper::RequestSocket(
     const std::string& group_name,
     const Request* request) {
@@ -336,13 +349,22 @@
   // Can we make another active socket now?
   if (!group->HasAvailableSocketSlot(max_sockets_per_group_) &&
       !request->ignore_limits()) {
+    // TODO(willchan): Consider whether or not we need to close a socket in a
+    // higher layered group. I don't think this makes sense since we would just
+    // reuse that socket then if we needed one and wouldn't make it down to this
+    // layer.
     request->net_log().AddEvent(
         NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL);
     return ERR_IO_PENDING;
   }
 
   if (ReachedMaxSocketsLimit() && !request->ignore_limits()) {
+    // NOTE(mmenke):  Wonder if we really need different code for each case
+    // here.  Only reason for them now seems to be preconnects.
     if (idle_socket_count() > 0) {
+      // There's an idle socket in this pool. Either that's because there's
+      // still one in this group, but we got here due to preconnecting bypassing
+      // idle sockets, or because there's an idle socket in another group.
       bool closed = CloseOneIdleSocketExceptInGroup(group);
       if (preconnecting && !closed)
         return ERR_PRECONNECT_MAX_SOCKET_LIMIT;
@@ -355,7 +377,8 @@
     }
   }
 
-  // We couldn't find a socket to reuse, so allocate and connect a new one.
+  // We couldn't find a socket to reuse, and there's space to allocate one,
+  // so allocate and connect a new one.
   scoped_ptr<ConnectJob> connect_job(
       connect_job_factory_->NewConnectJob(group_name, *request, this));
 
@@ -617,7 +640,8 @@
     group_dict->Set("connect_jobs", connect_jobs_list);
 
     group_dict->SetBoolean("is_stalled",
-                           group->IsStalled(max_sockets_per_group_));
+                           group->IsStalledOnPoolMaxSockets(
+                               max_sockets_per_group_));
     group_dict->SetBoolean("has_backup_job", group->HasBackupJob());
 
     all_groups_dict->SetWithoutPathExpansion(it->first, group_dict);
@@ -792,18 +816,22 @@
 // are not at the |max_sockets_per_group_| limit. Note: for requests with
 // the same priority, the winner is based on group hash ordering (and not
 // insertion order).
-bool ClientSocketPoolBaseHelper::FindTopStalledGroup(Group** group,
-                                                     std::string* group_name) {
+bool ClientSocketPoolBaseHelper::FindTopStalledGroup(
+    Group** group,
+    std::string* group_name) const {
+  CHECK((group && group_name) || (!group && !group_name));
   Group* top_group = NULL;
   const std::string* top_group_name = NULL;
   bool has_stalled_group = false;
-  for (GroupMap::iterator i = group_map_.begin();
+  for (GroupMap::const_iterator i = group_map_.begin();
        i != group_map_.end(); ++i) {
     Group* curr_group = i->second;
     const RequestQueue& queue = curr_group->pending_requests();
     if (queue.empty())
       continue;
-    if (curr_group->IsStalled(max_sockets_per_group_)) {
+    if (curr_group->IsStalledOnPoolMaxSockets(max_sockets_per_group_)) {
+      if (!group)
+        return true;
       has_stalled_group = true;
       bool has_higher_priority = !top_group ||
           curr_group->TopPendingPriority() < top_group->TopPendingPriority();
@@ -815,8 +843,11 @@
   }
 
   if (top_group) {
+    CHECK(group);
     *group = top_group;
     *group_name = *top_group_name;
+  } else {
+    CHECK(!has_stalled_group);
   }
   return has_stalled_group;
 }
@@ -889,6 +920,25 @@
   AbortAllRequests();
 }
 
+bool ClientSocketPoolBaseHelper::IsStalled() const {
+  // If we are not using |max_sockets_|, then clearly we are not stalled
+  if ((handed_out_socket_count_ + connecting_socket_count_) < max_sockets_)
+    return false;
+  // So in order to be stalled we need to be using |max_sockets_| AND
+  // we need to have a request that is actually stalled on the global
+  // socket limit.  To find such a request, we look for a group that
+  // a has more requests that jobs AND where the number of jobs is less
+  // than |max_sockets_per_group_|.  (If the number of jobs is equal to
+  // |max_sockets_per_group_|, then the request is stalled on the group,
+  // which does not count.)
+  for (GroupMap::const_iterator it = group_map_.begin();
+       it != group_map_.end(); ++it) {
+    if (it->second->IsStalledOnPoolMaxSockets(max_sockets_per_group_))
+      return true;
+  }
+  return false;
+}
+
 void ClientSocketPoolBaseHelper::RemoveConnectJob(ConnectJob* job,
                                                   Group* group) {
   CHECK_GT(connecting_socket_count_, 0);
@@ -1025,8 +1075,10 @@
   return true;
 }
 
-void ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
-  CloseOneIdleSocketExceptInGroup(NULL);
+bool ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
+  if (idle_socket_count() == 0)
+    return false;
+  return CloseOneIdleSocketExceptInGroup(NULL);
 }
 
 bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup(
@@ -1050,9 +1102,18 @@
     }
   }
 
-  if (!exception_group)
-    LOG(DFATAL) << "No idle socket found to close!.";
+  return false;
+}
 
+bool ClientSocketPoolBaseHelper::CloseOneIdleConnectionInLayeredPool() {
+  // This pool doesn't have any idle sockets. It's possible that a pool at a
+  // higher layer is holding one of this pool's sockets as active even though
+  // it's actually idle. Query the higher layers.
+  for (std::set<LayeredPool*>::const_iterator it = higher_layer_pools_.begin();
+       it != higher_layer_pools_.end(); ++it) {
+    if ((*it)->CloseOneIdleConnection())
+      return true;
+  }
   return false;
 }
 
diff --git a/net/socket/client_socket_pool_base.h b/net/socket/client_socket_pool_base.h
index f550e42..832c166 100644
--- a/net/socket/client_socket_pool_base.h
+++ b/net/socket/client_socket_pool_base.h
@@ -28,6 +28,7 @@
 #include <map>
 #include <set>
 #include <string>
+#include <vector>
 
 #include "base/basictypes.h"
 #include "base/memory/ref_counted.h"
@@ -239,6 +240,11 @@
 
   virtual ~ClientSocketPoolBaseHelper();
 
+  // Adds/Removes layered pools. It is expected that no layered pools remain
+  // by the time the destructor runs.
+  void AddLayeredPool(LayeredPool* pool);
+  void RemoveLayeredPool(LayeredPool* pool);
+
   // See ClientSocketPool::RequestSocket for documentation on this function.
   // ClientSocketPoolBaseHelper takes ownership of |request|, which must be
   // heap allocated.
@@ -261,6 +267,9 @@
   // See ClientSocketPool::Flush for documentation on this function.
   void Flush();
 
+  // See ClientSocketPool::IsStalled for documentation on this function.
+  bool IsStalled() const;
+
   // See ClientSocketPool::CloseIdleSockets for documentation on this function.
   void CloseIdleSockets();
 
@@ -306,6 +315,16 @@
   // sockets that timed out or can't be reused.  Made public for testing.
   void CleanupIdleSockets(bool force);
 
+  // Closes one idle socket.  Picks the first one encountered.
+  // TODO(willchan): Consider a better algorithm for doing this.  Perhaps we
+  // should keep an ordered list of idle sockets, and close them in order.
+  // Requires maintaining more state.  It's not clear if it's worth it since
+  // I'm not sure if we hit this situation often.
+  bool CloseOneIdleSocket();
+
+  // Checks layered pools to see if they can close an idle connection.
+  bool CloseOneIdleConnectionInLayeredPool();
+
   // See ClientSocketPool::GetInfoAsValue for documentation on this function.
   base::DictionaryValue* GetInfoAsValue(const std::string& name,
                                         const std::string& type) const;
@@ -371,7 +390,7 @@
           static_cast<int>(idle_sockets_.size());
     }
 
-    bool IsStalled(int max_sockets_per_group) const {
+    bool IsStalledOnPoolMaxSockets(int max_sockets_per_group) const {
       return HasAvailableSocketSlot(max_sockets_per_group) &&
           pending_requests_.size() > jobs_.size();
     }
@@ -457,9 +476,9 @@
 
   // Scans the group map for groups which have an available socket slot and
   // at least one pending request. Returns true if any groups are stalled, and
-  // if so, fills |group| and |group_name| with data of the stalled group
-  // having highest priority.
-  bool FindTopStalledGroup(Group** group, std::string* group_name);
+  // if so (and if both |group| and |group_name| are not NULL), fills |group|
+  // and |group_name| with data of the stalled group having highest priority.
+  bool FindTopStalledGroup(Group** group, std::string* group_name) const;
 
   // Called when timer_ fires.  This method scans the idle sockets removing
   // sockets that timed out or can't be reused.
@@ -511,13 +530,6 @@
   static void LogBoundConnectJobToRequest(
       const NetLog::Source& connect_job_source, const Request* request);
 
-  // Closes one idle socket.  Picks the first one encountered.
-  // TODO(willchan): Consider a better algorithm for doing this.  Perhaps we
-  // should keep an ordered list of idle sockets, and close them in order.
-  // Requires maintaining more state.  It's not clear if it's worth it since
-  // I'm not sure if we hit this situation often.
-  void CloseOneIdleSocket();
-
   // Same as CloseOneIdleSocket() except it won't close an idle socket in
   // |group|.  If |group| is NULL, it is ignored.  Returns true if it closed a
   // socket.
@@ -582,6 +594,8 @@
   // make sure that they are discarded rather than reused.
   int pool_generation_number_;
 
+  std::set<LayeredPool*> higher_layer_pools_;
+
   base::WeakPtrFactory<ClientSocketPoolBaseHelper> weak_factory_;
 
   DISALLOW_COPY_AND_ASSIGN(ClientSocketPoolBaseHelper);
@@ -648,6 +662,13 @@
   virtual ~ClientSocketPoolBase() {}
 
   // These member functions simply forward to ClientSocketPoolBaseHelper.
+  void AddLayeredPool(LayeredPool* pool) {
+    helper_.AddLayeredPool(pool);
+  }
+
+  void RemoveLayeredPool(LayeredPool* pool) {
+    helper_.RemoveLayeredPool(pool);
+  }
 
   // RequestSocket bundles up the parameters into a Request and then forwards to
   // ClientSocketPoolBaseHelper::RequestSocket().
@@ -692,6 +713,10 @@
     return helper_.ReleaseSocket(group_name, socket, id);
   }
 
+  void Flush() { helper_.Flush(); }
+
+  bool IsStalled() const { return helper_.IsStalled(); }
+
   void CloseIdleSockets() { return helper_.CloseIdleSockets(); }
 
   int idle_socket_count() const { return helper_.idle_socket_count(); }
@@ -740,7 +765,11 @@
 
   void EnableConnectBackupJobs() { helper_.EnableConnectBackupJobs(); }
 
-  void Flush() { helper_.Flush(); }
+  bool CloseOneIdleSocket() { return helper_.CloseOneIdleSocket(); }
+
+  bool CloseOneIdleConnectionInLayeredPool() {
+    return helper_.CloseOneIdleConnectionInLayeredPool();
+  }
 
  private:
   // This adaptor class exists to bridge the
diff --git a/net/socket/client_socket_pool_base_unittest.cc b/net/socket/client_socket_pool_base_unittest.cc
index 20cf1d3..099a1c6 100644
--- a/net/socket/client_socket_pool_base_unittest.cc
+++ b/net/socket/client_socket_pool_base_unittest.cc
@@ -4,6 +4,8 @@
 
 #include "net/socket/client_socket_pool_base.h"
 
+#include <vector>
+
 #include "base/bind.h"
 #include "base/bind_helpers.h"
 #include "base/callback.h"
@@ -28,8 +30,12 @@
 #include "net/socket/socket_test_util.h"
 #include "net/socket/ssl_host_info.h"
 #include "net/socket/stream_socket.h"
+#include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
+using ::testing::Invoke;
+using ::testing::Return;
+
 namespace net {
 
 namespace {
@@ -40,10 +46,18 @@
 
 class TestSocketParams : public base::RefCounted<TestSocketParams> {
  public:
-  bool ignore_limits() { return false; }
+  TestSocketParams() : ignore_limits_(false) {}
+
+  void set_ignore_limits(bool ignore_limits) {
+    ignore_limits_ = ignore_limits;
+  }
+  bool ignore_limits() { return ignore_limits_; }
+
  private:
   friend class base::RefCounted<TestSocketParams>;
   ~TestSocketParams() {}
+
+  bool ignore_limits_;
 };
 typedef ClientSocketPoolBase<TestSocketParams> TestClientSocketPoolBase;
 
@@ -358,12 +372,18 @@
  public:
   explicit TestConnectJobFactory(MockClientSocketFactory* client_socket_factory)
       : job_type_(TestConnectJob::kMockJob),
+        job_types_(NULL),
         client_socket_factory_(client_socket_factory) {}
 
   virtual ~TestConnectJobFactory() {}
 
   void set_job_type(TestConnectJob::JobType job_type) { job_type_ = job_type; }
 
+  void set_job_types(std::list<TestConnectJob::JobType>* job_types) {
+    job_types_ = job_types;
+    CHECK(!job_types_->empty());
+  }
+
   void set_timeout_duration(base::TimeDelta timeout_duration) {
     timeout_duration_ = timeout_duration;
   }
@@ -374,7 +394,13 @@
       const std::string& group_name,
       const TestClientSocketPoolBase::Request& request,
       ConnectJob::Delegate* delegate) const {
-    return new TestConnectJob(job_type_,
+    EXPECT_TRUE(!job_types_ || !job_types_->empty());
+    TestConnectJob::JobType job_type = job_type_;
+    if (job_types_ && !job_types_->empty()) {
+      job_type = job_types_->front();
+      job_types_->pop_front();
+    }
+    return new TestConnectJob(job_type,
                               group_name,
                               request,
                               timeout_duration_,
@@ -389,6 +415,7 @@
 
  private:
   TestConnectJob::JobType job_type_;
+  std::list<TestConnectJob::JobType>* job_types_;
   base::TimeDelta timeout_duration_;
   MockClientSocketFactory* const client_socket_factory_;
 
@@ -450,6 +477,10 @@
     base_.Flush();
   }
 
+  virtual bool IsStalled() const OVERRIDE {
+    return base_.IsStalled();
+  }
+
   virtual void CloseIdleSockets() OVERRIDE {
     base_.CloseIdleSockets();
   }
@@ -469,6 +500,14 @@
     return base_.GetLoadState(group_name, handle);
   }
 
+  virtual void AddLayeredPool(LayeredPool* pool) OVERRIDE {
+    base_.AddLayeredPool(pool);
+  }
+
+  virtual void RemoveLayeredPool(LayeredPool* pool) OVERRIDE {
+    base_.RemoveLayeredPool(pool);
+  }
+
   virtual DictionaryValue* GetInfoAsValue(
       const std::string& name,
       const std::string& type,
@@ -502,6 +541,10 @@
 
   void EnableConnectBackupJobs() { base_.EnableConnectBackupJobs(); }
 
+  bool CloseOneIdleConnectionInLayeredPool() {
+    return base_.CloseOneIdleConnectionInLayeredPool();
+  }
+
  private:
   TestClientSocketPoolBase base_;
 
@@ -1167,6 +1210,7 @@
   ClientSocketHandle stalled_handle;
   TestCompletionCallback callback;
   {
+    EXPECT_FALSE(pool_->IsStalled());
     ClientSocketHandle handles[kDefaultMaxSockets];
     for (int i = 0; i < kDefaultMaxSockets; ++i) {
       TestCompletionCallback callback;
@@ -1181,6 +1225,7 @@
 
     EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count());
     EXPECT_EQ(0, pool_->IdleSocketCount());
+    EXPECT_FALSE(pool_->IsStalled());
 
     // Now we will hit the socket limit.
     EXPECT_EQ(ERR_IO_PENDING, stalled_handle.Init("foo",
@@ -1189,6 +1234,7 @@
                                                   callback.callback(),
                                                   pool_.get(),
                                                   BoundNetLog()));
+    EXPECT_TRUE(pool_->IsStalled());
 
     // Dropping out of scope will close all handles and return them to idle.
   }
@@ -2008,9 +2054,9 @@
   EXPECT_EQ(1, handle2.socket()->Write(NULL, 1, CompletionCallback()));
   handle2.Reset();
 
-  // The idle socket timeout value was set to 10 milliseconds. Wait 20
+  // The idle socket timeout value was set to 10 milliseconds. Wait 100
   // milliseconds so the sockets timeout.
-  base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(20));
+  base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100));
   MessageLoop::current()->RunAllPending();
 
   ASSERT_EQ(2, pool_->IdleSocketCount());
@@ -3042,6 +3088,7 @@
 
   ASSERT_TRUE(pool_->HasGroup("a"));
   EXPECT_EQ(kDefaultMaxSockets - 1, pool_->NumConnectJobsInGroup("a"));
+  EXPECT_FALSE(pool_->IsStalled());
 
   ASSERT_FALSE(pool_->HasGroup("b"));
 
@@ -3050,6 +3097,7 @@
 
   ASSERT_TRUE(pool_->HasGroup("b"));
   EXPECT_EQ(1, pool_->NumConnectJobsInGroup("b"));
+  EXPECT_FALSE(pool_->IsStalled());
 }
 
 TEST_F(ClientSocketPoolBaseTest, RequestSocketsCountIdleSockets) {
diff --git a/net/socket/socks_client_socket_pool.cc b/net/socket/socks_client_socket_pool.cc
index ae963bfe..a7c7ecb 100644
--- a/net/socket/socks_client_socket_pool.cc
+++ b/net/socket/socks_client_socket_pool.cc
@@ -199,9 +199,16 @@
             new SOCKSConnectJobFactory(transport_pool,
                                        host_resolver,
                                        net_log)) {
+  // We should always have a |transport_pool_| except in unit tests.
+  if (transport_pool_)
+    transport_pool_->AddLayeredPool(this);
 }
 
-SOCKSClientSocketPool::~SOCKSClientSocketPool() {}
+SOCKSClientSocketPool::~SOCKSClientSocketPool() {
+  // We should always have a |transport_pool_| except in unit tests.
+  if (transport_pool_)
+    transport_pool_->RemoveLayeredPool(this);
+}
 
 int SOCKSClientSocketPool::RequestSocket(
     const std::string& group_name, const void* socket_params,
@@ -239,6 +246,10 @@
   base_.Flush();
 }
 
+bool SOCKSClientSocketPool::IsStalled() const {
+  return base_.IsStalled() || transport_pool_->IsStalled();
+}
+
 void SOCKSClientSocketPool::CloseIdleSockets() {
   base_.CloseIdleSockets();
 }
@@ -257,6 +268,14 @@
   return base_.GetLoadState(group_name, handle);
 }
 
+void SOCKSClientSocketPool::AddLayeredPool(LayeredPool* layered_pool) {
+  base_.AddLayeredPool(layered_pool);
+}
+
+void SOCKSClientSocketPool::RemoveLayeredPool(LayeredPool* layered_pool) {
+  base_.RemoveLayeredPool(layered_pool);
+}
+
 DictionaryValue* SOCKSClientSocketPool::GetInfoAsValue(
     const std::string& name,
     const std::string& type,
@@ -280,4 +299,10 @@
   return base_.histograms();
 };
 
+bool SOCKSClientSocketPool::CloseOneIdleConnection() {
+  if (base_.CloseOneIdleSocket())
+    return true;
+  return base_.CloseOneIdleConnectionInLayeredPool();
+}
+
 }  // namespace net
diff --git a/net/socket/socks_client_socket_pool.h b/net/socket/socks_client_socket_pool.h
index 3fc7df6..8583d96 100644
--- a/net/socket/socks_client_socket_pool.h
+++ b/net/socket/socks_client_socket_pool.h
@@ -105,7 +105,8 @@
   DISALLOW_COPY_AND_ASSIGN(SOCKSConnectJob);
 };
 
-class NET_EXPORT_PRIVATE SOCKSClientSocketPool : public ClientSocketPool {
+class NET_EXPORT_PRIVATE SOCKSClientSocketPool
+    : public ClientSocketPool, public LayeredPool {
  public:
   SOCKSClientSocketPool(
       int max_sockets,
@@ -139,6 +140,8 @@
 
   virtual void Flush() OVERRIDE;
 
+  virtual bool IsStalled() const OVERRIDE;
+
   virtual void CloseIdleSockets() OVERRIDE;
 
   virtual int IdleSocketCount() const OVERRIDE;
@@ -150,6 +153,10 @@
       const std::string& group_name,
       const ClientSocketHandle* handle) const OVERRIDE;
 
+  virtual void AddLayeredPool(LayeredPool* layered_pool) OVERRIDE;
+
+  virtual void RemoveLayeredPool(LayeredPool* layered_pool) OVERRIDE;
+
   virtual base::DictionaryValue* GetInfoAsValue(
       const std::string& name,
       const std::string& type,
@@ -159,6 +166,9 @@
 
   virtual ClientSocketPoolHistograms* histograms() const OVERRIDE;
 
+  // LayeredPool implementation.
+  virtual bool CloseOneIdleConnection() OVERRIDE;
+
  private:
   typedef ClientSocketPoolBase<SOCKSSocketParams> PoolBase;
 
diff --git a/net/socket/ssl_client_socket_pool.cc b/net/socket/ssl_client_socket_pool.cc
index fb2973f4c..7cdf2f8d 100644
--- a/net/socket/ssl_client_socket_pool.cc
+++ b/net/socket/ssl_client_socket_pool.cc
@@ -478,9 +478,21 @@
       ssl_config_service_(ssl_config_service) {
   if (ssl_config_service_)
     ssl_config_service_->AddObserver(this);
+  if (transport_pool_)
+    transport_pool_->AddLayeredPool(this);
+  if (socks_pool_)
+    socks_pool_->AddLayeredPool(this);
+  if (http_proxy_pool_)
+    http_proxy_pool_->AddLayeredPool(this);
 }
 
 SSLClientSocketPool::~SSLClientSocketPool() {
+  if (http_proxy_pool_)
+    http_proxy_pool_->RemoveLayeredPool(this);
+  if (socks_pool_)
+    socks_pool_->RemoveLayeredPool(this);
+  if (transport_pool_)
+    transport_pool_->RemoveLayeredPool(this);
   if (ssl_config_service_)
     ssl_config_service_->RemoveObserver(this);
 }
@@ -533,6 +545,13 @@
   base_.Flush();
 }
 
+bool SSLClientSocketPool::IsStalled() const {
+  return base_.IsStalled() ||
+      (transport_pool_ && transport_pool_->IsStalled()) ||
+      (socks_pool_ && socks_pool_->IsStalled()) ||
+      (http_proxy_pool_ && http_proxy_pool_->IsStalled());
+}
+
 void SSLClientSocketPool::CloseIdleSockets() {
   base_.CloseIdleSockets();
 }
@@ -551,6 +570,14 @@
   return base_.GetLoadState(group_name, handle);
 }
 
+void SSLClientSocketPool::AddLayeredPool(LayeredPool* layered_pool) {
+  base_.AddLayeredPool(layered_pool);
+}
+
+void SSLClientSocketPool::RemoveLayeredPool(LayeredPool* layered_pool) {
+  base_.RemoveLayeredPool(layered_pool);
+}
+
 DictionaryValue* SSLClientSocketPool::GetInfoAsValue(
     const std::string& name,
     const std::string& type,
@@ -590,4 +617,10 @@
   Flush();
 }
 
+bool SSLClientSocketPool::CloseOneIdleConnection() {
+  if (base_.CloseOneIdleSocket())
+    return true;
+  return base_.CloseOneIdleConnectionInLayeredPool();
+}
+
 }  // namespace net
diff --git a/net/socket/ssl_client_socket_pool.h b/net/socket/ssl_client_socket_pool.h
index d80ace97..26e5f56 100644
--- a/net/socket/ssl_client_socket_pool.h
+++ b/net/socket/ssl_client_socket_pool.h
@@ -166,6 +166,7 @@
 
 class NET_EXPORT_PRIVATE SSLClientSocketPool
     : public ClientSocketPool,
+      public LayeredPool,
       public SSLConfigService::Observer {
  public:
   // Only the pools that will be used are required. i.e. if you never
@@ -211,6 +212,8 @@
 
   virtual void Flush() OVERRIDE;
 
+  virtual bool IsStalled() const OVERRIDE;
+
   virtual void CloseIdleSockets() OVERRIDE;
 
   virtual int IdleSocketCount() const OVERRIDE;
@@ -222,6 +225,10 @@
       const std::string& group_name,
       const ClientSocketHandle* handle) const OVERRIDE;
 
+  virtual void AddLayeredPool(LayeredPool* layered_pool) OVERRIDE;
+
+  virtual void RemoveLayeredPool(LayeredPool* layered_pool) OVERRIDE;
+
   virtual base::DictionaryValue* GetInfoAsValue(
       const std::string& name,
       const std::string& type,
@@ -231,6 +238,9 @@
 
   virtual ClientSocketPoolHistograms* histograms() const OVERRIDE;
 
+  // LayeredPool implementation.
+  virtual bool CloseOneIdleConnection() OVERRIDE;
+
  private:
   typedef ClientSocketPoolBase<SSLSocketParams> PoolBase;
 
diff --git a/net/socket/transport_client_socket_pool.cc b/net/socket/transport_client_socket_pool.cc
index d11099f..a00e8101 100644
--- a/net/socket/transport_client_socket_pool.cc
+++ b/net/socket/transport_client_socket_pool.cc
@@ -449,6 +449,10 @@
   base_.Flush();
 }
 
+bool TransportClientSocketPool::IsStalled() const {
+  return base_.IsStalled();
+}
+
 void TransportClientSocketPool::CloseIdleSockets() {
   base_.CloseIdleSockets();
 }
@@ -467,6 +471,14 @@
   return base_.GetLoadState(group_name, handle);
 }
 
+void TransportClientSocketPool::AddLayeredPool(LayeredPool* layered_pool) {
+  base_.AddLayeredPool(layered_pool);
+}
+
+void TransportClientSocketPool::RemoveLayeredPool(LayeredPool* layered_pool) {
+  base_.RemoveLayeredPool(layered_pool);
+}
+
 DictionaryValue* TransportClientSocketPool::GetInfoAsValue(
     const std::string& name,
     const std::string& type,
diff --git a/net/socket/transport_client_socket_pool.h b/net/socket/transport_client_socket_pool.h
index a4b4143d9..ef3f6fa 100644
--- a/net/socket/transport_client_socket_pool.h
+++ b/net/socket/transport_client_socket_pool.h
@@ -157,6 +157,7 @@
                              StreamSocket* socket,
                              int id) OVERRIDE;
   virtual void Flush() OVERRIDE;
+  virtual bool IsStalled() const OVERRIDE;
   virtual void CloseIdleSockets() OVERRIDE;
   virtual int IdleSocketCount() const OVERRIDE;
   virtual int IdleSocketCountInGroup(
@@ -164,6 +165,8 @@
   virtual LoadState GetLoadState(
       const std::string& group_name,
       const ClientSocketHandle* handle) const OVERRIDE;
+  virtual void AddLayeredPool(LayeredPool* layered_pool) OVERRIDE;
+  virtual void RemoveLayeredPool(LayeredPool* layered_pool) OVERRIDE;
   virtual base::DictionaryValue* GetInfoAsValue(
       const std::string& name,
       const std::string& type,