Revert 113305 - Revert 113300 - Revert of 112134 of Revert 112130 - Close idle connections / SPDY sessions when needed.

Because each socket pool tracks idle connection state independently, a higher-level socket pool can effectively hold an idle socket inside a lower-level socket pool. From the lower-level socket pool's perspective, the socket is being "actively" used. From the higher-level pool's perspective (including SpdySession, which is more of a connection manager), the connection is idle and can be closed once we have hit a limit.

Normally this isn't a big deal, except when we have a lot of idle SPDY connections and are connecting via a proxy, where the connection limits through the proxy server are low. We address this by allowing lower-level socket pools to tell higher-level socket pools to close an idle socket when the lower level needs a slot.
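
The shape of the mechanism is roughly the sketch below (a simplified, standalone illustration of the LayeredPool idea rather than the actual net/ classes; the real implementation is in the diff that follows):

#include <set>

// A higher-level pool (or SPDY session owner) implements this interface so
// that a lower-level pool can ask it to give up an idle connection.
class LayeredPool {
 public:
  virtual ~LayeredPool() {}
  // Returns true if an idle connection was closed, freeing a socket slot in
  // the lower-level pool.
  virtual bool CloseOneIdleConnection() = 0;
};

// Lower-level pool: when it hits its socket limit and has no idle sockets of
// its own, it walks the registered higher-level pools and asks each one in
// turn to close an idle connection.
class LowerLevelPool {
 public:
  void AddLayeredPool(LayeredPool* pool) { higher_layer_pools_.insert(pool); }
  void RemoveLayeredPool(LayeredPool* pool) { higher_layer_pools_.erase(pool); }

  bool CloseOneIdleConnectionInLayeredPool() {
    for (std::set<LayeredPool*>::const_iterator it =
             higher_layer_pools_.begin();
         it != higher_layer_pools_.end(); ++it) {
      if ((*it)->CloseOneIdleConnection())
        return true;
    }
    return false;
  }

 private:
  std::set<LayeredPool*> higher_layer_pools_;
};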

Fixed ASAN test failures by removing .Times(1) and .Times(2) from the CloseMultipleIdleSocketsHeldByLayeredPoolWhenNeeded unittest (this removes the test's reliance on the iteration order of the std::set in CloseOneIdleConnectionInLayeredPool). ASAN probably causes the memory allocator to place the pools at different addresses; the std::set is ordered by LayeredPool*, which is the address of the LayeredPool (willchan).
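
To illustrate the relaxed expectation, a hypothetical gmock sketch (MockLayeredPool here is a stand-in and may not match the actual helper in client_socket_pool_base_unittest.cc):

#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

using ::testing::Return;

// Stand-in mock for a higher-level pool that can release an idle connection.
class MockLayeredPool {
 public:
  MOCK_METHOD0(CloseOneIdleConnection, bool());
};

TEST(LayeredPoolExpectationSketch, CountAgnostic) {
  MockLayeredPool mock_layered_pool;
  // Before the fix the test pinned an exact call count (.Times(1)/.Times(2)),
  // which also baked in how often this particular mock is reached via the
  // std::set<LayeredPool*> iteration (pointer order, allocator-dependent and
  // therefore different under ASAN). The relaxed expectation only requires
  // the mock to release a connection whenever it happens to be asked:
  EXPECT_CALL(mock_layered_pool, CloseOneIdleConnection())
      .WillRepeatedly(Return(true));
}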

Added NET_EXPORT to the LayeredPool class definition to fix the Windows shared (component) build.
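
Roughly (a sketch of the annotation; the exact header and placement in net/ may differ):

#include "net/base/net_export.h"

// NET_EXPORT marks the class for export from the net shared library so that
// code in other modules can link against it in the Windows component build.
class NET_EXPORT LayeredPool {
 public:
  virtual ~LayeredPool() {}
  virtual bool CloseOneIdleConnection() = 0;
};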

BUG=62364, 92244, 105839
TEST=none


Review URL: http://codereview.chromium.org/8340012

[email protected]
Review URL: http://codereview.chromium.org/8745007

[email protected]

Review URL: http://codereview.chromium.org/8803019

[email protected]
Committed: http://src.chromium.org/viewvc/chrome?view=rev&revision=113305

[email protected]
Review URL: http://codereview.chromium.org/8836002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@113405 0039d316-1c4b-4281-b951-d872f2087c98
diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc
index 4c1600ee..bb7eb06 100644
--- a/net/socket/client_socket_pool_base.cc
+++ b/net/socket/client_socket_pool_base.cc
@@ -207,6 +207,7 @@
   DCHECK(group_map_.empty());
   DCHECK(pending_callback_map_.empty());
   DCHECK_EQ(0, connecting_socket_count_);
+  DCHECK(higher_layer_pools_.empty());
 
   NetworkChangeNotifier::RemoveIPAddressObserver(this);
 }
@@ -236,6 +237,18 @@
   return req;
 }
 
+void ClientSocketPoolBaseHelper::AddLayeredPool(LayeredPool* pool) {
+  CHECK(pool);
+  CHECK(!ContainsKey(higher_layer_pools_, pool));
+  higher_layer_pools_.insert(pool);
+}
+
+void ClientSocketPoolBaseHelper::RemoveLayeredPool(LayeredPool* pool) {
+  CHECK(pool);
+  CHECK(ContainsKey(higher_layer_pools_, pool));
+  higher_layer_pools_.erase(pool);
+}
+
 int ClientSocketPoolBaseHelper::RequestSocket(
     const std::string& group_name,
     const Request* request) {
@@ -334,6 +347,10 @@
   // Can we make another active socket now?
   if (!group->HasAvailableSocketSlot(max_sockets_per_group_) &&
       !request->ignore_limits()) {
+    // TODO(willchan): Consider whether or not we need to close a socket in a
+    // higher layered group. I don't think this makes sense, since if we needed
+    // that socket we would just reuse it and never make it down to this
+    // layer.
     request->net_log().AddEvent(
         NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL);
     return ERR_IO_PENDING;
@@ -341,19 +358,28 @@
 
   if (ReachedMaxSocketsLimit() && !request->ignore_limits()) {
     if (idle_socket_count() > 0) {
+      // There's an idle socket in this pool. Either that's because there's
+      // still one in this group, but we got here due to preconnecting bypassing
+      // idle sockets, or because there's an idle socket in another group.
       bool closed = CloseOneIdleSocketExceptInGroup(group);
       if (preconnecting && !closed)
         return ERR_PRECONNECT_MAX_SOCKET_LIMIT;
     } else {
-      // We could check if we really have a stalled group here, but it requires
-      // a scan of all groups, so just flip a flag here, and do the check later.
-      request->net_log().AddEvent(
-          NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL);
-      return ERR_IO_PENDING;
+      do {
+        if (!CloseOneIdleConnectionInLayeredPool()) {
+          // We could check if we really have a stalled group here, but it
+          // requires a scan of all groups, so just flip a flag here, and do
+          // the check later.
+          request->net_log().AddEvent(
+              NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL);
+          return ERR_IO_PENDING;
+        }
+      } while (ReachedMaxSocketsLimit());
     }
   }
 
-  // We couldn't find a socket to reuse, so allocate and connect a new one.
+  // We couldn't find a socket to reuse, and there's space to allocate one,
+  // so allocate and connect a new one.
   scoped_ptr<ConnectJob> connect_job(
       connect_job_factory_->NewConnectJob(group_name, *request, this));
 
@@ -790,18 +816,22 @@
 // are not at the |max_sockets_per_group_| limit. Note: for requests with
 // the same priority, the winner is based on group hash ordering (and not
 // insertion order).
-bool ClientSocketPoolBaseHelper::FindTopStalledGroup(Group** group,
-                                                     std::string* group_name) {
+bool ClientSocketPoolBaseHelper::FindTopStalledGroup(
+    Group** group,
+    std::string* group_name) const {
+  CHECK((group && group_name) || (!group && !group_name));
   Group* top_group = NULL;
   const std::string* top_group_name = NULL;
   bool has_stalled_group = false;
-  for (GroupMap::iterator i = group_map_.begin();
+  for (GroupMap::const_iterator i = group_map_.begin();
        i != group_map_.end(); ++i) {
     Group* curr_group = i->second;
     const RequestQueue& queue = curr_group->pending_requests();
     if (queue.empty())
       continue;
     if (curr_group->IsStalled(max_sockets_per_group_)) {
+      if (!group)
+        return true;
       has_stalled_group = true;
       bool has_higher_priority = !top_group ||
           curr_group->TopPendingPriority() < top_group->TopPendingPriority();
@@ -813,8 +843,11 @@
   }
 
   if (top_group) {
+    CHECK(group);
     *group = top_group;
     *group_name = *top_group_name;
+  } else {
+    CHECK(!has_stalled_group);
   }
   return has_stalled_group;
 }
@@ -887,6 +920,17 @@
   AbortAllRequests();
 }
 
+bool ClientSocketPoolBaseHelper::IsStalled() const {
+  if ((handed_out_socket_count_ + connecting_socket_count_) < max_sockets_)
+    return false;
+  for (GroupMap::const_iterator it = group_map_.begin();
+       it != group_map_.end(); it++) {
+    if (it->second->IsStalled(max_sockets_per_group_))
+      return true;
+  }
+  return false;
+}
+
 void ClientSocketPoolBaseHelper::RemoveConnectJob(ConnectJob* job,
                                                   Group* group) {
   CHECK_GT(connecting_socket_count_, 0);
@@ -1023,8 +1067,10 @@
   return true;
 }
 
-void ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
-  CloseOneIdleSocketExceptInGroup(NULL);
+bool ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
+  if (idle_socket_count() == 0)
+    return false;
+  return CloseOneIdleSocketExceptInGroup(NULL);
 }
 
 bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup(
@@ -1048,9 +1094,18 @@
     }
   }
 
-  if (!exception_group)
-    LOG(DFATAL) << "No idle socket found to close!.";
+  return false;
+}
 
+bool ClientSocketPoolBaseHelper::CloseOneIdleConnectionInLayeredPool() {
+  // This pool doesn't have any idle sockets. It's possible that a pool at a
+  // higher layer is holding one of this pool's sockets active even though,
+  // from its own perspective, the connection is idle. Query the higher layers.
+  for (std::set<LayeredPool*>::const_iterator it = higher_layer_pools_.begin();
+       it != higher_layer_pools_.end(); ++it) {
+    if ((*it)->CloseOneIdleConnection())
+      return true;
+  }
   return false;
 }