Remove HigherLayeredPool logic from ClientSocketPoolBase.
Previously, socket pools could be stacked on top of each other, with
each socket in one pool owning a socket in the pool below it. When a
lower layer pool had no available socket slots and a pending request, it
would call into the next layer up and ask it to close a socket. Now that
socket pools are no longer layered on top of each other, they no longer
need the logic to act as the higher layered pool.
The code for socket pools to act as a lower layer pool remains in place,
however. H2 connections can still sit on top of socket pools, and each
H2 session is treated as a higher layer "pool" when a socket pool is
stalled at its global socket limit. A simplified sketch of that
remaining interaction is included below.
Bug: 472729
Change-Id: I3bbec857166a46a86b07d9ecb96cc77022a7f5ce
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1496490
Reviewed-by: Asanka Herath <[email protected]>
Commit-Queue: Matt Menke <[email protected]>
Cr-Commit-Position: refs/heads/master@{#637831}
diff --git a/net/socket/client_socket_pool_base_unittest.cc b/net/socket/client_socket_pool_base_unittest.cc
index 16e52842..0edabe39 100644
--- a/net/socket/client_socket_pool_base_unittest.cc
+++ b/net/socket/client_socket_pool_base_unittest.cc
@@ -616,8 +616,7 @@
base::TimeDelta unused_idle_socket_timeout,
base::TimeDelta used_idle_socket_timeout,
TestClientSocketPoolBase::ConnectJobFactory* connect_job_factory)
- : base_(NULL,
- max_sockets,
+ : base_(max_sockets,
max_sockets_per_group,
unused_idle_socket_timeout,
used_idle_socket_timeout,
@@ -699,8 +698,7 @@
std::unique_ptr<base::DictionaryValue> GetInfoAsValue(
const std::string& name,
- const std::string& type,
- bool include_nested_pools) const override {
+ const std::string& type) const override {
return base_.GetInfoAsValue(name, type);
}
@@ -737,10 +735,6 @@
void EnableConnectBackupJobs() { base_.EnableConnectBackupJobs(); }
- bool CloseOneIdleConnectionInHigherLayeredPool() {
- return base_.CloseOneIdleConnectionInHigherLayeredPool();
- }
-
private:
TestClientSocketPoolBase base_;
@@ -4383,29 +4377,6 @@
bool can_release_connection_;
};
-TEST_F(ClientSocketPoolBaseTest, FailToCloseIdleSocketsNotHeldByLayeredPool) {
- CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
- connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
-
- MockLayeredPool mock_layered_pool(pool_.get(), "foo");
- EXPECT_THAT(mock_layered_pool.RequestSocket(pool_.get()), IsOk());
- EXPECT_CALL(mock_layered_pool, CloseOneIdleConnection())
- .WillOnce(Return(false));
- EXPECT_FALSE(pool_->CloseOneIdleConnectionInHigherLayeredPool());
-}
-
-TEST_F(ClientSocketPoolBaseTest, ForciblyCloseIdleSocketsHeldByLayeredPool) {
- CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
- connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
-
- MockLayeredPool mock_layered_pool(pool_.get(), "foo");
- EXPECT_THAT(mock_layered_pool.RequestSocket(pool_.get()), IsOk());
- EXPECT_CALL(mock_layered_pool, CloseOneIdleConnection())
- .WillOnce(Invoke(&mock_layered_pool,
- &MockLayeredPool::ReleaseOneConnection));
- EXPECT_TRUE(pool_->CloseOneIdleConnectionInHigherLayeredPool());
-}
-
// Tests the basic case of closing an idle socket in a higher layered pool when
// a new request is issued and the lower layer pool is stalled.
TEST_F(ClientSocketPoolBaseTest, CloseIdleSocketsHeldByLayeredPoolWhenNeeded) {
@@ -4428,6 +4399,31 @@
EXPECT_THAT(callback.WaitForResult(), IsOk());
}
+// Tests the case that trying to close an idle socket in a higher layered pool
+// fails.
+TEST_F(ClientSocketPoolBaseTest,
+ CloseIdleSocketsHeldByLayeredPoolWhenNeededFails) {
+ CreatePool(1, 1);
+ connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
+
+ MockLayeredPool mock_layered_pool(pool_.get(), "foo");
+ mock_layered_pool.set_can_release_connection(false);
+ EXPECT_THAT(mock_layered_pool.RequestSocket(pool_.get()), IsOk());
+ EXPECT_CALL(mock_layered_pool, CloseOneIdleConnection())
+ .WillOnce(Invoke(&mock_layered_pool,
+ &MockLayeredPool::ReleaseOneConnection));
+ ClientSocketHandle handle;
+ TestCompletionCallback callback;
+ EXPECT_EQ(
+ ERR_IO_PENDING,
+ handle.Init("a", params_, DEFAULT_PRIORITY, SocketTag(),
+ ClientSocketPool::RespectLimits::ENABLED, callback.callback(),
+ ClientSocketPool::ProxyAuthCallback(), pool_.get(),
+ NetLogWithSource()));
+ base::RunLoop().RunUntilIdle();
+ EXPECT_FALSE(callback.have_result());
+}
+
// Same as above, but the idle socket is in the same group as the stalled
// socket, and closes the only other request in its group when closing requests
// in higher layered pools. This generally shouldn't happen, but it may be