// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/http/mock_http_cache.h"

#include <algorithm>
#include <limits>
#include <memory>
#include <utility>

#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "net/base/net_errors.h"
#include "net/http/http_cache_writers.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace net {

namespace {

// During testing, we are going to limit the size of a cache entry to this many
// bytes using DCHECKs in order to prevent a test from causing unbounded memory
// growth. In practice, a cache entry shouldn't come anywhere near this limit
// for tests that use the mock cache. If one does, that's likely a problem with
// the test. If a test requires using massive cache entries, it should use a
// real cache backend instead.
const int kMaxMockCacheEntrySize = 100 * 1000 * 1000;

// We can override the test mode for a given operation by setting this global
// variable.
int g_test_mode = 0;

int GetTestModeForEntry(const std::string& key) {
  std::string url = key;

  // 'key' is prefixed with an identifier if it corresponds to a cached POST.
  // Skip past that to locate the actual URL.
  //
  // TODO(darin): It breaks the abstraction a bit that we assume 'key' is an
  // URL corresponding to a registered MockTransaction. It would be good to
  // have another way to access the test_mode.
  if (isdigit(key[0])) {
    size_t slash = key.find('/');
    DCHECK(slash != std::string::npos);
    url = url.substr(slash + 1);
  }

  // If we split the cache by top frame origin, then the origin is prepended to
  // the key. Skip to the second url in the key.
  if (base::StartsWith(url, "_dk_", base::CompareCase::SENSITIVE)) {
    auto const pos = url.find(" http");
    url = url.substr(pos + 1);
  }

  const MockTransaction* t = FindMockTransaction(GURL(url));
  DCHECK(t);
  return t->test_mode;
}

}  // namespace

//-----------------------------------------------------------------------------

struct MockDiskEntry::CallbackInfo {
  scoped_refptr<MockDiskEntry> entry;
  net::CompletionOnceCallback callback;
  int result;
};

MockDiskEntry::MockDiskEntry(const std::string& key)
    : key_(key),
      in_memory_data_(0),
      max_file_size_(std::numeric_limits<int>::max()),
      doomed_(false),
      sparse_(false),
      fail_requests_(false),
      fail_sparse_requests_(false),
      busy_(false),
      delayed_(false),
      cancel_(false),
      defer_op_(DEFER_NONE),
      resume_return_code_(0) {
  test_mode_ = GetTestModeForEntry(key);
}

void MockDiskEntry::Doom() {
  doomed_ = true;
}

void MockDiskEntry::Close() {
  Release();
}

std::string MockDiskEntry::GetKey() const {
  return key_;
}

base::Time MockDiskEntry::GetLastUsed() const {
  return base::Time::Now();
}

base::Time MockDiskEntry::GetLastModified() const {
  return base::Time::Now();
}

int32_t MockDiskEntry::GetDataSize(int index) const {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  return static_cast<int32_t>(data_[index].size());
}

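// A read may complete synchronously (when TEST_MODE_SYNC_CACHE_READ is set),
// be deferred until ResumeDiskEntryOperation() is called (when |defer_op_| is
// DEFER_READ), or finish asynchronously via a callback posted with
// CallbackLater().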
int MockDiskEntry::ReadData(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            CompletionOnceCallback callback) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return ERR_FAILED;
  if (static_cast<size_t>(offset) == data_[index].size())
    return 0;

  int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset);
  memcpy(buf->data(), &data_[index][offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  // Pause and resume.
  if (defer_op_ == DEFER_READ) {
    defer_op_ = DEFER_NONE;
    resume_callback_ = std::move(callback);
    resume_return_code_ = num;
    return ERR_IO_PENDING;
  }

  CallbackLater(std::move(callback), num);
  return ERR_IO_PENDING;
}

void MockDiskEntry::ResumeDiskEntryOperation() {
  DCHECK(!resume_callback_.is_null());
  CallbackLater(std::move(resume_callback_), resume_return_code_);
  resume_return_code_ = 0;
}

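// Writes only support the truncating mode used by HttpCache (|truncate| must
// be true). A write to data stream 1 (the response body in HttpCache) that
// would exceed |max_file_size_| fails with ERR_FAILED, mirroring a backend
// that enforces a per-entry size limit.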
int MockDiskEntry::WriteData(int index,
                             int offset,
                             IOBuffer* buf,
                             int buf_len,
                             CompletionOnceCallback callback,
                             bool truncate) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());
  DCHECK(truncate);

  if (fail_requests_) {
    CallbackLater(std::move(callback), ERR_CACHE_READ_FAILURE);
    return ERR_IO_PENDING;
  }

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return ERR_FAILED;

  DCHECK_LT(offset + buf_len, kMaxMockCacheEntrySize);
  if (offset + buf_len > max_file_size_ && index == 1)
    return net::ERR_FAILED;

  data_[index].resize(offset + buf_len);
  if (buf_len)
    memcpy(&data_[index][offset], buf->data(), buf_len);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  if (defer_op_ == DEFER_WRITE) {
    defer_op_ = DEFER_NONE;
    resume_callback_ = std::move(callback);
    resume_return_code_ = buf_len;
    return ERR_IO_PENDING;
  }

  CallbackLater(std::move(callback), buf_len);
  return ERR_IO_PENDING;
}

int MockDiskEntry::ReadSparseData(int64_t offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (!sparse_ || busy_ || cancel_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return ERR_FAILED;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  DCHECK(offset < std::numeric_limits<int32_t>::max());
  int real_offset = static_cast<int>(offset);
  if (!buf_len)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, buf_len);
  memcpy(buf->data(), &data_[1][real_offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  CallbackLater(std::move(callback), num);
  busy_ = true;
  delayed_ = false;
  return ERR_IO_PENDING;
}

int MockDiskEntry::WriteSparseData(int64_t offset,
                                   IOBuffer* buf,
                                   int buf_len,
                                   CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (busy_ || cancel_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (!sparse_) {
    if (data_[1].size())
      return ERR_CACHE_OPERATION_NOT_SUPPORTED;
    sparse_ = true;
  }
  if (offset < 0)
    return ERR_FAILED;
  if (!buf_len)
    return 0;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  DCHECK(offset < std::numeric_limits<int32_t>::max());
  int real_offset = static_cast<int>(offset);

  if (static_cast<int>(data_[1].size()) < real_offset + buf_len) {
    DCHECK_LT(real_offset + buf_len, kMaxMockCacheEntrySize);
    data_[1].resize(real_offset + buf_len);
  }

  memcpy(&data_[1][real_offset], buf->data(), buf_len);
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(std::move(callback), buf_len);
  return ERR_IO_PENDING;
}

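// Scans data stream 1 starting at |offset| and reports the first contiguous
// run of non-zero bytes: |*start| is set to where the run begins and the
// returned count is its length (zero-filled gaps delimit the ranges).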
int MockDiskEntry::GetAvailableRange(int64_t offset,
                                     int len,
                                     int64_t* start,
                                     CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (!sparse_ || busy_ || cancel_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return ERR_FAILED;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  *start = offset;
  DCHECK(offset < std::numeric_limits<int32_t>::max());
  int real_offset = static_cast<int>(offset);
  if (static_cast<int>(data_[1].size()) < real_offset)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
  int count = 0;
  for (; num > 0; num--, real_offset++) {
    if (!count) {
      if (data_[1][real_offset]) {
        count++;
        *start = real_offset;
      }
    } else {
      if (!data_[1][real_offset])
        break;
      count++;
    }
  }
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return count;

  CallbackLater(std::move(callback), count);
  return ERR_IO_PENDING;
}

bool MockDiskEntry::CouldBeSparse() const {
  if (fail_sparse_requests_)
    return false;
  return sparse_;
}

void MockDiskEntry::CancelSparseIO() {
  cancel_ = true;
}

net::Error MockDiskEntry::ReadyForSparseIO(CompletionOnceCallback callback) {
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (!cancel_)
    return OK;

  cancel_ = false;
  DCHECK(!callback.is_null());
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return OK;

  // The pending operation is already in the message loop (and hopefully
  // already in the second pass). Just notify the caller that it finished.
  CallbackLater(std::move(callback), 0);
  return ERR_IO_PENDING;
}

void MockDiskEntry::SetLastUsedTimeForTest(base::Time time) {
  NOTREACHED();
}

// If |value| is true, don't deliver any completion callbacks until called
// again with |value| set to false. Caution: remember to enable callbacks
// again or all subsequent tests will fail.
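//
// Illustrative use in a test (assumed pattern, not taken from any specific
// caller):
//   MockDiskEntry::IgnoreCallbacks(true);
//   ... start cache operations whose completions should be held back ...
//   MockDiskEntry::IgnoreCallbacks(false);  // Delivers the stored callbacks.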
// Static.
void MockDiskEntry::IgnoreCallbacks(bool value) {
  if (ignore_callbacks_ == value)
    return;
  ignore_callbacks_ = value;
  if (!value)
    StoreAndDeliverCallbacks(false, nullptr, CompletionOnceCallback(), 0);
}

MockDiskEntry::~MockDiskEntry() = default;

// Unlike the callbacks for MockHttpTransaction, we want this one to run even
// if the consumer called Close on the MockDiskEntry. We achieve that by
// leveraging the fact that this class is reference counted.
void MockDiskEntry::CallbackLater(CompletionOnceCallback callback, int result) {
  if (ignore_callbacks_)
    return StoreAndDeliverCallbacks(true, this, std::move(callback), result);
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::BindOnce(&MockDiskEntry::RunCallback, this,
                                std::move(callback), result));
}

void MockDiskEntry::RunCallback(CompletionOnceCallback callback, int result) {
  if (busy_) {
    // This is kind of hacky, but controlling the behavior of just this entry
    // from a test is sort of complicated. What we really want to do is
    // delay the delivery of a sparse IO operation a little more so that the
    // request start operation (async) will finish without seeing the end of
    // this operation (already posted to the message loop)... and without
    // just delaying for n ms (which may cause trouble with slow bots). So
    // we re-post this operation (all async sparse IO operations will take two
    // trips through the message loop instead of one).
    if (!delayed_) {
      delayed_ = true;
      return CallbackLater(std::move(callback), result);
    }
  }
  busy_ = false;
  std::move(callback).Run(result);
}

// When |store| is true, stores the callback to be delivered later; otherwise
// delivers any callback previously stored.
// Static.
void MockDiskEntry::StoreAndDeliverCallbacks(bool store,
                                             MockDiskEntry* entry,
                                             CompletionOnceCallback callback,
                                             int result) {
  static std::vector<CallbackInfo> callback_list;
  if (store) {
    CallbackInfo c = {entry, std::move(callback), result};
    callback_list.push_back(std::move(c));
  } else {
    for (size_t i = 0; i < callback_list.size(); i++) {
      CallbackInfo& c = callback_list[i];
      c.entry->CallbackLater(std::move(c.callback), c.result);
    }
    callback_list.clear();
  }
}

// Statics.
bool MockDiskEntry::ignore_callbacks_ = false;

//-----------------------------------------------------------------------------

MockDiskCache::MockDiskCache()
    : Backend(DISK_CACHE),
      open_count_(0),
      create_count_(0),
      doomed_count_(0),
      max_file_size_(std::numeric_limits<int>::max()),
      fail_requests_(false),
      soft_failures_(false),
      soft_failures_one_instance_(false),
      double_create_check_(true),
      fail_sparse_requests_(false),
      support_in_memory_entry_data_(true),
      force_fail_callback_later_(false),
      defer_op_(MockDiskEntry::DEFER_NONE),
      resume_return_code_(0) {}

MockDiskCache::~MockDiskCache() {
  ReleaseAll();
}

int32_t MockDiskCache::GetEntryCount() const {
  return static_cast<int32_t>(entries_.size());
}

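// The completion callback is adapted to a repeating callback because it may
// have to be handed first to OpenEntry() and then, if the open fails
// synchronously, to CreateEntry().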
net::Error MockDiskCache::OpenOrCreateEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    disk_cache::EntryWithOpened* entry_struct,
    CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  base::RepeatingCallback<void(int)> copyable_callback;
  if (callback)
    copyable_callback = base::AdaptCallbackForRepeating(std::move(callback));

  if (force_fail_callback_later_) {
    CallbackLater(copyable_callback, ERR_CACHE_OPEN_OR_CREATE_FAILURE);
    return ERR_IO_PENDING;
  }

  if (fail_requests_)
    return ERR_CACHE_OPEN_OR_CREATE_FAILURE;

  disk_cache::Entry** entry = &(entry_struct->entry);

  // First try opening the entry.
  entry_struct->opened = true;
  net::Error rv = OpenEntry(key, request_priority, entry, copyable_callback);
  if (rv == OK || rv == ERR_IO_PENDING)
    return rv;

  // Unable to open, try creating the entry.
  entry_struct->opened = false;
  rv = CreateEntry(key, request_priority, entry, copyable_callback);
  if (rv == OK || rv == ERR_IO_PENDING)
    return rv;

  return ERR_CACHE_OPEN_OR_CREATE_FAILURE;
}

net::Error MockDiskCache::OpenEntry(const std::string& key,
                                    net::RequestPriority request_priority,
                                    disk_cache::Entry** entry,
                                    CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (force_fail_callback_later_) {
    CallbackLater(std::move(callback), ERR_CACHE_OPEN_FAILURE);
    return ERR_IO_PENDING;
  }

  if (fail_requests_)
    return ERR_CACHE_OPEN_FAILURE;

  auto it = entries_.find(key);
  if (it == entries_.end())
    return ERR_CACHE_OPEN_FAILURE;

  if (it->second->is_doomed()) {
    it->second->Release();
    entries_.erase(it);
    return ERR_CACHE_OPEN_FAILURE;
  }

  open_count_++;

  it->second->AddRef();
  *entry = it->second;

  if (soft_failures_ || soft_failures_one_instance_) {
    it->second->set_fail_requests();
    soft_failures_one_instance_ = false;
  }

  it->second->set_max_file_size(max_file_size_);

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(std::move(callback), OK);
  return ERR_IO_PENDING;
}

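// Note: creating a key that already has a live (non-doomed) entry hits
// NOTREACHED() unless |double_create_check_| has been disabled.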
net::Error MockDiskCache::CreateEntry(const std::string& key,
                                      net::RequestPriority request_priority,
                                      disk_cache::Entry** entry,
                                      CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (force_fail_callback_later_) {
    CallbackLater(std::move(callback), ERR_CACHE_CREATE_FAILURE);
    return ERR_IO_PENDING;
  }

  if (fail_requests_)
    return ERR_CACHE_CREATE_FAILURE;

  auto it = entries_.find(key);
  if (it != entries_.end()) {
    if (!it->second->is_doomed()) {
      if (double_create_check_)
        NOTREACHED();
      else
        return ERR_CACHE_CREATE_FAILURE;
    }
    it->second->Release();
    entries_.erase(it);
  }

  create_count_++;

  MockDiskEntry* new_entry = new MockDiskEntry(key);

  new_entry->AddRef();
  entries_[key] = new_entry;

  new_entry->AddRef();
  *entry = new_entry;

  if (soft_failures_ || soft_failures_one_instance_) {
    new_entry->set_fail_requests();
    soft_failures_one_instance_ = false;
  }

  if (fail_sparse_requests_)
    new_entry->set_fail_sparse_requests();

  new_entry->set_max_file_size(max_file_size_);

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  // Pause and resume.
  if (defer_op_ == MockDiskEntry::DEFER_CREATE) {
    defer_op_ = MockDiskEntry::DEFER_NONE;
    resume_callback_ = std::move(callback);
    resume_return_code_ = OK;
    return ERR_IO_PENDING;
  }

  CallbackLater(std::move(callback), OK);
  return ERR_IO_PENDING;
}

net::Error MockDiskCache::DoomEntry(const std::string& key,
                                    net::RequestPriority request_priority,
                                    CompletionOnceCallback callback) {
  DCHECK(!callback.is_null());
  if (force_fail_callback_later_) {
    CallbackLater(std::move(callback), ERR_CACHE_DOOM_FAILURE);
    return ERR_IO_PENDING;
  }

  if (fail_requests_)
    return ERR_CACHE_DOOM_FAILURE;

  auto it = entries_.find(key);
  if (it != entries_.end()) {
    it->second->Release();
    entries_.erase(it);
    doomed_count_++;
  }

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(std::move(callback), OK);
  return ERR_IO_PENDING;
}

net::Error MockDiskCache::DoomAllEntries(CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}

net::Error MockDiskCache::DoomEntriesBetween(const base::Time initial_time,
                                             const base::Time end_time,
                                             CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}

net::Error MockDiskCache::DoomEntriesSince(const base::Time initial_time,
                                           CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}

int64_t MockDiskCache::CalculateSizeOfAllEntries(
    Int64CompletionOnceCallback callback) {
  return ERR_NOT_IMPLEMENTED;
}

class MockDiskCache::NotImplementedIterator : public Iterator {
 public:
  net::Error OpenNextEntry(disk_cache::Entry** next_entry,
                           CompletionOnceCallback callback) override {
    return ERR_NOT_IMPLEMENTED;
  }
};

std::unique_ptr<disk_cache::Backend::Iterator> MockDiskCache::CreateIterator() {
  return std::unique_ptr<Iterator>(new NotImplementedIterator());
}

void MockDiskCache::GetStats(base::StringPairs* stats) {
}

void MockDiskCache::OnExternalCacheHit(const std::string& key) {
  external_cache_hits_.push_back(key);
}

size_t MockDiskCache::DumpMemoryStats(
    base::trace_event::ProcessMemoryDump* pmd,
    const std::string& parent_absolute_name) const {
  return 0u;
}

uint8_t MockDiskCache::GetEntryInMemoryData(const std::string& key) {
  if (!support_in_memory_entry_data_)
    return 0;

  auto it = entries_.find(key);
  if (it != entries_.end())
    return it->second->in_memory_data();
  return 0;
}

void MockDiskCache::SetEntryInMemoryData(const std::string& key, uint8_t data) {
  auto it = entries_.find(key);
  if (it != entries_.end())
    it->second->set_in_memory_data(data);
}

int64_t MockDiskCache::MaxFileSize() const {
  return max_file_size_;
}

void MockDiskCache::ReleaseAll() {
  for (auto entry : entries_)
    entry.second->Release();
  entries_.clear();
}

void MockDiskCache::CallbackLater(CompletionOnceCallback callback, int result) {
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::BindOnce(std::move(callback), result));
}

bool MockDiskCache::IsDiskEntryDoomed(const std::string& key) {
  auto it = entries_.find(key);
  if (it != entries_.end())
    return it->second->is_doomed();

  return false;
}

void MockDiskCache::ResumeCacheOperation() {
  DCHECK(!resume_callback_.is_null());
  CallbackLater(std::move(resume_callback_), resume_return_code_);
  resume_return_code_ = 0;
}

scoped_refptr<MockDiskEntry> MockDiskCache::GetDiskEntryRef(
    const std::string& key) {
  auto it = entries_.find(key);
  if (it == entries_.end())
    return nullptr;
  return it->second;
}

const std::vector<std::string>& MockDiskCache::GetExternalCacheHits() const {
  return external_cache_hits_;
}

//-----------------------------------------------------------------------------

int MockBackendFactory::CreateBackend(
    NetLog* net_log,
    std::unique_ptr<disk_cache::Backend>* backend,
    CompletionOnceCallback callback) {
  backend->reset(new MockDiskCache());
  return OK;
}

//-----------------------------------------------------------------------------

MockHttpCache::MockHttpCache() : MockHttpCache(false) {}

MockHttpCache::MockHttpCache(
    std::unique_ptr<HttpCache::BackendFactory> disk_cache_factory)
    : MockHttpCache(std::move(disk_cache_factory), false) {}

MockHttpCache::MockHttpCache(bool is_main_cache)
    : MockHttpCache(std::make_unique<MockBackendFactory>(), is_main_cache) {}

MockHttpCache::MockHttpCache(
    std::unique_ptr<HttpCache::BackendFactory> disk_cache_factory,
    bool is_main_cache)
    : http_cache_(std::make_unique<MockNetworkLayer>(),
                  std::move(disk_cache_factory),
                  is_main_cache) {}

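// Returns the backend, waiting on a TestCompletionCallback; returns nullptr if
// GetBackend() fails. Illustrative overall use of MockHttpCache in a test
// (assumed pattern, not prescribed by this file):
//   MockHttpCache cache;
//   std::unique_ptr<HttpTransaction> trans;
//   EXPECT_EQ(OK, cache.CreateTransaction(&trans));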
disk_cache::Backend* MockHttpCache::backend() {
  TestCompletionCallback cb;
  disk_cache::Backend* backend;
  int rv = http_cache_.GetBackend(&backend, cb.callback());
  rv = cb.GetResult(rv);
  return (rv == OK) ? backend : nullptr;
}

MockDiskCache* MockHttpCache::disk_cache() {
  return static_cast<MockDiskCache*>(backend());
}

int MockHttpCache::CreateTransaction(std::unique_ptr<HttpTransaction>* trans) {
  return http_cache_.CreateTransaction(DEFAULT_PRIORITY, trans);
}

void MockHttpCache::SimulateCacheLockTimeout() {
  http_cache_.SimulateCacheLockTimeoutForTesting();
}

void MockHttpCache::SimulateCacheLockTimeoutAfterHeaders() {
  http_cache_.SimulateCacheLockTimeoutAfterHeadersForTesting();
}

void MockHttpCache::FailConditionalizations() {
  http_cache_.FailConditionalizationForTest();
}

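// ReadResponseInfo()/WriteResponseInfo() operate on data stream 0, where
// HttpCache persists the serialized HttpResponseInfo (via base::Pickle).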
bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry,
                                     HttpResponseInfo* response_info,
                                     bool* response_truncated) {
  int size = disk_entry->GetDataSize(0);

  TestCompletionCallback cb;
  scoped_refptr<IOBuffer> buffer = base::MakeRefCounted<IOBuffer>(size);
  int rv = disk_entry->ReadData(0, 0, buffer.get(), size, cb.callback());
  rv = cb.GetResult(rv);
  EXPECT_EQ(size, rv);

  return HttpCache::ParseResponseInfo(buffer->data(), size, response_info,
                                      response_truncated);
}

bool MockHttpCache::WriteResponseInfo(disk_cache::Entry* disk_entry,
                                      const HttpResponseInfo* response_info,
                                      bool skip_transient_headers,
                                      bool response_truncated) {
  base::Pickle pickle;
  response_info->Persist(
      &pickle, skip_transient_headers, response_truncated);

  TestCompletionCallback cb;
  scoped_refptr<WrappedIOBuffer> data = base::MakeRefCounted<WrappedIOBuffer>(
      reinterpret_cast<const char*>(pickle.data()));
  int len = static_cast<int>(pickle.size());

  int rv = disk_entry->WriteData(0, 0, data.get(), len, cb.callback(), true);
  rv = cb.GetResult(rv);
  return (rv == len);
}

bool MockHttpCache::OpenBackendEntry(const std::string& key,
                                     disk_cache::Entry** entry) {
  TestCompletionCallback cb;
  int rv = backend()->OpenEntry(key, net::HIGHEST, entry, cb.callback());
  return (cb.GetResult(rv) == OK);
}

bool MockHttpCache::CreateBackendEntry(const std::string& key,
                                       disk_cache::Entry** entry,
                                       NetLog* net_log) {
  TestCompletionCallback cb;
  int rv = backend()->CreateEntry(key, net::HIGHEST, entry, cb.callback());
  return (cb.GetResult(rv) == OK);
}

// Static.
int MockHttpCache::GetTestMode(int test_mode) {
  if (!g_test_mode)
    return test_mode;

  return g_test_mode;
}

// Static.
void MockHttpCache::SetTestMode(int test_mode) {
  g_test_mode = test_mode;
}

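// The helpers below peek at HttpCache::ActiveEntry state so tests can observe
// the writers, queued transactions, and readers associated with |key|.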
bool MockHttpCache::IsWriterPresent(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry && entry->writers && !entry->writers->IsEmpty();
}

bool MockHttpCache::IsHeadersTransactionPresent(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry && entry->headers_transaction;
}

int MockHttpCache::GetCountReaders(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry ? entry->readers.size() : 0;
}

int MockHttpCache::GetCountAddToEntryQueue(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry ? entry->add_to_entry_queue.size() : 0;
}

int MockHttpCache::GetCountDoneHeadersQueue(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry ? entry->done_headers_queue.size() : 0;
}

int MockHttpCache::GetCountWriterTransactions(const std::string& key) {
  HttpCache::ActiveEntry* entry = http_cache_.FindActiveEntry(key);
  return entry && entry->writers ? entry->writers->GetTransactionsCount() : 0;
}

//-----------------------------------------------------------------------------

net::Error MockDiskCacheNoCB::CreateEntry(const std::string& key,
                                          net::RequestPriority request_priority,
                                          disk_cache::Entry** entry,
                                          CompletionOnceCallback callback) {
  return ERR_IO_PENDING;
}

//-----------------------------------------------------------------------------

int MockBackendNoCbFactory::CreateBackend(
    NetLog* net_log,
    std::unique_ptr<disk_cache::Backend>* backend,
    CompletionOnceCallback callback) {
  backend->reset(new MockDiskCacheNoCB());
  return OK;
}

//-----------------------------------------------------------------------------

MockBlockingBackendFactory::MockBlockingBackendFactory()
    : backend_(nullptr), block_(true), fail_(false) {}

MockBlockingBackendFactory::~MockBlockingBackendFactory() = default;

int MockBlockingBackendFactory::CreateBackend(
    NetLog* net_log,
    std::unique_ptr<disk_cache::Backend>* backend,
    CompletionOnceCallback callback) {
  if (!block_) {
    if (!fail_)
      backend->reset(new MockDiskCache());
    return Result();
  }

  backend_ = backend;
  callback_ = std::move(callback);
  return ERR_IO_PENDING;
}

void MockBlockingBackendFactory::FinishCreation() {
  block_ = false;
  if (!callback_.is_null()) {
    if (!fail_)
      backend_->reset(new MockDiskCache());
    // Running the callback might delete |this|.
    std::move(callback_).Run(Result());
  }
}

}  // namespace net