blob: fa5018baf45c66bf03381aeb075946093b405e3f [file] [log] [blame]
initial.commit586acc5fe2008-07-26 22:42:521// Copyright 2008, Google Inc.
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// * Redistributions of source code must retain the above copyright
9// notice, this list of conditions and the following disclaimer.
10// * Redistributions in binary form must reproduce the above
11// copyright notice, this list of conditions and the following disclaimer
12// in the documentation and/or other materials provided with the
13// distribution.
14// * Neither the name of Google Inc. nor the names of its
15// contributors may be used to endorse or promote products derived from
16// this software without specific prior written permission.
17//
18// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30#include "net/disk_cache/mem_backend_impl.h"
31
[email protected]408d35f52008-08-13 18:30:2232#include "net/disk_cache/cache_util.h"
initial.commit586acc5fe2008-07-26 22:42:5233#include "net/disk_cache/mem_entry_impl.h"
34
namespace {

// Default budget for the in-memory cache: 10 MB.
const int kDefaultCacheSize = 10 * 1024 * 1024;

// Extra room freed below the high-water mark on each cleanup pass, so that
// eviction does not run again immediately.
const int kCleanUpMargin = 1024 * 1024;

// Returns the low-water eviction target derived from |high_water|,
// clamped so it never goes negative.
int LowWaterAdjust(int high_water) {
  return (high_water < kCleanUpMargin) ? 0 : high_water - kCleanUpMargin;
}

}  // namespace
48
49namespace disk_cache {
50
51Backend* CreateInMemoryCacheBackend(int max_bytes) {
52 MemBackendImpl* cache = new MemBackendImpl();
53 cache->SetMaxSize(max_bytes);
54 if (cache->Init())
55 return cache;
56
57 delete cache;
58 LOG(ERROR) << "Unable to create cache";
59 return NULL;
60}
61
62// ------------------------------------------------------------------------
63
// Picks the cache budget when the caller did not set one explicitly.
// Always succeeds.
bool MemBackendImpl::Init() {
  // A non-zero |max_size_| means SetMaxSize() already chose a budget.
  if (max_size_)
    return true;

  int64 total_memory = GetSystemMemory();

  // Could not determine the amount of RAM; fall back to the default.
  if (total_memory < 0) {
    max_size_ = kDefaultCacheSize;
    return true;
  }

  // We want to use up to 2% of the computer's memory, with a limit of 50 MB,
  // reached on systems with more than 2.5 GB of RAM.
  total_memory = total_memory * 2 / 100;
  if (total_memory > kDefaultCacheSize * 5)
    max_size_ = kDefaultCacheSize * 5;
  else
    max_size_ = static_cast<int32>(total_memory);

  return true;
}
85
86MemBackendImpl::~MemBackendImpl() {
87 EntryMap::iterator it = entries_.begin();
88 while (it != entries_.end()) {
89 it->second->Doom();
90 it = entries_.begin();
91 }
92 DCHECK(!current_size_);
93}
94
95bool MemBackendImpl::SetMaxSize(int max_bytes) {
96 COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
97 if (max_bytes < 0)
98 return false;
99
100 // Zero size means use the default.
101 if (!max_bytes)
102 return true;
103
104 max_size_ = max_bytes;
105 return true;
106}
107
108int32 MemBackendImpl::GetEntryCount() const {
109 return static_cast<int32>(entries_.size());
110}
111
112bool MemBackendImpl::OpenEntry(const std::string& key, Entry** entry) {
113 EntryMap::iterator it = entries_.find(key);
114 if (it == entries_.end())
115 return false;
116
117 it->second->Open();
118
119 *entry = it->second;
120 return true;
121}
122
123bool MemBackendImpl::CreateEntry(const std::string& key, Entry** entry) {
124 EntryMap::iterator it = entries_.find(key);
125 if (it != entries_.end())
126 return false;
127
128 MemEntryImpl* cache_entry = new MemEntryImpl(this);
129 if (!cache_entry->CreateEntry(key)) {
130 delete entry;
131 return false;
132 }
133
134 rankings_.Insert(cache_entry);
135 entries_[key] = cache_entry;
136
137 *entry = cache_entry;
138 return true;
139}
140
141bool MemBackendImpl::DoomEntry(const std::string& key) {
142 Entry* entry;
143 if (!OpenEntry(key, &entry))
144 return false;
145
146 entry->Doom();
147 entry->Close();
148 return true;
149}
150
151void MemBackendImpl::InternalDoomEntry(MemEntryImpl* entry) {
152 rankings_.Remove(entry);
153 EntryMap::iterator it = entries_.find(entry->GetKey());
154 if (it != entries_.end())
155 entries_.erase(it);
156 else
157 NOTREACHED();
158
159 entry->InternalDoom();
160}
161
162bool MemBackendImpl::DoomAllEntries() {
163 TrimCache(true);
164 return true;
165}
166
167bool MemBackendImpl::DoomEntriesBetween(const Time initial_time,
168 const Time end_time) {
169 if (end_time.is_null())
170 return DoomEntriesSince(initial_time);
171
172 DCHECK(end_time >= initial_time);
173
174 MemEntryImpl* next = rankings_.GetNext(NULL);
175
176 // rankings_ is ordered by last used, this will descend through the cache
177 // and start dooming items before the end_time, and will stop once it reaches
178 // an item used before the initial time.
179 while (next) {
180 MemEntryImpl* node = next;
181 next = rankings_.GetNext(next);
182
183 if (node->GetLastUsed() < initial_time)
184 break;
185
186 if (node->GetLastUsed() < end_time) {
187 node->Doom();
188 }
189 }
190
191 return true;
192}
193
194// We use OpenNextEntry to retrieve elements from the cache, until we get
195// entries that are too old.
196bool MemBackendImpl::DoomEntriesSince(const Time initial_time) {
197 for (;;) {
198 Entry* entry;
199 void* iter = NULL;
200 if (!OpenNextEntry(&iter, &entry))
201 return true;
202
203 if (initial_time > entry->GetLastUsed()) {
204 entry->Close();
205 EndEnumeration(&iter);
206 return true;
207 }
208
209 entry->Doom();
210 entry->Close();
211 EndEnumeration(&iter); // Dooming the entry invalidates the iterator.
212 }
213}
214
215bool MemBackendImpl::OpenNextEntry(void** iter, Entry** next_entry) {
216 MemEntryImpl* current = reinterpret_cast<MemEntryImpl*>(*iter);
217 MemEntryImpl* node = rankings_.GetNext(current);
218 *next_entry = node;
219 *iter = node;
220
221 if (node)
222 node->Open();
223
224 return NULL != node;
225}
226
227void MemBackendImpl::EndEnumeration(void** iter) {
228 *iter = NULL;
229}
230
231void MemBackendImpl::TrimCache(bool empty) {
232 MemEntryImpl* next = rankings_.GetPrev(NULL);
233
234 DCHECK(next);
235
236 int target_size = empty ? 0 : LowWaterAdjust(max_size_);
237 while (current_size_ > target_size && next) {
238 MemEntryImpl* node = next;
239 next = rankings_.GetPrev(next);
240 if (!node->InUse() || empty) {
241 node->Doom();
242 }
243 }
244
245 return;
246}
247
248void MemBackendImpl::AddStorageSize(int32 bytes) {
249 current_size_ += bytes;
250 DCHECK(current_size_ >= 0);
251
252 if (current_size_ > max_size_)
253 TrimCache(false);
254}
255
256void MemBackendImpl::SubstractStorageSize(int32 bytes) {
257 current_size_ -= bytes;
258 DCHECK(current_size_ >= 0);
259}
260
261void MemBackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
262 if (old_size >= new_size)
263 SubstractStorageSize(old_size - new_size);
264 else
265 AddStorageSize(new_size - old_size);
266}
267
268void MemBackendImpl::UpdateRank(MemEntryImpl* node) {
269 rankings_.UpdateRank(node);
270}
271
272int MemBackendImpl::MaxFileSize() const {
273 return max_size_ / 8;
274}
275
276} // namespace disk_cache