blob: 5a8baae5a8c7df0c40bb43b2d2d6b5c54c1d7321 [file] [log] [blame]
[email protected]c83dd912010-04-06 18:50:511// Copyright (c) 2010 The Chromium Authors. All rights reserved.
license.botbf09a502008-08-24 00:55:552// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
initial.commit09911bf2008-07-26 23:55:294
5#include "chrome/browser/safe_browsing/protocol_manager.h"
6
[email protected]978df342009-11-24 06:21:537#include "base/base64.h"
[email protected]c83dd912010-04-06 18:50:518#include "base/env_var.h"
[email protected]484fce42008-10-01 00:37:189#include "base/file_version_info.h"
initial.commit09911bf2008-07-26 23:55:2910#include "base/histogram.h"
11#include "base/logging.h"
[email protected]05f9b682008-09-29 22:18:0112#include "base/rand_util.h"
[email protected]807204142009-05-05 03:31:4413#include "base/stl_util-inl.h"
initial.commit09911bf2008-07-26 23:55:2914#include "base/string_util.h"
15#include "base/task.h"
16#include "base/timer.h"
[email protected]bcff05a2010-04-14 01:46:4317#include "chrome/app/chrome_version_info.h"
[email protected]d83d03aa2009-11-02 21:44:3718#include "chrome/browser/chrome_thread.h"
initial.commit09911bf2008-07-26 23:55:2919#include "chrome/browser/profile.h"
20#include "chrome/browser/safe_browsing/protocol_parser.h"
initial.commit09911bf2008-07-26 23:55:2921#include "chrome/browser/safe_browsing/safe_browsing_service.h"
initial.commit09911bf2008-07-26 23:55:2922#include "chrome/common/env_vars.h"
[email protected]68d2a05f2010-05-07 21:39:5523#include "chrome/common/net/url_request_context_getter.h"
[email protected]dfdb0de72009-02-19 21:58:1424#include "net/base/escape.h"
initial.commit09911bf2008-07-26 23:55:2925#include "net/base/load_flags.h"
[email protected]3c3f4ac52009-12-15 20:22:1726#include "net/url_request/url_request_status.h"
initial.commit09911bf2008-07-26 23:55:2927
[email protected]e1acf6f2008-10-27 20:43:3328using base::Time;
29using base::TimeDelta;
initial.commit09911bf2008-07-26 23:55:2930
// Maximum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSec = 5 * 60;

// The maximum time, in seconds, to wait for a response to an update request.
static const int kSbMaxUpdateWaitSec = 10;

// Update URL for querying about the latest set of chunk updates.
// The two %s placeholders are filled with the client name and app version.
static const char* const kSbUpdateUrl =
    "http://safebrowsing.clients.google.com/safebrowsing/downloads?client=%s"
    "&appver=%s&pver=2.2";

// GetHash request URL for retrieving full hashes.
// The two %s placeholders are filled with the client name and app version.
static const char* const kSbGetHashUrl =
    "http://safebrowsing.clients.google.com/safebrowsing/gethash?client=%s"
    "&appver=%s&pver=2.2";

// New MAC client key requests URL. Served over SSL since it transports keys.
static const char* const kSbNewKeyUrl =
    "https://sb-ssl.google.com/safebrowsing/newkey?client=%s&appver=%s"
    "&pver=2.2";

// URL for reporting malware pages. Placeholders: malware URL, page URL,
// referrer URL, client name, app version (all escaped by the caller).
static const char* const kSbMalwareReportUrl =
    "http://safebrowsing.clients.google.com/safebrowsing/report?evts=malblhit"
    "&evtd=%s&evtr=%s&evhr=%s&client=%s&appver=%s";

// Maximum back off multiplier.
static const int kSbMaxBackOff = 8;
59
60
initial.commit09911bf2008-07-26 23:55:2961// SafeBrowsingProtocolManager implementation ----------------------------------
62
// Constructs the protocol manager. |client_key| and |wrapped_key| may be
// empty, in which case a key request is issued before the first update
// (see GetNextUpdate). |sb_service| and |request_context_getter| are
// non-owning pointers held for the manager's lifetime.
SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
    SafeBrowsingService* sb_service,
    const std::string& client_name,
    const std::string& client_key,
    const std::string& wrapped_key,
    URLRequestContextGetter* request_context_getter)
    : sb_service_(sb_service),
      request_type_(NO_REQUEST),
      update_error_count_(0),
      gethash_error_count_(0),
      update_back_off_mult_(1),
      gethash_back_off_mult_(1),
      next_update_sec_(-1),
      update_state_(FIRST_REQUEST),
      initial_request_(true),
      chunk_pending_to_write_(false),
      client_key_(client_key),
      wrapped_key_(wrapped_key),
      update_size_(0),
      client_name_(client_name),
      request_context_getter_(request_context_getter) {
  // Set the backoff multiplier fuzz to a random value between 0 and 1.
  back_off_fuzz_ = static_cast<float>(base::RandDouble());

  // The first update must happen between 1-5 minutes of start up.
  next_update_sec_ = base::RandInt(60, kSbTimerStartIntervalSec);

  // |version_| is reported to the server in every request URL; fall back to
  // a placeholder when version info is unavailable.
  scoped_ptr<FileVersionInfo> version_info(
      chrome_app::GetChromeVersionInfo());
  if (!version_info.get())
    version_ = "0.1";
  else
    version_ = WideToASCII(version_info->product_version());
}
97
98SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
initial.commit09911bf2008-07-26 23:55:2999 // Delete in-progress SafeBrowsing requests.
100 STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
101 hash_requests_.end());
102 hash_requests_.clear();
[email protected]dfdb0de72009-02-19 21:58:14103
104 // Delete in-progress malware reports.
105 STLDeleteContainerPointers(malware_reports_.begin(), malware_reports_.end());
106 malware_reports_.clear();
initial.commit09911bf2008-07-26 23:55:29107}
108
109// Public API used by the SafeBrowsingService ----------------------------------
110
111// We can only have one update or chunk request outstanding, but there may be
112// multiple GetHash requests pending since we don't want to serialize them and
113// slow down the user.
114void SafeBrowsingProtocolManager::GetFullHash(
115 SafeBrowsingService::SafeBrowsingCheck* check,
116 const std::vector<SBPrefix>& prefixes) {
117 // If we are in GetHash backoff, we need to check if we're past the next
118 // allowed time. If we are, we can proceed with the request. If not, we are
119 // required to return empty results (i.e. treat the page as safe).
120 if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
121 std::vector<SBFullHashResult> full_hashes;
[email protected]200abc32008-09-05 01:44:33122 sb_service_->HandleGetHashResults(check, full_hashes, false);
initial.commit09911bf2008-07-26 23:55:29123 return;
124 }
125
126 std::string url = StringPrintf(kSbGetHashUrl,
[email protected]1a871512009-11-06 06:11:18127 client_name_.c_str(),
[email protected]484fce42008-10-01 00:37:18128 version_.c_str());
initial.commit09911bf2008-07-26 23:55:29129 if (!client_key_.empty()) {
130 url.append("&wrkey=");
131 url.append(wrapped_key_);
132 }
133
134 GURL gethash_url(url);
135 URLFetcher* fetcher = new URLFetcher(gethash_url, URLFetcher::POST, this);
136 hash_requests_[fetcher] = check;
137
138 std::string get_hash;
139 SafeBrowsingProtocolParser parser;
140 parser.FormatGetHash(prefixes, &get_hash);
141
142 fetcher->set_load_flags(net::LOAD_DISABLE_CACHE);
[email protected]d11f5662009-11-12 20:52:56143 fetcher->set_request_context(request_context_getter_);
[email protected]d36e3c8e2008-08-29 23:42:20144 fetcher->set_upload_data("text/plain", get_hash);
initial.commit09911bf2008-07-26 23:55:29145 fetcher->Start();
146}
147
148void SafeBrowsingProtocolManager::GetNextUpdate() {
149 if (initial_request_) {
150 if (client_key_.empty() || wrapped_key_.empty()) {
151 IssueKeyRequest();
152 return;
153 } else {
154 initial_request_ = false;
155 }
156 }
157
158 if (!request_.get())
159 IssueUpdateRequest();
160}
161
162// URLFetcher::Delegate implementation -----------------------------------------
163
// All SafeBrowsing request responses are handled here.
// TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
//              chunk should retry the download and parse of that chunk (and
//              what back off / how many times to try), and if that effects the
//              update back off. For now, a failed parse of the chunk means we
//              drop it. This isn't so bad because the next UPDATE_REQUEST we
//              do will report all the chunks we have. If that chunk is still
//              required, the SafeBrowsing servers will tell us to get it again.
void SafeBrowsingProtocolManager::OnURLFetchComplete(
    const URLFetcher* source,
    const GURL& url,
    const URLRequestStatus& status,
    int response_code,
    const ResponseCookies& cookies,
    const std::string& data) {
  // |fetcher| takes ownership of |source| so it is deleted on every exit path.
  scoped_ptr<const URLFetcher> fetcher;
  bool parsed_ok = true;
  bool must_back_off = false;  // Reduce SafeBrowsing service query frequency.

  // See if this is a malware report fetcher. We don't take any action for
  // the response to those.
  std::set<const URLFetcher*>::iterator mit = malware_reports_.find(source);
  if (mit != malware_reports_.end()) {
    const URLFetcher* report = *mit;
    malware_reports_.erase(mit);
    delete report;
    return;
  }

  HashRequests::iterator it = hash_requests_.find(source);
  if (it != hash_requests_.end()) {
    // GetHash response.
    fetcher.reset(it->first);
    SafeBrowsingService::SafeBrowsingCheck* check = it->second;
    std::vector<SBFullHashResult> full_hashes;
    bool can_cache = false;
    if (response_code == 200 || response_code == 204) {
      // For tracking our GetHash false positive (204) rate, compared to real
      // (200) responses.
      if (response_code == 200)
        UMA_HISTOGRAM_COUNTS("SB2.GetHash200", 1);
      else
        UMA_HISTOGRAM_COUNTS("SB2.GetHash204", 1);
      can_cache = true;
      // Any 200/204 counts as success: clear the GetHash backoff state.
      gethash_error_count_ = 0;
      gethash_back_off_mult_ = 1;
      bool re_key = false;
      SafeBrowsingProtocolParser parser;
      parsed_ok = parser.ParseGetHash(data.data(),
                                      static_cast<int>(data.length()),
                                      client_key_,
                                      &re_key,
                                      &full_hashes);
      if (!parsed_ok) {
        // If we fail to parse it, we must still inform the SafeBrowsingService
        // so that it doesn't hold up the user's request indefinitely. Not sure
        // what to do at that point though!
        full_hashes.clear();
      } else {
        if (re_key)
          HandleReKey();
      }
    } else {
      // Error response: enter (or extend) GetHash backoff.
      HandleGetHashError(Time::Now());
      if (status.status() == URLRequestStatus::FAILED) {
        SB_DLOG(INFO) << "SafeBrowsing GetHash request for: " << source->url()
                      << " failed with os error: " << status.os_error();
      } else {
        SB_DLOG(INFO) << "SafeBrowsing GetHash request for: " << source->url()
                      << " failed with error: " << response_code;
      }
    }

    // Call back the SafeBrowsingService with full_hashes, even if there was a
    // parse error or an error response code (in which case full_hashes will be
    // empty). We can't block the user regardless of the error status.
    sb_service_->HandleGetHashResults(check, full_hashes, can_cache);

    hash_requests_.erase(it);
  } else {
    // Update, chunk or key response.
    fetcher.reset(request_.release());

    if (request_type_ == UPDATE_REQUEST) {
      if (!fetcher.get()) {
        // We've timed out waiting for an update response, so we've cancelled
        // the update request and scheduled a new one. Ignore this response.
        return;
      }

      // Cancel the update response timeout now that we have the response.
      update_timer_.Stop();
    }

    if (response_code == 200) {
      // We have data from the SafeBrowsing service.
      parsed_ok = HandleServiceResponse(source->url(),
                                        data.data(),
                                        static_cast<int>(data.length()));
      if (!parsed_ok) {
        SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
                      << "failed parse.";
        must_back_off = true;
        chunk_request_urls_.clear();
        UpdateFinished(false);
      }

      switch (request_type_) {
        case CHUNK_REQUEST:
          // Only advance past this chunk URL if it parsed; a failed chunk was
          // already handled above by clearing the whole queue.
          if (parsed_ok)
            chunk_request_urls_.pop_front();
          break;
        case GETKEY_REQUEST:
          if (initial_request_) {
            // This is the first request we've made this session. Now that we
            // have the keys, do the regular update request.
            initial_request_ = false;
            GetNextUpdate();
            return;
          }
          break;
        case UPDATE_REQUEST:
          if (chunk_request_urls_.empty() && parsed_ok) {
            // We are up to date since the servers gave us nothing new, so we
            // are done with this update cycle.
            UpdateFinished(true);
          }
          break;
        default:
          NOTREACHED();
          break;
      }
    } else {
      // The SafeBrowsing service error, or very bad response code: back off.
      must_back_off = true;
      if (request_type_ == CHUNK_REQUEST)
        chunk_request_urls_.clear();
      UpdateFinished(false);
      if (status.status() == URLRequestStatus::FAILED) {
        SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
                      << " failed with os error: " << status.os_error();
      } else {
        SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
                      << " failed with error: " << response_code;
      }
    }
  }

  // Schedule a new update request if we've finished retrieving all the chunks
  // from the previous update. We treat the update request and the chunk URLs it
  // contains as an atomic unit as far as back off is concerned.
  if (chunk_request_urls_.empty() &&
      (request_type_ == CHUNK_REQUEST || request_type_ == UPDATE_REQUEST))
    ScheduleNextUpdate(must_back_off);

  // Get the next chunk if available.
  IssueChunkRequest();
}
322
323bool SafeBrowsingProtocolManager::HandleServiceResponse(const GURL& url,
324 const char* data,
325 int length) {
326 SafeBrowsingProtocolParser parser;
327
328 switch (request_type_) {
329 case UPDATE_REQUEST: {
330 int next_update_sec = -1;
331 bool re_key = false;
332 bool reset = false;
[email protected]7b1e37102010-03-08 21:43:16333 scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes(
334 new std::vector<SBChunkDelete>);
initial.commit09911bf2008-07-26 23:55:29335 std::vector<ChunkUrl> chunk_urls;
336 if (!parser.ParseUpdate(data, length, client_key_,
337 &next_update_sec, &re_key,
[email protected]7b1e37102010-03-08 21:43:16338 &reset, chunk_deletes.get(), &chunk_urls)) {
initial.commit09911bf2008-07-26 23:55:29339 return false;
340 }
341
342 last_update_ = Time::Now();
343
344 if (update_state_ == FIRST_REQUEST)
345 update_state_ = SECOND_REQUEST;
346 else if (update_state_ == SECOND_REQUEST)
347 update_state_ = NORMAL_REQUEST;
348
349 // New time for the next update.
350 if (next_update_sec > 0) {
351 next_update_sec_ = next_update_sec;
352 } else if (update_state_ == SECOND_REQUEST) {
[email protected]05f9b682008-09-29 22:18:01353 next_update_sec_ = base::RandInt(15 * 60, 45 * 60);
initial.commit09911bf2008-07-26 23:55:29354 }
355
356 // We need to request a new set of keys for MAC.
357 if (re_key)
358 HandleReKey();
359
360 // New chunks to download.
361 if (!chunk_urls.empty()) {
[email protected]553dba62009-02-24 19:08:23362 UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size());
initial.commit09911bf2008-07-26 23:55:29363 for (size_t i = 0; i < chunk_urls.size(); ++i)
364 chunk_request_urls_.push_back(chunk_urls[i]);
365 }
366
367 // Handle the case were the SafeBrowsing service tells us to dump our
368 // database.
369 if (reset) {
370 sb_service_->ResetDatabase();
371 return true;
372 }
373
[email protected]7b1e37102010-03-08 21:43:16374 // Chunks to delete from our storage. Pass ownership of
375 // |chunk_deletes|.
initial.commit09911bf2008-07-26 23:55:29376 if (!chunk_deletes->empty())
[email protected]7b1e37102010-03-08 21:43:16377 sb_service_->HandleChunkDelete(chunk_deletes.release());
initial.commit09911bf2008-07-26 23:55:29378
379 break;
380 }
381 case CHUNK_REQUEST: {
[email protected]484c57a2009-03-21 01:24:01382 UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
383 base::Time::Now() - chunk_request_start_);
[email protected]22573822008-11-14 00:40:47384
initial.commit09911bf2008-07-26 23:55:29385 const ChunkUrl chunk_url = chunk_request_urls_.front();
initial.commit09911bf2008-07-26 23:55:29386 bool re_key = false;
[email protected]7b1e37102010-03-08 21:43:16387 scoped_ptr<SBChunkList> chunks(new SBChunkList);
[email protected]553dba62009-02-24 19:08:23388 UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length);
[email protected]6e3b12ff2009-01-06 22:17:57389 update_size_ += length;
initial.commit09911bf2008-07-26 23:55:29390 if (!parser.ParseChunk(data, length,
391 client_key_, chunk_url.mac,
[email protected]7b1e37102010-03-08 21:43:16392 &re_key, chunks.get())) {
initial.commit09911bf2008-07-26 23:55:29393#ifndef NDEBUG
394 std::string data_str;
395 data_str.assign(data, length);
396 std::string encoded_chunk;
[email protected]978df342009-11-24 06:21:53397 base::Base64Encode(data, &encoded_chunk);
initial.commit09911bf2008-07-26 23:55:29398 SB_DLOG(INFO) << "ParseChunk error for chunk: " << chunk_url.url
399 << ", client_key: " << client_key_
400 << ", wrapped_key: " << wrapped_key_
401 << ", mac: " << chunk_url.mac
402 << ", Base64Encode(data): " << encoded_chunk
403 << ", length: " << length;
404#endif
initial.commit09911bf2008-07-26 23:55:29405 return false;
406 }
407
408 if (re_key)
409 HandleReKey();
410
[email protected]7b1e37102010-03-08 21:43:16411 // Chunks to add to storage. Pass ownership of |chunks|.
412 if (!chunks->empty()) {
initial.commit09911bf2008-07-26 23:55:29413 chunk_pending_to_write_ = true;
[email protected]7b1e37102010-03-08 21:43:16414 sb_service_->HandleChunk(chunk_url.list_name, chunks.release());
initial.commit09911bf2008-07-26 23:55:29415 }
416
417 break;
418 }
419 case GETKEY_REQUEST: {
420 std::string client_key, wrapped_key;
421 if (!parser.ParseNewKey(data, length, &client_key, &wrapped_key))
422 return false;
423
424 client_key_ = client_key;
425 wrapped_key_ = wrapped_key;
[email protected]d83d03aa2009-11-02 21:44:37426 ChromeThread::PostTask(
427 ChromeThread::UI, FROM_HERE,
428 NewRunnableMethod(
429 sb_service_, &SafeBrowsingService::OnNewMacKeys, client_key_,
430 wrapped_key_));
initial.commit09911bf2008-07-26 23:55:29431 break;
432 }
433
434 default:
435 return false;
436 }
437
438 return true;
439}
440
441void SafeBrowsingProtocolManager::Initialize() {
442 // Don't want to hit the safe browsing servers on build/chrome bots.
[email protected]c83dd912010-04-06 18:50:51443 scoped_ptr<base::EnvVarGetter> env(base::EnvVarGetter::Create());
444 if (env->HasEnv(env_vars::kHeadless))
initial.commit09911bf2008-07-26 23:55:29445 return;
446
447 ScheduleNextUpdate(false /* no back off */);
448}
449
450void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
451 DCHECK(next_update_sec_ > 0);
452
[email protected]2d316662008-09-03 18:18:14453 // Unschedule any current timer.
454 update_timer_.Stop();
initial.commit09911bf2008-07-26 23:55:29455
456 // Reschedule with the new update.
457 const int next_update = GetNextUpdateTime(back_off);
[email protected]2d316662008-09-03 18:18:14458 update_timer_.Start(TimeDelta::FromMilliseconds(next_update), this,
459 &SafeBrowsingProtocolManager::GetNextUpdate);
initial.commit09911bf2008-07-26 23:55:29460}
461
462// According to section 5 of the SafeBrowsing protocol specification, we must
463// back off after a certain number of errors. We only change 'next_update_sec_'
464// when we receive a response from the SafeBrowsing service.
465int SafeBrowsingProtocolManager::GetNextUpdateTime(bool back_off) {
466 int next = next_update_sec_;
467 if (back_off) {
468 next = GetNextBackOffTime(&update_error_count_, &update_back_off_mult_);
469 } else {
470 // Successful response means error reset.
471 update_error_count_ = 0;
472 update_back_off_mult_ = 1;
473 }
474 return next * 1000; // milliseconds
475}
476
477int SafeBrowsingProtocolManager::GetNextBackOffTime(int* error_count,
478 int* multiplier) {
479 DCHECK(multiplier && error_count);
480 (*error_count)++;
481 if (*error_count > 1 && *error_count < 6) {
482 int next = static_cast<int>(*multiplier * (1 + back_off_fuzz_) * 30 * 60);
483 *multiplier *= 2;
484 if (*multiplier > kSbMaxBackOff)
485 *multiplier = kSbMaxBackOff;
486 return next;
487 }
488
489 if (*error_count >= 6)
490 return 60 * 60 * 8; // 8 hours
491
492 return 60; // 1 minute
493}
494
495// This request requires getting a list of all the chunks for each list from the
496// database asynchronously. The request will be issued when we're called back in
497// OnGetChunksComplete.
498// TODO(paulg): We should get this at start up and maintain a ChunkRange cache
499// to avoid hitting the database with each update request. On the
500// otherhand, this request will only occur ~20-30 minutes so there
501// isn't that much overhead. Measure!
502void SafeBrowsingProtocolManager::IssueUpdateRequest() {
503 request_type_ = UPDATE_REQUEST;
[email protected]57119c3f2008-12-04 00:33:04504 sb_service_->UpdateStarted();
initial.commit09911bf2008-07-26 23:55:29505}
506
507void SafeBrowsingProtocolManager::IssueChunkRequest() {
508 // We are only allowed to have one request outstanding at any time. Also,
509 // don't get the next url until the previous one has been written to disk so
510 // that we don't use too much memory.
511 if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
512 return;
513
514 ChunkUrl next_chunk = chunk_request_urls_.front();
515 DCHECK(!next_chunk.url.empty());
516 if (!StartsWithASCII(next_chunk.url, "http://", false) &&
517 !StartsWithASCII(next_chunk.url, "https://", false))
518 next_chunk.url = "http://" + next_chunk.url;
519 GURL chunk_url(next_chunk.url);
520 request_type_ = CHUNK_REQUEST;
521 request_.reset(new URLFetcher(chunk_url, URLFetcher::GET, this));
522 request_->set_load_flags(net::LOAD_DISABLE_CACHE);
[email protected]d11f5662009-11-12 20:52:56523 request_->set_request_context(request_context_getter_);
[email protected]22573822008-11-14 00:40:47524 chunk_request_start_ = base::Time::Now();
initial.commit09911bf2008-07-26 23:55:29525 request_->Start();
526}
527
528void SafeBrowsingProtocolManager::IssueKeyRequest() {
529 GURL key_url(StringPrintf(kSbNewKeyUrl,
[email protected]1a871512009-11-06 06:11:18530 client_name_.c_str(),
[email protected]484fce42008-10-01 00:37:18531 version_.c_str()));
initial.commit09911bf2008-07-26 23:55:29532 request_type_ = GETKEY_REQUEST;
533 request_.reset(new URLFetcher(key_url, URLFetcher::GET, this));
534 request_->set_load_flags(net::LOAD_DISABLE_CACHE);
[email protected]d11f5662009-11-12 20:52:56535 request_->set_request_context(request_context_getter_);
initial.commit09911bf2008-07-26 23:55:29536 request_->Start();
537}
538
// Callback from the database with the chunk ranges we currently store for
// each list. Builds and issues the actual update POST, or aborts the update
// cycle on |database_error|.
void SafeBrowsingProtocolManager::OnGetChunksComplete(
    const std::vector<SBListChunkRanges>& lists, bool database_error) {
  DCHECK(request_type_ == UPDATE_REQUEST);
  if (database_error) {
    // Close the update session and try again later without backing off.
    UpdateFinished(false);
    ScheduleNextUpdate(false);
    return;
  }

  // MACs are only requested when we have a client key.
  const bool use_mac = !client_key_.empty();

  // Format our stored chunks:
  std::string list_data;
  bool found_malware = false;
  bool found_phishing = false;
  for (size_t i = 0; i < lists.size(); ++i) {
    list_data.append(FormatList(lists[i], use_mac));
    if (lists[i].name == safe_browsing_util::kPhishingList)
      found_phishing = true;

    if (lists[i].name == safe_browsing_util::kMalwareList)
      found_malware = true;
  }

  // If we have an empty database, let the server know we want data for these
  // lists.
  if (!found_phishing)
    list_data.append(FormatList(
        SBListChunkRanges(safe_browsing_util::kPhishingList), use_mac));

  if (!found_malware)
    list_data.append(FormatList(
        SBListChunkRanges(safe_browsing_util::kMalwareList), use_mac));

  std::string url = StringPrintf(kSbUpdateUrl,
                                 client_name_.c_str(),
                                 version_.c_str());
  if (use_mac) {
    url.append("&wrkey=");
    url.append(wrapped_key_);
  }

  GURL update_url(url);
  request_.reset(new URLFetcher(update_url, URLFetcher::POST, this));
  request_->set_load_flags(net::LOAD_DISABLE_CACHE);
  request_->set_request_context(request_context_getter_);
  request_->set_upload_data("text/plain", list_data);
  request_->Start();

  // Begin the update request timeout.
  update_timer_.Start(TimeDelta::FromSeconds(kSbMaxUpdateWaitSec), this,
                      &SafeBrowsingProtocolManager::UpdateResponseTimeout);
}
592
593// If we haven't heard back from the server with an update response, this method
594// will run. Close the current update session and schedule another update.
595void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
596 DCHECK(request_type_ == UPDATE_REQUEST);
597 request_.reset();
[email protected]a11c2c62009-08-07 22:47:56598 UpdateFinished(false);
[email protected]3c3f4ac52009-12-15 20:22:17599 ScheduleNextUpdate(false);
initial.commit09911bf2008-07-26 23:55:29600}
601
602void SafeBrowsingProtocolManager::OnChunkInserted() {
603 chunk_pending_to_write_ = false;
604
605 if (chunk_request_urls_.empty()) {
[email protected]484c57a2009-03-21 01:24:01606 UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_);
[email protected]6e3b12ff2009-01-06 22:17:57607 UpdateFinished(true);
initial.commit09911bf2008-07-26 23:55:29608 } else {
609 IssueChunkRequest();
610 }
611}
612
[email protected]dfdb0de72009-02-19 21:58:14613void SafeBrowsingProtocolManager::ReportMalware(const GURL& malware_url,
614 const GURL& page_url,
615 const GURL& referrer_url) {
616 std::string report_str = StringPrintf(
617 kSbMalwareReportUrl,
[email protected]0d2e6a62010-01-15 20:09:19618 EscapeQueryParamValue(malware_url.spec(), true).c_str(),
619 EscapeQueryParamValue(page_url.spec(), true).c_str(),
620 EscapeQueryParamValue(referrer_url.spec(), true).c_str(),
[email protected]1a871512009-11-06 06:11:18621 client_name_.c_str(),
[email protected]dfdb0de72009-02-19 21:58:14622 version_.c_str());
623 GURL report_url(report_str);
624 URLFetcher* report = new URLFetcher(report_url, URLFetcher::GET, this);
625 report->set_load_flags(net::LOAD_DISABLE_CACHE);
[email protected]d11f5662009-11-12 20:52:56626 report->set_request_context(request_context_getter_);
[email protected]dfdb0de72009-02-19 21:58:14627 report->Start();
628 malware_reports_.insert(report);
629}
630
initial.commit09911bf2008-07-26 23:55:29631// static
632std::string SafeBrowsingProtocolManager::FormatList(
633 const SBListChunkRanges& list, bool use_mac) {
634 std::string formatted_results;
635 formatted_results.append(list.name);
636 formatted_results.append(";");
637 if (!list.adds.empty()) {
638 formatted_results.append("a:" + list.adds);
639 if (!list.subs.empty() || use_mac)
640 formatted_results.append(":");
641 }
642 if (!list.subs.empty()) {
643 formatted_results.append("s:" + list.subs);
644 if (use_mac)
645 formatted_results.append(":");
646 }
647 if (use_mac)
648 formatted_results.append("mac");
649 formatted_results.append("\n");
650
651 return formatted_results;
652}
653
654void SafeBrowsingProtocolManager::HandleReKey() {
655 client_key_.clear();
656 wrapped_key_.clear();
657 IssueKeyRequest();
658}
659
[email protected]7bdc1bf2009-07-28 15:48:03660void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) {
initial.commit09911bf2008-07-26 23:55:29661 int next = GetNextBackOffTime(&gethash_error_count_, &gethash_back_off_mult_);
[email protected]7bdc1bf2009-07-28 15:48:03662 next_gethash_time_ = now + TimeDelta::FromSeconds(next);
initial.commit09911bf2008-07-26 23:55:29663}
[email protected]6e3b12ff2009-01-06 22:17:57664
665void SafeBrowsingProtocolManager::UpdateFinished(bool success) {
[email protected]553dba62009-02-24 19:08:23666 UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
[email protected]6e3b12ff2009-01-06 22:17:57667 update_size_ = 0;
668 sb_service_->UpdateFinished(success);
[email protected]7e242b52009-02-05 12:31:02669}