blob: 31a093a76481817a062e71484198226e63bddda2 [file] [log] [blame]
// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
initial.commit09911bf2008-07-26 23:55:294
5#include "chrome/browser/safe_browsing/protocol_manager.h"
6
[email protected]978df342009-11-24 06:21:537#include "base/base64.h"
[email protected]484fce42008-10-01 00:37:188#include "base/file_version_info.h"
initial.commit09911bf2008-07-26 23:55:299#include "base/histogram.h"
10#include "base/logging.h"
[email protected]05f9b682008-09-29 22:18:0111#include "base/rand_util.h"
[email protected]807204142009-05-05 03:31:4412#include "base/stl_util-inl.h"
initial.commit09911bf2008-07-26 23:55:2913#include "base/string_util.h"
[email protected]05f9b682008-09-29 22:18:0114#include "base/sys_info.h"
initial.commit09911bf2008-07-26 23:55:2915#include "base/task.h"
16#include "base/timer.h"
[email protected]d83d03aa2009-11-02 21:44:3717#include "chrome/browser/chrome_thread.h"
[email protected]d11f5662009-11-12 20:52:5618#include "chrome/browser/net/url_request_context_getter.h"
initial.commit09911bf2008-07-26 23:55:2919#include "chrome/browser/profile.h"
20#include "chrome/browser/safe_browsing/protocol_parser.h"
initial.commit09911bf2008-07-26 23:55:2921#include "chrome/browser/safe_browsing/safe_browsing_service.h"
initial.commit09911bf2008-07-26 23:55:2922#include "chrome/common/env_vars.h"
[email protected]dfdb0de72009-02-19 21:58:1423#include "net/base/escape.h"
initial.commit09911bf2008-07-26 23:55:2924#include "net/base/load_flags.h"
[email protected]3c3f4ac52009-12-15 20:22:1725#include "net/url_request/url_request_status.h"
initial.commit09911bf2008-07-26 23:55:2926
[email protected]e1acf6f2008-10-27 20:43:3327using base::Time;
28using base::TimeDelta;
initial.commit09911bf2008-07-26 23:55:2929
// Maximum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSec = 5 * 60;

// The maximum time, in seconds, to wait for a response to an update request.
static const int kSbMaxUpdateWaitSec = 10;

// Update URL for querying about the latest set of chunk updates.
// %s placeholders: client name, client version.
static const char* const kSbUpdateUrl =
    "http://safebrowsing.clients.google.com/safebrowsing/downloads?client=%s"
    "&appver=%s&pver=2.2";

// GetHash request URL for retrieving full hashes.
// %s placeholders: client name, client version.
static const char* const kSbGetHashUrl =
    "http://safebrowsing.clients.google.com/safebrowsing/gethash?client=%s"
    "&appver=%s&pver=2.2";

// New MAC client key requests URL (over SSL, since a key is exchanged).
// %s placeholders: client name, client version.
static const char* const kSbNewKeyUrl =
    "https://sb-ssl.google.com/safebrowsing/newkey?client=%s&appver=%s"
    "&pver=2.2";

// URL for reporting malware pages.
// %s placeholders: malware URL, page URL, referrer URL, client name, version.
static const char* const kSbMalwareReportUrl =
    "http://safebrowsing.clients.google.com/safebrowsing/report?evts=malblhit"
    "&evtd=%s&evtr=%s&evhr=%s&client=%s&appver=%s";

// Maximum back off multiplier.
static const int kSbMaxBackOff = 8;
58
59
initial.commit09911bf2008-07-26 23:55:2960// SafeBrowsingProtocolManager implementation ----------------------------------
61
// Constructs the protocol manager. |sb_service| is the owning service that
// receives parsed results; |client_key|/|wrapped_key| are the MAC keys from a
// previous session (may be empty, in which case a key request is issued on
// the first update); |request_context_getter| supplies the request context
// for all URL fetches.
SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
    SafeBrowsingService* sb_service,
    const std::string& client_name,
    const std::string& client_key,
    const std::string& wrapped_key,
    URLRequestContextGetter* request_context_getter)
    : sb_service_(sb_service),
      request_type_(NO_REQUEST),
      update_error_count_(0),
      gethash_error_count_(0),
      update_back_off_mult_(1),
      gethash_back_off_mult_(1),
      next_update_sec_(-1),
      update_state_(FIRST_REQUEST),
      initial_request_(true),
      chunk_pending_to_write_(false),
      client_key_(client_key),
      wrapped_key_(wrapped_key),
      update_size_(0),
      client_name_(client_name),
      request_context_getter_(request_context_getter) {
  // Set the backoff multiplier fuzz to a random value between 0 and 1.
  back_off_fuzz_ = static_cast<float>(base::RandDouble());

  // The first update must happen between 1-5 minutes of start up.
  next_update_sec_ = base::RandInt(60, kSbTimerStartIntervalSec);

  // Report a fallback version string if version info is unavailable.
  scoped_ptr<FileVersionInfo> version_info(
      FileVersionInfo::CreateFileVersionInfoForCurrentModule());
  if (!version_info.get())
    version_ = "0.1";
  else
    version_ = WideToASCII(version_info->product_version());
}
96
97SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
initial.commit09911bf2008-07-26 23:55:2998 // Delete in-progress SafeBrowsing requests.
99 STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
100 hash_requests_.end());
101 hash_requests_.clear();
[email protected]dfdb0de72009-02-19 21:58:14102
103 // Delete in-progress malware reports.
104 STLDeleteContainerPointers(malware_reports_.begin(), malware_reports_.end());
105 malware_reports_.clear();
initial.commit09911bf2008-07-26 23:55:29106}
107
108// Public API used by the SafeBrowsingService ----------------------------------
109
110// We can only have one update or chunk request outstanding, but there may be
111// multiple GetHash requests pending since we don't want to serialize them and
112// slow down the user.
113void SafeBrowsingProtocolManager::GetFullHash(
114 SafeBrowsingService::SafeBrowsingCheck* check,
115 const std::vector<SBPrefix>& prefixes) {
116 // If we are in GetHash backoff, we need to check if we're past the next
117 // allowed time. If we are, we can proceed with the request. If not, we are
118 // required to return empty results (i.e. treat the page as safe).
119 if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
120 std::vector<SBFullHashResult> full_hashes;
[email protected]200abc32008-09-05 01:44:33121 sb_service_->HandleGetHashResults(check, full_hashes, false);
initial.commit09911bf2008-07-26 23:55:29122 return;
123 }
124
125 std::string url = StringPrintf(kSbGetHashUrl,
[email protected]1a871512009-11-06 06:11:18126 client_name_.c_str(),
[email protected]484fce42008-10-01 00:37:18127 version_.c_str());
initial.commit09911bf2008-07-26 23:55:29128 if (!client_key_.empty()) {
129 url.append("&wrkey=");
130 url.append(wrapped_key_);
131 }
132
133 GURL gethash_url(url);
134 URLFetcher* fetcher = new URLFetcher(gethash_url, URLFetcher::POST, this);
135 hash_requests_[fetcher] = check;
136
137 std::string get_hash;
138 SafeBrowsingProtocolParser parser;
139 parser.FormatGetHash(prefixes, &get_hash);
140
141 fetcher->set_load_flags(net::LOAD_DISABLE_CACHE);
[email protected]d11f5662009-11-12 20:52:56142 fetcher->set_request_context(request_context_getter_);
[email protected]d36e3c8e2008-08-29 23:42:20143 fetcher->set_upload_data("text/plain", get_hash);
initial.commit09911bf2008-07-26 23:55:29144 fetcher->Start();
145}
146
147void SafeBrowsingProtocolManager::GetNextUpdate() {
148 if (initial_request_) {
149 if (client_key_.empty() || wrapped_key_.empty()) {
150 IssueKeyRequest();
151 return;
152 } else {
153 initial_request_ = false;
154 }
155 }
156
157 if (!request_.get())
158 IssueUpdateRequest();
159}
160
161// URLFetcher::Delegate implementation -----------------------------------------
162
163// All SafeBrowsing request responses are handled here.
164// TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
165// chunk should retry the download and parse of that chunk (and
166// what back off / how many times to try), and if that effects the
167// update back off. For now, a failed parse of the chunk means we
168// drop it. This isn't so bad because the next UPDATE_REQUEST we
169// do will report all the chunks we have. If that chunk is still
170// required, the SafeBrowsing servers will tell us to get it again.
171void SafeBrowsingProtocolManager::OnURLFetchComplete(
172 const URLFetcher* source,
173 const GURL& url,
174 const URLRequestStatus& status,
175 int response_code,
176 const ResponseCookies& cookies,
177 const std::string& data) {
178 scoped_ptr<const URLFetcher> fetcher;
179 bool parsed_ok = true;
180 bool must_back_off = false; // Reduce SafeBrowsing service query frequency.
181
[email protected]dfdb0de72009-02-19 21:58:14182 // See if this is a malware report fetcher. We don't take any action for
183 // the response to those.
184 std::set<const URLFetcher*>::iterator mit = malware_reports_.find(source);
185 if (mit != malware_reports_.end()) {
186 const URLFetcher* report = *mit;
187 malware_reports_.erase(mit);
188 delete report;
189 return;
190 }
191
initial.commit09911bf2008-07-26 23:55:29192 HashRequests::iterator it = hash_requests_.find(source);
193 if (it != hash_requests_.end()) {
194 // GetHash response.
195 fetcher.reset(it->first);
196 SafeBrowsingService::SafeBrowsingCheck* check = it->second;
197 std::vector<SBFullHashResult> full_hashes;
[email protected]200abc32008-09-05 01:44:33198 bool can_cache = false;
initial.commit09911bf2008-07-26 23:55:29199 if (response_code == 200 || response_code == 204) {
[email protected]682343d2009-04-17 19:51:40200 // For tracking our GetHash false positive (204) rate, compared to real
201 // (200) responses.
202 if (response_code == 200)
203 UMA_HISTOGRAM_COUNTS("SB2.GetHash200", 1);
204 else
205 UMA_HISTOGRAM_COUNTS("SB2.GetHash204", 1);
[email protected]200abc32008-09-05 01:44:33206 can_cache = true;
initial.commit09911bf2008-07-26 23:55:29207 gethash_error_count_ = 0;
208 gethash_back_off_mult_ = 1;
209 bool re_key = false;
210 SafeBrowsingProtocolParser parser;
211 parsed_ok = parser.ParseGetHash(data.data(),
212 static_cast<int>(data.length()),
213 client_key_,
214 &re_key,
215 &full_hashes);
216 if (!parsed_ok) {
217 // If we fail to parse it, we must still inform the SafeBrowsingService
218 // so that it doesn't hold up the user's request indefinitely. Not sure
219 // what to do at that point though!
220 full_hashes.clear();
221 } else {
222 if (re_key)
223 HandleReKey();
224 }
[email protected]3c3f4ac52009-12-15 20:22:17225 } else {
[email protected]7bdc1bf2009-07-28 15:48:03226 HandleGetHashError(Time::Now());
[email protected]3c3f4ac52009-12-15 20:22:17227 if (status.status() == URLRequestStatus::FAILED) {
228 SB_DLOG(INFO) << "SafeBrowsing GetHash request for: " << source->url()
229 << " failed with os error: " << status.os_error();
230 } else {
231 SB_DLOG(INFO) << "SafeBrowsing GetHash request for: " << source->url()
232 << " failed with error: " << response_code;
233 }
initial.commit09911bf2008-07-26 23:55:29234 }
235
236 // Call back the SafeBrowsingService with full_hashes, even if there was a
237 // parse error or an error response code (in which case full_hashes will be
238 // empty). We can't block the user regardless of the error status.
[email protected]200abc32008-09-05 01:44:33239 sb_service_->HandleGetHashResults(check, full_hashes, can_cache);
initial.commit09911bf2008-07-26 23:55:29240
241 hash_requests_.erase(it);
242 } else {
243 // Update, chunk or key response.
initial.commit09911bf2008-07-26 23:55:29244 fetcher.reset(request_.release());
245
[email protected]a11c2c62009-08-07 22:47:56246 if (request_type_ == UPDATE_REQUEST) {
247 if (!fetcher.get()) {
248 // We've timed out waiting for an update response, so we've cancelled
249 // the update request and scheduled a new one. Ignore this response.
250 return;
251 }
252
253 // Cancel the update response timeout now that we have the response.
254 update_timer_.Stop();
255 }
256
initial.commit09911bf2008-07-26 23:55:29257 if (response_code == 200) {
258 // We have data from the SafeBrowsing service.
259 parsed_ok = HandleServiceResponse(source->url(),
260 data.data(),
261 static_cast<int>(data.length()));
262 if (!parsed_ok) {
263 SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
264 << "failed parse.";
[email protected]22717d1e2008-10-15 21:55:32265 must_back_off = true;
266 chunk_request_urls_.clear();
[email protected]6e3b12ff2009-01-06 22:17:57267 UpdateFinished(false);
initial.commit09911bf2008-07-26 23:55:29268 }
269
[email protected]cb1cdf492009-01-16 23:51:44270 switch (request_type_) {
271 case CHUNK_REQUEST:
272 if (parsed_ok)
273 chunk_request_urls_.pop_front();
274 break;
275 case GETKEY_REQUEST:
276 if (initial_request_) {
277 // This is the first request we've made this session. Now that we
278 // have the keys, do the regular update request.
279 initial_request_ = false;
280 GetNextUpdate();
281 return;
282 }
283 break;
284 case UPDATE_REQUEST:
285 if (chunk_request_urls_.empty() && parsed_ok) {
286 // We are up to date since the servers gave us nothing new, so we
287 // are done with this update cycle.
288 UpdateFinished(true);
289 }
290 break;
[email protected]7e242b52009-02-05 12:31:02291 default:
292 NOTREACHED();
293 break;
initial.commit09911bf2008-07-26 23:55:29294 }
[email protected]3c3f4ac52009-12-15 20:22:17295 } else {
296 // The SafeBrowsing service error, or very bad response code: back off.
initial.commit09911bf2008-07-26 23:55:29297 must_back_off = true;
298 if (request_type_ == CHUNK_REQUEST)
299 chunk_request_urls_.clear();
[email protected]6e3b12ff2009-01-06 22:17:57300 UpdateFinished(false);
[email protected]3c3f4ac52009-12-15 20:22:17301 if (status.status() == URLRequestStatus::FAILED) {
302 SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
303 << " failed with os error: " << status.os_error();
304 } else {
305 SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
306 << " failed with error: " << response_code;
307 }
initial.commit09911bf2008-07-26 23:55:29308 }
309 }
310
311 // Schedule a new update request if we've finished retrieving all the chunks
312 // from the previous update. We treat the update request and the chunk URLs it
313 // contains as an atomic unit as far as back off is concerned.
314 if (chunk_request_urls_.empty() &&
315 (request_type_ == CHUNK_REQUEST || request_type_ == UPDATE_REQUEST))
316 ScheduleNextUpdate(must_back_off);
317
318 // Get the next chunk if available.
319 IssueChunkRequest();
320}
321
322bool SafeBrowsingProtocolManager::HandleServiceResponse(const GURL& url,
323 const char* data,
324 int length) {
325 SafeBrowsingProtocolParser parser;
326
327 switch (request_type_) {
328 case UPDATE_REQUEST: {
329 int next_update_sec = -1;
330 bool re_key = false;
331 bool reset = false;
[email protected]7b1e37102010-03-08 21:43:16332 scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes(
333 new std::vector<SBChunkDelete>);
initial.commit09911bf2008-07-26 23:55:29334 std::vector<ChunkUrl> chunk_urls;
335 if (!parser.ParseUpdate(data, length, client_key_,
336 &next_update_sec, &re_key,
[email protected]7b1e37102010-03-08 21:43:16337 &reset, chunk_deletes.get(), &chunk_urls)) {
initial.commit09911bf2008-07-26 23:55:29338 return false;
339 }
340
341 last_update_ = Time::Now();
342
343 if (update_state_ == FIRST_REQUEST)
344 update_state_ = SECOND_REQUEST;
345 else if (update_state_ == SECOND_REQUEST)
346 update_state_ = NORMAL_REQUEST;
347
348 // New time for the next update.
349 if (next_update_sec > 0) {
350 next_update_sec_ = next_update_sec;
351 } else if (update_state_ == SECOND_REQUEST) {
[email protected]05f9b682008-09-29 22:18:01352 next_update_sec_ = base::RandInt(15 * 60, 45 * 60);
initial.commit09911bf2008-07-26 23:55:29353 }
354
355 // We need to request a new set of keys for MAC.
356 if (re_key)
357 HandleReKey();
358
359 // New chunks to download.
360 if (!chunk_urls.empty()) {
[email protected]553dba62009-02-24 19:08:23361 UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size());
initial.commit09911bf2008-07-26 23:55:29362 for (size_t i = 0; i < chunk_urls.size(); ++i)
363 chunk_request_urls_.push_back(chunk_urls[i]);
364 }
365
366 // Handle the case were the SafeBrowsing service tells us to dump our
367 // database.
368 if (reset) {
369 sb_service_->ResetDatabase();
370 return true;
371 }
372
[email protected]7b1e37102010-03-08 21:43:16373 // Chunks to delete from our storage. Pass ownership of
374 // |chunk_deletes|.
initial.commit09911bf2008-07-26 23:55:29375 if (!chunk_deletes->empty())
[email protected]7b1e37102010-03-08 21:43:16376 sb_service_->HandleChunkDelete(chunk_deletes.release());
initial.commit09911bf2008-07-26 23:55:29377
378 break;
379 }
380 case CHUNK_REQUEST: {
[email protected]484c57a2009-03-21 01:24:01381 UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
382 base::Time::Now() - chunk_request_start_);
[email protected]22573822008-11-14 00:40:47383
initial.commit09911bf2008-07-26 23:55:29384 const ChunkUrl chunk_url = chunk_request_urls_.front();
initial.commit09911bf2008-07-26 23:55:29385 bool re_key = false;
[email protected]7b1e37102010-03-08 21:43:16386 scoped_ptr<SBChunkList> chunks(new SBChunkList);
[email protected]553dba62009-02-24 19:08:23387 UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length);
[email protected]6e3b12ff2009-01-06 22:17:57388 update_size_ += length;
initial.commit09911bf2008-07-26 23:55:29389 if (!parser.ParseChunk(data, length,
390 client_key_, chunk_url.mac,
[email protected]7b1e37102010-03-08 21:43:16391 &re_key, chunks.get())) {
initial.commit09911bf2008-07-26 23:55:29392#ifndef NDEBUG
393 std::string data_str;
394 data_str.assign(data, length);
395 std::string encoded_chunk;
[email protected]978df342009-11-24 06:21:53396 base::Base64Encode(data, &encoded_chunk);
initial.commit09911bf2008-07-26 23:55:29397 SB_DLOG(INFO) << "ParseChunk error for chunk: " << chunk_url.url
398 << ", client_key: " << client_key_
399 << ", wrapped_key: " << wrapped_key_
400 << ", mac: " << chunk_url.mac
401 << ", Base64Encode(data): " << encoded_chunk
402 << ", length: " << length;
403#endif
initial.commit09911bf2008-07-26 23:55:29404 return false;
405 }
406
407 if (re_key)
408 HandleReKey();
409
[email protected]7b1e37102010-03-08 21:43:16410 // Chunks to add to storage. Pass ownership of |chunks|.
411 if (!chunks->empty()) {
initial.commit09911bf2008-07-26 23:55:29412 chunk_pending_to_write_ = true;
[email protected]7b1e37102010-03-08 21:43:16413 sb_service_->HandleChunk(chunk_url.list_name, chunks.release());
initial.commit09911bf2008-07-26 23:55:29414 }
415
416 break;
417 }
418 case GETKEY_REQUEST: {
419 std::string client_key, wrapped_key;
420 if (!parser.ParseNewKey(data, length, &client_key, &wrapped_key))
421 return false;
422
423 client_key_ = client_key;
424 wrapped_key_ = wrapped_key;
[email protected]d83d03aa2009-11-02 21:44:37425 ChromeThread::PostTask(
426 ChromeThread::UI, FROM_HERE,
427 NewRunnableMethod(
428 sb_service_, &SafeBrowsingService::OnNewMacKeys, client_key_,
429 wrapped_key_));
initial.commit09911bf2008-07-26 23:55:29430 break;
431 }
432
433 default:
434 return false;
435 }
436
437 return true;
438}
439
440void SafeBrowsingProtocolManager::Initialize() {
441 // Don't want to hit the safe browsing servers on build/chrome bots.
[email protected]05f9b682008-09-29 22:18:01442 if (base::SysInfo::HasEnvVar(env_vars::kHeadless))
initial.commit09911bf2008-07-26 23:55:29443 return;
444
445 ScheduleNextUpdate(false /* no back off */);
446}
447
448void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
449 DCHECK(next_update_sec_ > 0);
450
[email protected]2d316662008-09-03 18:18:14451 // Unschedule any current timer.
452 update_timer_.Stop();
initial.commit09911bf2008-07-26 23:55:29453
454 // Reschedule with the new update.
455 const int next_update = GetNextUpdateTime(back_off);
[email protected]2d316662008-09-03 18:18:14456 update_timer_.Start(TimeDelta::FromMilliseconds(next_update), this,
457 &SafeBrowsingProtocolManager::GetNextUpdate);
initial.commit09911bf2008-07-26 23:55:29458}
459
460// According to section 5 of the SafeBrowsing protocol specification, we must
461// back off after a certain number of errors. We only change 'next_update_sec_'
462// when we receive a response from the SafeBrowsing service.
463int SafeBrowsingProtocolManager::GetNextUpdateTime(bool back_off) {
464 int next = next_update_sec_;
465 if (back_off) {
466 next = GetNextBackOffTime(&update_error_count_, &update_back_off_mult_);
467 } else {
468 // Successful response means error reset.
469 update_error_count_ = 0;
470 update_back_off_mult_ = 1;
471 }
472 return next * 1000; // milliseconds
473}
474
475int SafeBrowsingProtocolManager::GetNextBackOffTime(int* error_count,
476 int* multiplier) {
477 DCHECK(multiplier && error_count);
478 (*error_count)++;
479 if (*error_count > 1 && *error_count < 6) {
480 int next = static_cast<int>(*multiplier * (1 + back_off_fuzz_) * 30 * 60);
481 *multiplier *= 2;
482 if (*multiplier > kSbMaxBackOff)
483 *multiplier = kSbMaxBackOff;
484 return next;
485 }
486
487 if (*error_count >= 6)
488 return 60 * 60 * 8; // 8 hours
489
490 return 60; // 1 minute
491}
492
// This request requires getting a list of all the chunks for each list from the
// database asynchronously. The request will be issued when we're called back in
// OnGetChunksComplete.
// TODO(paulg): We should get this at start up and maintain a ChunkRange cache
//              to avoid hitting the database with each update request. On the
//              otherhand, this request will only occur ~20-30 minutes so there
//              isn't that much overhead. Measure!
void SafeBrowsingProtocolManager::IssueUpdateRequest() {
  request_type_ = UPDATE_REQUEST;
  // Kicks off the async chunk-range query; the network request itself is
  // issued from OnGetChunksComplete().
  sb_service_->UpdateStarted();
}
504
505void SafeBrowsingProtocolManager::IssueChunkRequest() {
506 // We are only allowed to have one request outstanding at any time. Also,
507 // don't get the next url until the previous one has been written to disk so
508 // that we don't use too much memory.
509 if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
510 return;
511
512 ChunkUrl next_chunk = chunk_request_urls_.front();
513 DCHECK(!next_chunk.url.empty());
514 if (!StartsWithASCII(next_chunk.url, "http://", false) &&
515 !StartsWithASCII(next_chunk.url, "https://", false))
516 next_chunk.url = "http://" + next_chunk.url;
517 GURL chunk_url(next_chunk.url);
518 request_type_ = CHUNK_REQUEST;
519 request_.reset(new URLFetcher(chunk_url, URLFetcher::GET, this));
520 request_->set_load_flags(net::LOAD_DISABLE_CACHE);
[email protected]d11f5662009-11-12 20:52:56521 request_->set_request_context(request_context_getter_);
[email protected]22573822008-11-14 00:40:47522 chunk_request_start_ = base::Time::Now();
initial.commit09911bf2008-07-26 23:55:29523 request_->Start();
524}
525
526void SafeBrowsingProtocolManager::IssueKeyRequest() {
527 GURL key_url(StringPrintf(kSbNewKeyUrl,
[email protected]1a871512009-11-06 06:11:18528 client_name_.c_str(),
[email protected]484fce42008-10-01 00:37:18529 version_.c_str()));
initial.commit09911bf2008-07-26 23:55:29530 request_type_ = GETKEY_REQUEST;
531 request_.reset(new URLFetcher(key_url, URLFetcher::GET, this));
532 request_->set_load_flags(net::LOAD_DISABLE_CACHE);
[email protected]d11f5662009-11-12 20:52:56533 request_->set_request_context(request_context_getter_);
initial.commit09911bf2008-07-26 23:55:29534 request_->Start();
535}
536
537void SafeBrowsingProtocolManager::OnGetChunksComplete(
538 const std::vector<SBListChunkRanges>& lists, bool database_error) {
539 DCHECK(request_type_ == UPDATE_REQUEST);
initial.commit09911bf2008-07-26 23:55:29540 if (database_error) {
[email protected]a11c2c62009-08-07 22:47:56541 UpdateFinished(false);
[email protected]3c3f4ac52009-12-15 20:22:17542 ScheduleNextUpdate(false);
initial.commit09911bf2008-07-26 23:55:29543 return;
544 }
545
546 const bool use_mac = !client_key_.empty();
547
548 // Format our stored chunks:
549 std::string list_data;
550 bool found_malware = false;
551 bool found_phishing = false;
552 for (size_t i = 0; i < lists.size(); ++i) {
553 list_data.append(FormatList(lists[i], use_mac));
[email protected]c3ff89492008-11-11 02:17:51554 if (lists[i].name == safe_browsing_util::kPhishingList)
initial.commit09911bf2008-07-26 23:55:29555 found_phishing = true;
556
[email protected]c3ff89492008-11-11 02:17:51557 if (lists[i].name == safe_browsing_util::kMalwareList)
initial.commit09911bf2008-07-26 23:55:29558 found_malware = true;
559 }
560
561 // If we have an empty database, let the server know we want data for these
562 // lists.
563 if (!found_phishing)
[email protected]c3ff89492008-11-11 02:17:51564 list_data.append(FormatList(
565 SBListChunkRanges(safe_browsing_util::kPhishingList), use_mac));
initial.commit09911bf2008-07-26 23:55:29566
567 if (!found_malware)
[email protected]c3ff89492008-11-11 02:17:51568 list_data.append(FormatList(
569 SBListChunkRanges(safe_browsing_util::kMalwareList), use_mac));
initial.commit09911bf2008-07-26 23:55:29570
571 std::string url = StringPrintf(kSbUpdateUrl,
[email protected]1a871512009-11-06 06:11:18572 client_name_.c_str(),
[email protected]484fce42008-10-01 00:37:18573 version_.c_str());
initial.commit09911bf2008-07-26 23:55:29574 if (use_mac) {
575 url.append("&wrkey=");
576 url.append(wrapped_key_);
577 }
578
579 GURL update_url(url);
580 request_.reset(new URLFetcher(update_url, URLFetcher::POST, this));
581 request_->set_load_flags(net::LOAD_DISABLE_CACHE);
[email protected]d11f5662009-11-12 20:52:56582 request_->set_request_context(request_context_getter_);
initial.commit09911bf2008-07-26 23:55:29583 request_->set_upload_data("text/plain", list_data);
584 request_->Start();
[email protected]a11c2c62009-08-07 22:47:56585
586 // Begin the update request timeout.
587 update_timer_.Start(TimeDelta::FromSeconds(kSbMaxUpdateWaitSec), this,
588 &SafeBrowsingProtocolManager::UpdateResponseTimeout);
589}
590
// If we haven't heard back from the server with an update response, this method
// will run. Close the current update session and schedule another update.
void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
  DCHECK(request_type_ == UPDATE_REQUEST);
  // Cancel the outstanding fetch; a late response is then ignored by
  // OnURLFetchComplete since no live request remains.
  request_.reset();
  UpdateFinished(false);
  ScheduleNextUpdate(false);
}
599
600void SafeBrowsingProtocolManager::OnChunkInserted() {
601 chunk_pending_to_write_ = false;
602
603 if (chunk_request_urls_.empty()) {
[email protected]484c57a2009-03-21 01:24:01604 UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_);
[email protected]6e3b12ff2009-01-06 22:17:57605 UpdateFinished(true);
initial.commit09911bf2008-07-26 23:55:29606 } else {
607 IssueChunkRequest();
608 }
609}
610
[email protected]dfdb0de72009-02-19 21:58:14611void SafeBrowsingProtocolManager::ReportMalware(const GURL& malware_url,
612 const GURL& page_url,
613 const GURL& referrer_url) {
614 std::string report_str = StringPrintf(
615 kSbMalwareReportUrl,
[email protected]0d2e6a62010-01-15 20:09:19616 EscapeQueryParamValue(malware_url.spec(), true).c_str(),
617 EscapeQueryParamValue(page_url.spec(), true).c_str(),
618 EscapeQueryParamValue(referrer_url.spec(), true).c_str(),
[email protected]1a871512009-11-06 06:11:18619 client_name_.c_str(),
[email protected]dfdb0de72009-02-19 21:58:14620 version_.c_str());
621 GURL report_url(report_str);
622 URLFetcher* report = new URLFetcher(report_url, URLFetcher::GET, this);
623 report->set_load_flags(net::LOAD_DISABLE_CACHE);
[email protected]d11f5662009-11-12 20:52:56624 report->set_request_context(request_context_getter_);
[email protected]dfdb0de72009-02-19 21:58:14625 report->Start();
626 malware_reports_.insert(report);
627}
628
initial.commit09911bf2008-07-26 23:55:29629// static
630std::string SafeBrowsingProtocolManager::FormatList(
631 const SBListChunkRanges& list, bool use_mac) {
632 std::string formatted_results;
633 formatted_results.append(list.name);
634 formatted_results.append(";");
635 if (!list.adds.empty()) {
636 formatted_results.append("a:" + list.adds);
637 if (!list.subs.empty() || use_mac)
638 formatted_results.append(":");
639 }
640 if (!list.subs.empty()) {
641 formatted_results.append("s:" + list.subs);
642 if (use_mac)
643 formatted_results.append(":");
644 }
645 if (use_mac)
646 formatted_results.append("mac");
647 formatted_results.append("\n");
648
649 return formatted_results;
650}
651
// The server asked us to re-key: drop the current MAC keys and request a
// fresh pair from the key server.
void SafeBrowsingProtocolManager::HandleReKey() {
  client_key_.clear();
  wrapped_key_.clear();
  IssueKeyRequest();
}
657
// Records a GetHash error and computes the earliest time a new GetHash
// request may be issued (see GetNextBackOffTime for the schedule).
void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) {
  int next = GetNextBackOffTime(&gethash_error_count_, &gethash_back_off_mult_);
  next_gethash_time_ = now + TimeDelta::FromSeconds(next);
}
[email protected]6e3b12ff2009-01-06 22:17:57662
// Ends the current update cycle: records the total bytes downloaded and
// notifies the service of success/failure.
void SafeBrowsingProtocolManager::UpdateFinished(bool success) {
  UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
  update_size_ = 0;
  sb_service_->UpdateFinished(success);
}