// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/safe_browsing/protocol_manager.h"

#ifndef NDEBUG
#include "base/base64.h"
#endif
#include "base/environment.h"
#include "base/histogram.h"
#include "base/logging.h"
#include "base/rand_util.h"
#include "base/stl_util-inl.h"
#include "base/string_util.h"
#include "base/task.h"
#include "base/timer.h"
#include "chrome/browser/chrome_thread.h"
#include "chrome/browser/profile.h"
#include "chrome/browser/safe_browsing/protocol_parser.h"
#include "chrome/browser/safe_browsing/safe_browsing_service.h"
#include "chrome/common/chrome_version_info.h"
#include "chrome/common/env_vars.h"
#include "chrome/common/net/url_request_context_getter.h"
#include "net/base/escape.h"
#include "net/base/load_flags.h"
#include "net/url_request/url_request_status.h"

using base::Time;
using base::TimeDelta;

// Maximum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSec = 5 * 60;

// The maximum time, in seconds, to wait for a response to an update request.
static const int kSbMaxUpdateWaitSec = 10;

// Maximum back off multiplier.
static const int kSbMaxBackOff = 8;

// SafeBrowsingProtocolManager implementation ----------------------------------

SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
    SafeBrowsingService* sb_service,
    const std::string& client_name,
    const std::string& client_key,
    const std::string& wrapped_key,
    URLRequestContextGetter* request_context_getter,
    const std::string& info_url_prefix,
    const std::string& mackey_url_prefix,
    bool disable_auto_update)
    : sb_service_(sb_service),
      request_type_(NO_REQUEST),
      update_error_count_(0),
      gethash_error_count_(0),
      update_back_off_mult_(1),
      gethash_back_off_mult_(1),
      next_update_sec_(-1),
      update_state_(FIRST_REQUEST),
      initial_request_(true),
      chunk_pending_to_write_(false),
      client_key_(client_key),
      wrapped_key_(wrapped_key),
      update_size_(0),
      client_name_(client_name),
      request_context_getter_(request_context_getter),
      info_url_prefix_(info_url_prefix),
      mackey_url_prefix_(mackey_url_prefix),
      disable_auto_update_(disable_auto_update) {
  DCHECK(!info_url_prefix_.empty() && !mackey_url_prefix_.empty());

  // Set the backoff multiplier fuzz to a random value between 0 and 1.
  back_off_fuzz_ = static_cast<float>(base::RandDouble());
  // The first update must happen within 1-5 minutes of start up.
  next_update_sec_ = base::RandInt(60, kSbTimerStartIntervalSec);

  chrome::VersionInfo version_info;
  if (!version_info.is_valid())
    version_ = "0.1";
  else
    version_ = version_info.Version();
}

SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
  // Delete in-progress SafeBrowsing requests.
  STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
                                      hash_requests_.end());
  hash_requests_.clear();

  // Delete in-progress malware reports.
  STLDeleteContainerPointers(malware_reports_.begin(), malware_reports_.end());
  malware_reports_.clear();
}

// Public API used by the SafeBrowsingService ----------------------------------

// We can only have one update or chunk request outstanding, but there may be
// multiple GetHash requests pending since we don't want to serialize them and
// slow down the user.
void SafeBrowsingProtocolManager::GetFullHash(
    SafeBrowsingService::SafeBrowsingCheck* check,
    const std::vector<SBPrefix>& prefixes) {
  // If we are in GetHash backoff, we need to check if we're past the next
  // allowed time. If we are, we can proceed with the request. If not, we are
  // required to return empty results (i.e. treat the page as safe).
  if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
    std::vector<SBFullHashResult> full_hashes;
    sb_service_->HandleGetHashResults(check, full_hashes, false);
    return;
  }
  bool use_mac = !client_key_.empty();
  GURL gethash_url = GetHashUrl(use_mac);
  URLFetcher* fetcher = new URLFetcher(gethash_url, URLFetcher::POST, this);
  hash_requests_[fetcher] = check;

  std::string get_hash;
  SafeBrowsingProtocolParser parser;
  parser.FormatGetHash(prefixes, &get_hash);

  fetcher->set_load_flags(net::LOAD_DISABLE_CACHE);
  fetcher->set_request_context(request_context_getter_);
  fetcher->set_upload_data("text/plain", get_hash);
  fetcher->Start();
}

void SafeBrowsingProtocolManager::GetNextUpdate() {
  if (initial_request_) {
    if (client_key_.empty() || wrapped_key_.empty()) {
      IssueKeyRequest();
      return;
    } else {
      initial_request_ = false;
    }
  }

  if (!request_.get())
    IssueUpdateRequest();
}

// URLFetcher::Delegate implementation -----------------------------------------

// All SafeBrowsing request responses are handled here.
// TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
//              chunk should retry the download and parse of that chunk (and
//              what back off / how many times to try), and if that affects the
//              update back off. For now, a failed parse of the chunk means we
//              drop it. This isn't so bad because the next UPDATE_REQUEST we
//              do will report all the chunks we have. If that chunk is still
//              required, the SafeBrowsing servers will tell us to get it again.
void SafeBrowsingProtocolManager::OnURLFetchComplete(
    const URLFetcher* source,
    const GURL& url,
    const URLRequestStatus& status,
    int response_code,
    const ResponseCookies& cookies,
    const std::string& data) {
  scoped_ptr<const URLFetcher> fetcher;
  bool parsed_ok = true;
  bool must_back_off = false;  // Reduce SafeBrowsing service query frequency.

  // See if this is a malware report fetcher. We don't take any action for
  // the response to those.
  std::set<const URLFetcher*>::iterator mit = malware_reports_.find(source);
  if (mit != malware_reports_.end()) {
    const URLFetcher* report = *mit;
    malware_reports_.erase(mit);
    delete report;
    return;
  }

  HashRequests::iterator it = hash_requests_.find(source);
  if (it != hash_requests_.end()) {
    // GetHash response.
    fetcher.reset(it->first);
    SafeBrowsingService::SafeBrowsingCheck* check = it->second;
    std::vector<SBFullHashResult> full_hashes;
    bool can_cache = false;
    if (response_code == 200 || response_code == 204) {
      // For tracking our GetHash false positive (204) rate, compared to real
      // (200) responses.
      if (response_code == 200)
        UMA_HISTOGRAM_COUNTS("SB2.GetHash200", 1);
      else
        UMA_HISTOGRAM_COUNTS("SB2.GetHash204", 1);
      can_cache = true;
      gethash_error_count_ = 0;
      gethash_back_off_mult_ = 1;
      bool re_key = false;
      SafeBrowsingProtocolParser parser;
      parsed_ok = parser.ParseGetHash(data.data(),
                                      static_cast<int>(data.length()),
                                      client_key_,
                                      &re_key,
                                      &full_hashes);
      if (!parsed_ok) {
        // If we fail to parse it, we must still inform the SafeBrowsingService
        // so that it doesn't hold up the user's request indefinitely. Not sure
        // what to do at that point though!
        full_hashes.clear();
      } else {
        if (re_key)
          HandleReKey();
      }
    } else {
      HandleGetHashError(Time::Now());
      if (status.status() == URLRequestStatus::FAILED) {
        SB_DLOG(INFO) << "SafeBrowsing GetHash request for: " << source->url()
                      << " failed with os error: " << status.os_error();
      } else {
        SB_DLOG(INFO) << "SafeBrowsing GetHash request for: " << source->url()
                      << " failed with error: " << response_code;
      }
    }

    // Call back the SafeBrowsingService with full_hashes, even if there was a
    // parse error or an error response code (in which case full_hashes will be
    // empty). We can't block the user regardless of the error status.
    sb_service_->HandleGetHashResults(check, full_hashes, can_cache);

    hash_requests_.erase(it);
  } else {
    // Update, chunk or key response.
    fetcher.reset(request_.release());

    if (request_type_ == UPDATE_REQUEST) {
      if (!fetcher.get()) {
        // We've timed out waiting for an update response, so we've cancelled
        // the update request and scheduled a new one. Ignore this response.
        return;
      }

      // Cancel the update response timeout now that we have the response.
      update_timer_.Stop();
    }

    if (response_code == 200) {
      // We have data from the SafeBrowsing service.
      parsed_ok = HandleServiceResponse(source->url(),
                                        data.data(),
                                        static_cast<int>(data.length()));
      if (!parsed_ok) {
        SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
                      << " failed parse.";
        must_back_off = true;
        chunk_request_urls_.clear();
        UpdateFinished(false);
      }

      switch (request_type_) {
        case CHUNK_REQUEST:
          if (parsed_ok)
            chunk_request_urls_.pop_front();
          break;
        case GETKEY_REQUEST:
          if (initial_request_) {
            // This is the first request we've made this session. Now that we
            // have the keys, do the regular update request.
            initial_request_ = false;
            GetNextUpdate();
            return;
          }
          break;
        case UPDATE_REQUEST:
          if (chunk_request_urls_.empty() && parsed_ok) {
            // We are up to date since the servers gave us nothing new, so we
            // are done with this update cycle.
            UpdateFinished(true);
          }
          break;
        default:
          NOTREACHED();
          break;
      }
    } else {
      // A SafeBrowsing service error or a bad response code: back off.
      must_back_off = true;
      if (request_type_ == CHUNK_REQUEST)
        chunk_request_urls_.clear();
      UpdateFinished(false);
      if (status.status() == URLRequestStatus::FAILED) {
        SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
                      << " failed with os error: " << status.os_error();
      } else {
        SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
                      << " failed with error: " << response_code;
      }
    }
  }

  // Schedule a new update request if we've finished retrieving all the chunks
  // from the previous update. We treat the update request and the chunk URLs it
  // contains as an atomic unit as far as back off is concerned.
  if (chunk_request_urls_.empty() &&
      (request_type_ == CHUNK_REQUEST || request_type_ == UPDATE_REQUEST))
    ScheduleNextUpdate(must_back_off);

  // Get the next chunk if available.
  IssueChunkRequest();
}

bool SafeBrowsingProtocolManager::HandleServiceResponse(const GURL& url,
                                                        const char* data,
                                                        int length) {
  SafeBrowsingProtocolParser parser;

  switch (request_type_) {
    case UPDATE_REQUEST: {
      int next_update_sec = -1;
      bool re_key = false;
      bool reset = false;
      scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes(
          new std::vector<SBChunkDelete>);
      std::vector<ChunkUrl> chunk_urls;
      if (!parser.ParseUpdate(data, length, client_key_,
                              &next_update_sec, &re_key,
                              &reset, chunk_deletes.get(), &chunk_urls)) {
        return false;
      }

      last_update_ = Time::Now();

      if (update_state_ == FIRST_REQUEST)
        update_state_ = SECOND_REQUEST;
      else if (update_state_ == SECOND_REQUEST)
        update_state_ = NORMAL_REQUEST;

      // New time for the next update.
      if (next_update_sec > 0) {
        next_update_sec_ = next_update_sec;
      } else if (update_state_ == SECOND_REQUEST) {
        next_update_sec_ = base::RandInt(15 * 60, 45 * 60);
      }

      // We need to request a new set of keys for MAC.
      if (re_key)
        HandleReKey();

      // New chunks to download.
      if (!chunk_urls.empty()) {
        UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size());
        for (size_t i = 0; i < chunk_urls.size(); ++i)
          chunk_request_urls_.push_back(chunk_urls[i]);
      }

      // Handle the case where the SafeBrowsing service tells us to dump our
      // database.
      if (reset) {
        sb_service_->ResetDatabase();
        return true;
      }

      // Chunks to delete from our storage. Pass ownership of
      // |chunk_deletes|.
      if (!chunk_deletes->empty())
        sb_service_->HandleChunkDelete(chunk_deletes.release());

      break;
    }
    case CHUNK_REQUEST: {
      UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
                          base::Time::Now() - chunk_request_start_);

      const ChunkUrl chunk_url = chunk_request_urls_.front();
      bool re_key = false;
      scoped_ptr<SBChunkList> chunks(new SBChunkList);
      UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length);
      update_size_ += length;
      if (!parser.ParseChunk(data, length,
                             client_key_, chunk_url.mac,
                             &re_key, chunks.get())) {
#ifndef NDEBUG
        std::string data_str;
        data_str.assign(data, length);
        std::string encoded_chunk;
        // Encode the copy made with an explicit length so embedded NULs in the
        // binary chunk data are preserved.
        base::Base64Encode(data_str, &encoded_chunk);
        SB_DLOG(INFO) << "ParseChunk error for chunk: " << chunk_url.url
                      << ", client_key: " << client_key_
                      << ", wrapped_key: " << wrapped_key_
                      << ", mac: " << chunk_url.mac
                      << ", Base64Encode(data): " << encoded_chunk
                      << ", length: " << length;
#endif
        return false;
      }

      if (re_key)
        HandleReKey();

      // Chunks to add to storage. Pass ownership of |chunks|.
      if (!chunks->empty()) {
        chunk_pending_to_write_ = true;
        sb_service_->HandleChunk(chunk_url.list_name, chunks.release());
      }

      break;
    }
    case GETKEY_REQUEST: {
      std::string client_key, wrapped_key;
      if (!parser.ParseNewKey(data, length, &client_key, &wrapped_key))
        return false;

      client_key_ = client_key;
      wrapped_key_ = wrapped_key;
      ChromeThread::PostTask(
          ChromeThread::UI, FROM_HERE,
          NewRunnableMethod(
              sb_service_, &SafeBrowsingService::OnNewMacKeys, client_key_,
              wrapped_key_));
      break;
    }

    default:
      return false;
  }

  return true;
}

void SafeBrowsingProtocolManager::Initialize() {
  // Don't want to hit the safe browsing servers on build/chrome bots.
  scoped_ptr<base::Environment> env(base::Environment::Create());
  if (env->HasVar(env_vars::kHeadless))
    return;

  ScheduleNextUpdate(false /* no back off */);
}

void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
  DCHECK_GT(next_update_sec_, 0);

  if (disable_auto_update_) {
    // Unschedule any current timer.
    update_timer_.Stop();
    return;
  }
  // Reschedule with the new update.
  const int next_update = GetNextUpdateTime(back_off);
  ForceScheduleNextUpdate(next_update);
}

void SafeBrowsingProtocolManager::ForceScheduleNextUpdate(
    const int next_update_msec) {
  DCHECK_GE(next_update_msec, 0);
  // Unschedule any current timer.
  update_timer_.Stop();
  update_timer_.Start(TimeDelta::FromMilliseconds(next_update_msec), this,
                      &SafeBrowsingProtocolManager::GetNextUpdate);
}

// According to section 5 of the SafeBrowsing protocol specification, we must
// back off after a certain number of errors. We only change 'next_update_sec_'
// when we receive a response from the SafeBrowsing service.
int SafeBrowsingProtocolManager::GetNextUpdateTime(bool back_off) {
  int next = next_update_sec_;
  if (back_off) {
    next = GetNextBackOffTime(&update_error_count_, &update_back_off_mult_);
  } else {
    // Successful response means error reset.
    update_error_count_ = 0;
    update_back_off_mult_ = 1;
  }
  return next * 1000;  // milliseconds
}

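// Back-off schedule implemented below: the first error returns 1 minute; the
// 2nd through 5th errors return multiplier * (1 + back_off_fuzz_) * 30 minutes,
// with the multiplier doubling each time up to kSbMaxBackOff; the 6th and any
// later errors return 8 hours.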
int SafeBrowsingProtocolManager::GetNextBackOffTime(int* error_count,
                                                    int* multiplier) {
  DCHECK(multiplier && error_count);
  (*error_count)++;
  if (*error_count > 1 && *error_count < 6) {
    int next = static_cast<int>(*multiplier * (1 + back_off_fuzz_) * 30 * 60);
    *multiplier *= 2;
    if (*multiplier > kSbMaxBackOff)
      *multiplier = kSbMaxBackOff;
    return next;
  }

  if (*error_count >= 6)
    return 60 * 60 * 8;  // 8 hours

  return 60;  // 1 minute
}

// This request requires getting a list of all the chunks for each list from the
// database asynchronously. The request will be issued when we're called back in
// OnGetChunksComplete.
// TODO(paulg): We should get this at start up and maintain a ChunkRange cache
//              to avoid hitting the database with each update request. On the
//              other hand, this request only occurs every ~20-30 minutes, so
//              there isn't that much overhead. Measure!
void SafeBrowsingProtocolManager::IssueUpdateRequest() {
  request_type_ = UPDATE_REQUEST;
  sb_service_->UpdateStarted();
}

void SafeBrowsingProtocolManager::IssueChunkRequest() {
  // We are only allowed to have one request outstanding at any time. Also,
  // don't get the next url until the previous one has been written to disk so
  // that we don't use too much memory.
  if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
    return;

  ChunkUrl next_chunk = chunk_request_urls_.front();
  DCHECK(!next_chunk.url.empty());
  GURL chunk_url = NextChunkUrl(next_chunk.url);
  request_type_ = CHUNK_REQUEST;
  request_.reset(new URLFetcher(chunk_url, URLFetcher::GET, this));
  request_->set_load_flags(net::LOAD_DISABLE_CACHE);
  request_->set_request_context(request_context_getter_);
  chunk_request_start_ = base::Time::Now();
  request_->Start();
}

void SafeBrowsingProtocolManager::IssueKeyRequest() {
  GURL key_url = MacKeyUrl();
  request_type_ = GETKEY_REQUEST;
  request_.reset(new URLFetcher(key_url, URLFetcher::GET, this));
  request_->set_load_flags(net::LOAD_DISABLE_CACHE);
  request_->set_request_context(request_context_getter_);
  request_->Start();
}

void SafeBrowsingProtocolManager::OnGetChunksComplete(
    const std::vector<SBListChunkRanges>& lists, bool database_error) {
  DCHECK_EQ(request_type_, UPDATE_REQUEST);
  if (database_error) {
    UpdateFinished(false);
    ScheduleNextUpdate(false);
    return;
  }

  const bool use_mac = !client_key_.empty();

  // Format our stored chunks:
  std::string list_data;
  bool found_malware = false;
  bool found_phishing = false;
  for (size_t i = 0; i < lists.size(); ++i) {
    list_data.append(FormatList(lists[i], use_mac));
    if (lists[i].name == safe_browsing_util::kPhishingList)
      found_phishing = true;

    if (lists[i].name == safe_browsing_util::kMalwareList)
      found_malware = true;
  }

  // If we have an empty database, let the server know we want data for these
  // lists.
  if (!found_phishing)
    list_data.append(FormatList(
        SBListChunkRanges(safe_browsing_util::kPhishingList), use_mac));

  if (!found_malware)
    list_data.append(FormatList(
        SBListChunkRanges(safe_browsing_util::kMalwareList), use_mac));

  GURL update_url = UpdateUrl(use_mac);
  request_.reset(new URLFetcher(update_url, URLFetcher::POST, this));
  request_->set_load_flags(net::LOAD_DISABLE_CACHE);
  request_->set_request_context(request_context_getter_);
  request_->set_upload_data("text/plain", list_data);
  request_->Start();

  // Begin the update request timeout.
  update_timer_.Start(TimeDelta::FromSeconds(kSbMaxUpdateWaitSec), this,
                      &SafeBrowsingProtocolManager::UpdateResponseTimeout);
}

// If we haven't heard back from the server with an update response, this method
// will run. Close the current update session and schedule another update.
void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
  DCHECK_EQ(request_type_, UPDATE_REQUEST);
  request_.reset();
  UpdateFinished(false);
  ScheduleNextUpdate(false);
}

void SafeBrowsingProtocolManager::OnChunkInserted() {
  chunk_pending_to_write_ = false;

  if (chunk_request_urls_.empty()) {
    UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_);
    UpdateFinished(true);
  } else {
    IssueChunkRequest();
  }
}

void SafeBrowsingProtocolManager::ReportMalware(const GURL& malware_url,
                                                const GURL& page_url,
                                                const GURL& referrer_url,
                                                bool is_subresource) {
  GURL report_url = MalwareReportUrl(malware_url, page_url, referrer_url,
                                     is_subresource);
  URLFetcher* report = new URLFetcher(report_url, URLFetcher::GET, this);
  report->set_load_flags(net::LOAD_DISABLE_CACHE);
  report->set_request_context(request_context_getter_);
  report->Start();
  malware_reports_.insert(report);
}

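// Produces one line of the update request body per list. For example, a list
// with add chunks 1-3, sub chunk 2 and MACs enabled would be formatted as
// "<list name>;a:1-3:s:2:mac\n" (the chunk ranges here are illustrative).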
// static
std::string SafeBrowsingProtocolManager::FormatList(
    const SBListChunkRanges& list, bool use_mac) {
  std::string formatted_results;
  formatted_results.append(list.name);
  formatted_results.append(";");
  if (!list.adds.empty()) {
    formatted_results.append("a:" + list.adds);
    if (!list.subs.empty() || use_mac)
      formatted_results.append(":");
  }
  if (!list.subs.empty()) {
    formatted_results.append("s:" + list.subs);
    if (use_mac)
      formatted_results.append(":");
  }
  if (use_mac)
    formatted_results.append("mac");
  formatted_results.append("\n");

  return formatted_results;
}

void SafeBrowsingProtocolManager::HandleReKey() {
  client_key_.clear();
  wrapped_key_.clear();
  IssueKeyRequest();
}

void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) {
  int next = GetNextBackOffTime(&gethash_error_count_, &gethash_back_off_mult_);
  next_gethash_time_ = now + TimeDelta::FromSeconds(next);
}

void SafeBrowsingProtocolManager::UpdateFinished(bool success) {
  UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
  update_size_ = 0;
  sb_service_->UpdateFinished(success);
}

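// Builds a request URL of the form
//   <prefix>/<method>?client=<client_name>&appver=<version>&pver=2.2
// with |additional_query| appended verbatim when it is non-empty.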
std::string SafeBrowsingProtocolManager::ComposeUrl(
    const std::string& prefix, const std::string& method,
    const std::string& client_name, const std::string& version,
    const std::string& additional_query) {
  DCHECK(!prefix.empty() && !method.empty() &&
         !client_name.empty() && !version.empty());
  std::string url = StringPrintf("%s/%s?client=%s&appver=%s&pver=2.2",
                                 prefix.c_str(), method.c_str(),
                                 client_name.c_str(), version.c_str());
  if (!additional_query.empty()) {
    url.append(additional_query);
  }
  return url;
}

GURL SafeBrowsingProtocolManager::UpdateUrl(bool use_mac) const {
  std::string url = ComposeUrl(info_url_prefix_, "downloads", client_name_,
                               version_, additional_query_);
  if (use_mac) {
    url.append("&wrkey=");
    url.append(wrapped_key_);
  }
  return GURL(url);
}

GURL SafeBrowsingProtocolManager::GetHashUrl(bool use_mac) const {
  std::string url = ComposeUrl(info_url_prefix_, "gethash", client_name_,
                               version_, additional_query_);
  if (use_mac) {
    url.append("&wrkey=");
    url.append(wrapped_key_);
  }
  return GURL(url);
}

GURL SafeBrowsingProtocolManager::MacKeyUrl() const {
  return GURL(ComposeUrl(mackey_url_prefix_, "newkey", client_name_, version_,
                         additional_query_));
}

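// On top of ComposeUrl(), the malware report URL carries the query parameters
// evts (event type, always "malblhit"), evtd (the malware URL), evtr (the
// top-level page URL), evhr (the referrer URL) and evtb (1 if the resource was
// a subresource of the page, 0 otherwise).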
GURL SafeBrowsingProtocolManager::MalwareReportUrl(
    const GURL& malware_url, const GURL& page_url,
    const GURL& referrer_url, bool is_subresource) const {
  std::string url = ComposeUrl(info_url_prefix_, "report", client_name_,
                               version_, additional_query_);
  return GURL(StringPrintf("%s&evts=malblhit&evtd=%s&evtr=%s&evhr=%s&evtb=%d",
      url.c_str(), EscapeQueryParamValue(malware_url.spec(), true).c_str(),
      EscapeQueryParamValue(page_url.spec(), true).c_str(),
      EscapeQueryParamValue(referrer_url.spec(), true).c_str(),
      is_subresource));
}

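// Chunk URLs handed back in an update response may omit the scheme; default to
// http:// in that case and append |additional_query_|, if any, before fetching.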
GURL SafeBrowsingProtocolManager::NextChunkUrl(const std::string& url) const {
  std::string next_url;
  if (!StartsWithASCII(url, "http://", false) &&
      !StartsWithASCII(url, "https://", false)) {
    next_url = "http://" + url;
  } else {
    next_url = url;
  }
  if (!additional_query_.empty())
    next_url += additional_query_;
  return GURL(next_url);
}