blob: 886c8050bcd9dcf7aae93bd6ce2b804166efa3bd [file] [log] [blame]
// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
initial.commit09911bf2008-07-26 23:55:294
5#include "chrome/browser/safe_browsing/protocol_manager.h"
6
[email protected]978df342009-11-24 06:21:537#include "base/base64.h"
[email protected]76b90d312010-08-03 03:00:508#include "base/environment.h"
[email protected]484fce42008-10-01 00:37:189#include "base/file_version_info.h"
initial.commit09911bf2008-07-26 23:55:2910#include "base/histogram.h"
11#include "base/logging.h"
[email protected]05f9b682008-09-29 22:18:0112#include "base/rand_util.h"
[email protected]807204142009-05-05 03:31:4413#include "base/stl_util-inl.h"
initial.commit09911bf2008-07-26 23:55:2914#include "base/string_util.h"
15#include "base/task.h"
16#include "base/timer.h"
[email protected]d83d03aa2009-11-02 21:44:3717#include "chrome/browser/chrome_thread.h"
initial.commit09911bf2008-07-26 23:55:2918#include "chrome/browser/profile.h"
19#include "chrome/browser/safe_browsing/protocol_parser.h"
initial.commit09911bf2008-07-26 23:55:2920#include "chrome/browser/safe_browsing/safe_browsing_service.h"
[email protected]1eeb5e02010-07-20 23:02:1121#include "chrome/common/chrome_version_info.h"
initial.commit09911bf2008-07-26 23:55:2922#include "chrome/common/env_vars.h"
[email protected]68d2a05f2010-05-07 21:39:5523#include "chrome/common/net/url_request_context_getter.h"
[email protected]dfdb0de72009-02-19 21:58:1424#include "net/base/escape.h"
initial.commit09911bf2008-07-26 23:55:2925#include "net/base/load_flags.h"
[email protected]3c3f4ac52009-12-15 20:22:1726#include "net/url_request/url_request_status.h"
initial.commit09911bf2008-07-26 23:55:2927
[email protected]e1acf6f2008-10-27 20:43:3328using base::Time;
29using base::TimeDelta;
initial.commit09911bf2008-07-26 23:55:2930
31// Maximum time, in seconds, from start up before we must issue an update query.
[email protected]05f9b682008-09-29 22:18:0132static const int kSbTimerStartIntervalSec = 5 * 60;
initial.commit09911bf2008-07-26 23:55:2933
[email protected]a11c2c62009-08-07 22:47:5634// The maximum time, in seconds, to wait for a response to an update request.
35static const int kSbMaxUpdateWaitSec = 10;
36
initial.commit09911bf2008-07-26 23:55:2937// Maximum back off multiplier.
38static const int kSbMaxBackOff = 8;
39
40
initial.commit09911bf2008-07-26 23:55:2941// SafeBrowsingProtocolManager implementation ----------------------------------
42
43SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
44 SafeBrowsingService* sb_service,
[email protected]1a871512009-11-06 06:11:1845 const std::string& client_name,
initial.commit09911bf2008-07-26 23:55:2946 const std::string& client_key,
[email protected]d11f5662009-11-12 20:52:5647 const std::string& wrapped_key,
[email protected]894c4e82010-06-29 21:53:1848 URLRequestContextGetter* request_context_getter,
49 const std::string& info_url_prefix,
50 const std::string& mackey_url_prefix,
51 bool disable_auto_update)
initial.commit09911bf2008-07-26 23:55:2952 : sb_service_(sb_service),
53 request_type_(NO_REQUEST),
54 update_error_count_(0),
55 gethash_error_count_(0),
56 update_back_off_mult_(1),
57 gethash_back_off_mult_(1),
58 next_update_sec_(-1),
59 update_state_(FIRST_REQUEST),
60 initial_request_(true),
61 chunk_pending_to_write_(false),
initial.commit09911bf2008-07-26 23:55:2962 client_key_(client_key),
[email protected]6e3b12ff2009-01-06 22:17:5763 wrapped_key_(wrapped_key),
[email protected]1a871512009-11-06 06:11:1864 update_size_(0),
[email protected]d11f5662009-11-12 20:52:5665 client_name_(client_name),
[email protected]894c4e82010-06-29 21:53:1866 request_context_getter_(request_context_getter),
67 info_url_prefix_(info_url_prefix),
68 mackey_url_prefix_(mackey_url_prefix),
69 disable_auto_update_(disable_auto_update) {
70 DCHECK(!info_url_prefix_.empty() && !mackey_url_prefix_.empty());
71
initial.commit09911bf2008-07-26 23:55:2972 // Set the backoff multiplier fuzz to a random value between 0 and 1.
[email protected]05f9b682008-09-29 22:18:0173 back_off_fuzz_ = static_cast<float>(base::RandDouble());
[email protected]efbb60482009-11-12 21:38:5574 // The first update must happen between 1-5 minutes of start up.
[email protected]05f9b682008-09-29 22:18:0175 next_update_sec_ = base::RandInt(60, kSbTimerStartIntervalSec);
[email protected]484fce42008-10-01 00:37:1876
[email protected]1eeb5e02010-07-20 23:02:1177 scoped_ptr<FileVersionInfo> version_info(chrome::GetChromeVersionInfo());
[email protected]484fce42008-10-01 00:37:1878 if (!version_info.get())
79 version_ = "0.1";
80 else
81 version_ = WideToASCII(version_info->product_version());
initial.commit09911bf2008-07-26 23:55:2982}
83
84SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
initial.commit09911bf2008-07-26 23:55:2985 // Delete in-progress SafeBrowsing requests.
86 STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
87 hash_requests_.end());
88 hash_requests_.clear();
[email protected]dfdb0de72009-02-19 21:58:1489
90 // Delete in-progress malware reports.
91 STLDeleteContainerPointers(malware_reports_.begin(), malware_reports_.end());
92 malware_reports_.clear();
initial.commit09911bf2008-07-26 23:55:2993}
94
95// Public API used by the SafeBrowsingService ----------------------------------
96
97// We can only have one update or chunk request outstanding, but there may be
98// multiple GetHash requests pending since we don't want to serialize them and
99// slow down the user.
100void SafeBrowsingProtocolManager::GetFullHash(
101 SafeBrowsingService::SafeBrowsingCheck* check,
102 const std::vector<SBPrefix>& prefixes) {
103 // If we are in GetHash backoff, we need to check if we're past the next
104 // allowed time. If we are, we can proceed with the request. If not, we are
105 // required to return empty results (i.e. treat the page as safe).
106 if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
107 std::vector<SBFullHashResult> full_hashes;
[email protected]200abc32008-09-05 01:44:33108 sb_service_->HandleGetHashResults(check, full_hashes, false);
initial.commit09911bf2008-07-26 23:55:29109 return;
110 }
[email protected]894c4e82010-06-29 21:53:18111 bool use_mac = !client_key_.empty();
112 GURL gethash_url = GetHashUrl(use_mac);
initial.commit09911bf2008-07-26 23:55:29113 URLFetcher* fetcher = new URLFetcher(gethash_url, URLFetcher::POST, this);
114 hash_requests_[fetcher] = check;
115
116 std::string get_hash;
117 SafeBrowsingProtocolParser parser;
118 parser.FormatGetHash(prefixes, &get_hash);
119
120 fetcher->set_load_flags(net::LOAD_DISABLE_CACHE);
[email protected]d11f5662009-11-12 20:52:56121 fetcher->set_request_context(request_context_getter_);
[email protected]d36e3c8e2008-08-29 23:42:20122 fetcher->set_upload_data("text/plain", get_hash);
initial.commit09911bf2008-07-26 23:55:29123 fetcher->Start();
124}
125
126void SafeBrowsingProtocolManager::GetNextUpdate() {
127 if (initial_request_) {
128 if (client_key_.empty() || wrapped_key_.empty()) {
129 IssueKeyRequest();
130 return;
131 } else {
132 initial_request_ = false;
133 }
134 }
135
136 if (!request_.get())
137 IssueUpdateRequest();
138}
139
140// URLFetcher::Delegate implementation -----------------------------------------
141
142// All SafeBrowsing request responses are handled here.
143// TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
144// chunk should retry the download and parse of that chunk (and
145// what back off / how many times to try), and if that effects the
146// update back off. For now, a failed parse of the chunk means we
147// drop it. This isn't so bad because the next UPDATE_REQUEST we
148// do will report all the chunks we have. If that chunk is still
149// required, the SafeBrowsing servers will tell us to get it again.
150void SafeBrowsingProtocolManager::OnURLFetchComplete(
151 const URLFetcher* source,
152 const GURL& url,
153 const URLRequestStatus& status,
154 int response_code,
155 const ResponseCookies& cookies,
156 const std::string& data) {
157 scoped_ptr<const URLFetcher> fetcher;
158 bool parsed_ok = true;
159 bool must_back_off = false; // Reduce SafeBrowsing service query frequency.
160
[email protected]dfdb0de72009-02-19 21:58:14161 // See if this is a malware report fetcher. We don't take any action for
162 // the response to those.
163 std::set<const URLFetcher*>::iterator mit = malware_reports_.find(source);
164 if (mit != malware_reports_.end()) {
165 const URLFetcher* report = *mit;
166 malware_reports_.erase(mit);
167 delete report;
168 return;
169 }
170
initial.commit09911bf2008-07-26 23:55:29171 HashRequests::iterator it = hash_requests_.find(source);
172 if (it != hash_requests_.end()) {
173 // GetHash response.
174 fetcher.reset(it->first);
175 SafeBrowsingService::SafeBrowsingCheck* check = it->second;
176 std::vector<SBFullHashResult> full_hashes;
[email protected]200abc32008-09-05 01:44:33177 bool can_cache = false;
initial.commit09911bf2008-07-26 23:55:29178 if (response_code == 200 || response_code == 204) {
[email protected]682343d2009-04-17 19:51:40179 // For tracking our GetHash false positive (204) rate, compared to real
180 // (200) responses.
181 if (response_code == 200)
182 UMA_HISTOGRAM_COUNTS("SB2.GetHash200", 1);
183 else
184 UMA_HISTOGRAM_COUNTS("SB2.GetHash204", 1);
[email protected]200abc32008-09-05 01:44:33185 can_cache = true;
initial.commit09911bf2008-07-26 23:55:29186 gethash_error_count_ = 0;
187 gethash_back_off_mult_ = 1;
188 bool re_key = false;
189 SafeBrowsingProtocolParser parser;
190 parsed_ok = parser.ParseGetHash(data.data(),
191 static_cast<int>(data.length()),
192 client_key_,
193 &re_key,
194 &full_hashes);
195 if (!parsed_ok) {
196 // If we fail to parse it, we must still inform the SafeBrowsingService
197 // so that it doesn't hold up the user's request indefinitely. Not sure
198 // what to do at that point though!
199 full_hashes.clear();
200 } else {
201 if (re_key)
202 HandleReKey();
203 }
[email protected]3c3f4ac52009-12-15 20:22:17204 } else {
[email protected]7bdc1bf2009-07-28 15:48:03205 HandleGetHashError(Time::Now());
[email protected]3c3f4ac52009-12-15 20:22:17206 if (status.status() == URLRequestStatus::FAILED) {
207 SB_DLOG(INFO) << "SafeBrowsing GetHash request for: " << source->url()
208 << " failed with os error: " << status.os_error();
209 } else {
210 SB_DLOG(INFO) << "SafeBrowsing GetHash request for: " << source->url()
211 << " failed with error: " << response_code;
212 }
initial.commit09911bf2008-07-26 23:55:29213 }
214
215 // Call back the SafeBrowsingService with full_hashes, even if there was a
216 // parse error or an error response code (in which case full_hashes will be
217 // empty). We can't block the user regardless of the error status.
[email protected]200abc32008-09-05 01:44:33218 sb_service_->HandleGetHashResults(check, full_hashes, can_cache);
initial.commit09911bf2008-07-26 23:55:29219
220 hash_requests_.erase(it);
221 } else {
222 // Update, chunk or key response.
initial.commit09911bf2008-07-26 23:55:29223 fetcher.reset(request_.release());
224
[email protected]a11c2c62009-08-07 22:47:56225 if (request_type_ == UPDATE_REQUEST) {
226 if (!fetcher.get()) {
227 // We've timed out waiting for an update response, so we've cancelled
228 // the update request and scheduled a new one. Ignore this response.
229 return;
230 }
231
232 // Cancel the update response timeout now that we have the response.
233 update_timer_.Stop();
234 }
235
initial.commit09911bf2008-07-26 23:55:29236 if (response_code == 200) {
237 // We have data from the SafeBrowsing service.
238 parsed_ok = HandleServiceResponse(source->url(),
239 data.data(),
240 static_cast<int>(data.length()));
241 if (!parsed_ok) {
242 SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
243 << "failed parse.";
[email protected]22717d1e2008-10-15 21:55:32244 must_back_off = true;
245 chunk_request_urls_.clear();
[email protected]6e3b12ff2009-01-06 22:17:57246 UpdateFinished(false);
initial.commit09911bf2008-07-26 23:55:29247 }
248
[email protected]cb1cdf492009-01-16 23:51:44249 switch (request_type_) {
250 case CHUNK_REQUEST:
251 if (parsed_ok)
252 chunk_request_urls_.pop_front();
253 break;
254 case GETKEY_REQUEST:
255 if (initial_request_) {
256 // This is the first request we've made this session. Now that we
257 // have the keys, do the regular update request.
258 initial_request_ = false;
259 GetNextUpdate();
260 return;
261 }
262 break;
263 case UPDATE_REQUEST:
264 if (chunk_request_urls_.empty() && parsed_ok) {
265 // We are up to date since the servers gave us nothing new, so we
266 // are done with this update cycle.
267 UpdateFinished(true);
268 }
269 break;
[email protected]7e242b52009-02-05 12:31:02270 default:
271 NOTREACHED();
272 break;
initial.commit09911bf2008-07-26 23:55:29273 }
[email protected]3c3f4ac52009-12-15 20:22:17274 } else {
275 // The SafeBrowsing service error, or very bad response code: back off.
initial.commit09911bf2008-07-26 23:55:29276 must_back_off = true;
277 if (request_type_ == CHUNK_REQUEST)
278 chunk_request_urls_.clear();
[email protected]6e3b12ff2009-01-06 22:17:57279 UpdateFinished(false);
[email protected]3c3f4ac52009-12-15 20:22:17280 if (status.status() == URLRequestStatus::FAILED) {
281 SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
282 << " failed with os error: " << status.os_error();
283 } else {
284 SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
285 << " failed with error: " << response_code;
286 }
initial.commit09911bf2008-07-26 23:55:29287 }
288 }
289
290 // Schedule a new update request if we've finished retrieving all the chunks
291 // from the previous update. We treat the update request and the chunk URLs it
292 // contains as an atomic unit as far as back off is concerned.
293 if (chunk_request_urls_.empty() &&
294 (request_type_ == CHUNK_REQUEST || request_type_ == UPDATE_REQUEST))
295 ScheduleNextUpdate(must_back_off);
296
297 // Get the next chunk if available.
298 IssueChunkRequest();
299}
300
301bool SafeBrowsingProtocolManager::HandleServiceResponse(const GURL& url,
302 const char* data,
303 int length) {
304 SafeBrowsingProtocolParser parser;
305
306 switch (request_type_) {
307 case UPDATE_REQUEST: {
308 int next_update_sec = -1;
309 bool re_key = false;
310 bool reset = false;
[email protected]7b1e37102010-03-08 21:43:16311 scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes(
312 new std::vector<SBChunkDelete>);
initial.commit09911bf2008-07-26 23:55:29313 std::vector<ChunkUrl> chunk_urls;
314 if (!parser.ParseUpdate(data, length, client_key_,
315 &next_update_sec, &re_key,
[email protected]7b1e37102010-03-08 21:43:16316 &reset, chunk_deletes.get(), &chunk_urls)) {
initial.commit09911bf2008-07-26 23:55:29317 return false;
318 }
319
320 last_update_ = Time::Now();
321
322 if (update_state_ == FIRST_REQUEST)
323 update_state_ = SECOND_REQUEST;
324 else if (update_state_ == SECOND_REQUEST)
325 update_state_ = NORMAL_REQUEST;
326
327 // New time for the next update.
328 if (next_update_sec > 0) {
329 next_update_sec_ = next_update_sec;
330 } else if (update_state_ == SECOND_REQUEST) {
[email protected]05f9b682008-09-29 22:18:01331 next_update_sec_ = base::RandInt(15 * 60, 45 * 60);
initial.commit09911bf2008-07-26 23:55:29332 }
333
334 // We need to request a new set of keys for MAC.
335 if (re_key)
336 HandleReKey();
337
338 // New chunks to download.
339 if (!chunk_urls.empty()) {
[email protected]553dba62009-02-24 19:08:23340 UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size());
initial.commit09911bf2008-07-26 23:55:29341 for (size_t i = 0; i < chunk_urls.size(); ++i)
342 chunk_request_urls_.push_back(chunk_urls[i]);
343 }
344
345 // Handle the case were the SafeBrowsing service tells us to dump our
346 // database.
347 if (reset) {
348 sb_service_->ResetDatabase();
349 return true;
350 }
351
[email protected]7b1e37102010-03-08 21:43:16352 // Chunks to delete from our storage. Pass ownership of
353 // |chunk_deletes|.
initial.commit09911bf2008-07-26 23:55:29354 if (!chunk_deletes->empty())
[email protected]7b1e37102010-03-08 21:43:16355 sb_service_->HandleChunkDelete(chunk_deletes.release());
initial.commit09911bf2008-07-26 23:55:29356
357 break;
358 }
359 case CHUNK_REQUEST: {
[email protected]484c57a2009-03-21 01:24:01360 UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
361 base::Time::Now() - chunk_request_start_);
[email protected]22573822008-11-14 00:40:47362
initial.commit09911bf2008-07-26 23:55:29363 const ChunkUrl chunk_url = chunk_request_urls_.front();
initial.commit09911bf2008-07-26 23:55:29364 bool re_key = false;
[email protected]7b1e37102010-03-08 21:43:16365 scoped_ptr<SBChunkList> chunks(new SBChunkList);
[email protected]553dba62009-02-24 19:08:23366 UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length);
[email protected]6e3b12ff2009-01-06 22:17:57367 update_size_ += length;
initial.commit09911bf2008-07-26 23:55:29368 if (!parser.ParseChunk(data, length,
369 client_key_, chunk_url.mac,
[email protected]7b1e37102010-03-08 21:43:16370 &re_key, chunks.get())) {
initial.commit09911bf2008-07-26 23:55:29371#ifndef NDEBUG
372 std::string data_str;
373 data_str.assign(data, length);
374 std::string encoded_chunk;
[email protected]978df342009-11-24 06:21:53375 base::Base64Encode(data, &encoded_chunk);
initial.commit09911bf2008-07-26 23:55:29376 SB_DLOG(INFO) << "ParseChunk error for chunk: " << chunk_url.url
377 << ", client_key: " << client_key_
378 << ", wrapped_key: " << wrapped_key_
379 << ", mac: " << chunk_url.mac
380 << ", Base64Encode(data): " << encoded_chunk
381 << ", length: " << length;
382#endif
initial.commit09911bf2008-07-26 23:55:29383 return false;
384 }
385
386 if (re_key)
387 HandleReKey();
388
[email protected]7b1e37102010-03-08 21:43:16389 // Chunks to add to storage. Pass ownership of |chunks|.
390 if (!chunks->empty()) {
initial.commit09911bf2008-07-26 23:55:29391 chunk_pending_to_write_ = true;
[email protected]7b1e37102010-03-08 21:43:16392 sb_service_->HandleChunk(chunk_url.list_name, chunks.release());
initial.commit09911bf2008-07-26 23:55:29393 }
394
395 break;
396 }
397 case GETKEY_REQUEST: {
398 std::string client_key, wrapped_key;
399 if (!parser.ParseNewKey(data, length, &client_key, &wrapped_key))
400 return false;
401
402 client_key_ = client_key;
403 wrapped_key_ = wrapped_key;
[email protected]d83d03aa2009-11-02 21:44:37404 ChromeThread::PostTask(
405 ChromeThread::UI, FROM_HERE,
406 NewRunnableMethod(
407 sb_service_, &SafeBrowsingService::OnNewMacKeys, client_key_,
408 wrapped_key_));
initial.commit09911bf2008-07-26 23:55:29409 break;
410 }
411
412 default:
413 return false;
414 }
415
416 return true;
417}
418
419void SafeBrowsingProtocolManager::Initialize() {
420 // Don't want to hit the safe browsing servers on build/chrome bots.
[email protected]76b90d312010-08-03 03:00:50421 scoped_ptr<base::Environment> env(base::Environment::Create());
[email protected]c83dd912010-04-06 18:50:51422 if (env->HasEnv(env_vars::kHeadless))
initial.commit09911bf2008-07-26 23:55:29423 return;
424
425 ScheduleNextUpdate(false /* no back off */);
426}
427
428void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
[email protected]894c4e82010-06-29 21:53:18429 DCHECK_GT(next_update_sec_, 0);
initial.commit09911bf2008-07-26 23:55:29430
[email protected]894c4e82010-06-29 21:53:18431 if (disable_auto_update_) {
432 // Unschedule any current timer.
433 update_timer_.Stop();
434 return;
435 }
initial.commit09911bf2008-07-26 23:55:29436 // Reschedule with the new update.
437 const int next_update = GetNextUpdateTime(back_off);
[email protected]894c4e82010-06-29 21:53:18438 ForceScheduleNextUpdate(next_update);
439}
440
441void SafeBrowsingProtocolManager::ForceScheduleNextUpdate(
442 const int next_update_msec) {
443 DCHECK_GE(next_update_msec, 0);
444 // Unschedule any current timer.
445 update_timer_.Stop();
446 update_timer_.Start(TimeDelta::FromMilliseconds(next_update_msec), this,
[email protected]2d316662008-09-03 18:18:14447 &SafeBrowsingProtocolManager::GetNextUpdate);
initial.commit09911bf2008-07-26 23:55:29448}
449
450// According to section 5 of the SafeBrowsing protocol specification, we must
451// back off after a certain number of errors. We only change 'next_update_sec_'
452// when we receive a response from the SafeBrowsing service.
453int SafeBrowsingProtocolManager::GetNextUpdateTime(bool back_off) {
454 int next = next_update_sec_;
455 if (back_off) {
456 next = GetNextBackOffTime(&update_error_count_, &update_back_off_mult_);
457 } else {
458 // Successful response means error reset.
459 update_error_count_ = 0;
460 update_back_off_mult_ = 1;
461 }
462 return next * 1000; // milliseconds
463}
464
465int SafeBrowsingProtocolManager::GetNextBackOffTime(int* error_count,
466 int* multiplier) {
467 DCHECK(multiplier && error_count);
468 (*error_count)++;
469 if (*error_count > 1 && *error_count < 6) {
470 int next = static_cast<int>(*multiplier * (1 + back_off_fuzz_) * 30 * 60);
471 *multiplier *= 2;
472 if (*multiplier > kSbMaxBackOff)
473 *multiplier = kSbMaxBackOff;
474 return next;
475 }
476
477 if (*error_count >= 6)
478 return 60 * 60 * 8; // 8 hours
479
480 return 60; // 1 minute
481}
482
483// This request requires getting a list of all the chunks for each list from the
484// database asynchronously. The request will be issued when we're called back in
485// OnGetChunksComplete.
486// TODO(paulg): We should get this at start up and maintain a ChunkRange cache
487// to avoid hitting the database with each update request. On the
488// otherhand, this request will only occur ~20-30 minutes so there
489// isn't that much overhead. Measure!
490void SafeBrowsingProtocolManager::IssueUpdateRequest() {
491 request_type_ = UPDATE_REQUEST;
[email protected]57119c3f2008-12-04 00:33:04492 sb_service_->UpdateStarted();
initial.commit09911bf2008-07-26 23:55:29493}
494
495void SafeBrowsingProtocolManager::IssueChunkRequest() {
496 // We are only allowed to have one request outstanding at any time. Also,
497 // don't get the next url until the previous one has been written to disk so
498 // that we don't use too much memory.
499 if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
500 return;
501
502 ChunkUrl next_chunk = chunk_request_urls_.front();
503 DCHECK(!next_chunk.url.empty());
[email protected]894c4e82010-06-29 21:53:18504 GURL chunk_url = NextChunkUrl(next_chunk.url);
initial.commit09911bf2008-07-26 23:55:29505 request_type_ = CHUNK_REQUEST;
506 request_.reset(new URLFetcher(chunk_url, URLFetcher::GET, this));
507 request_->set_load_flags(net::LOAD_DISABLE_CACHE);
[email protected]d11f5662009-11-12 20:52:56508 request_->set_request_context(request_context_getter_);
[email protected]22573822008-11-14 00:40:47509 chunk_request_start_ = base::Time::Now();
initial.commit09911bf2008-07-26 23:55:29510 request_->Start();
511}
512
513void SafeBrowsingProtocolManager::IssueKeyRequest() {
[email protected]894c4e82010-06-29 21:53:18514 GURL key_url = MacKeyUrl();
initial.commit09911bf2008-07-26 23:55:29515 request_type_ = GETKEY_REQUEST;
516 request_.reset(new URLFetcher(key_url, URLFetcher::GET, this));
517 request_->set_load_flags(net::LOAD_DISABLE_CACHE);
[email protected]d11f5662009-11-12 20:52:56518 request_->set_request_context(request_context_getter_);
initial.commit09911bf2008-07-26 23:55:29519 request_->Start();
520}
521
522void SafeBrowsingProtocolManager::OnGetChunksComplete(
523 const std::vector<SBListChunkRanges>& lists, bool database_error) {
[email protected]894c4e82010-06-29 21:53:18524 DCHECK_EQ(request_type_, UPDATE_REQUEST);
initial.commit09911bf2008-07-26 23:55:29525 if (database_error) {
[email protected]a11c2c62009-08-07 22:47:56526 UpdateFinished(false);
[email protected]3c3f4ac52009-12-15 20:22:17527 ScheduleNextUpdate(false);
initial.commit09911bf2008-07-26 23:55:29528 return;
529 }
530
531 const bool use_mac = !client_key_.empty();
532
533 // Format our stored chunks:
534 std::string list_data;
535 bool found_malware = false;
536 bool found_phishing = false;
537 for (size_t i = 0; i < lists.size(); ++i) {
538 list_data.append(FormatList(lists[i], use_mac));
[email protected]c3ff89492008-11-11 02:17:51539 if (lists[i].name == safe_browsing_util::kPhishingList)
initial.commit09911bf2008-07-26 23:55:29540 found_phishing = true;
541
[email protected]c3ff89492008-11-11 02:17:51542 if (lists[i].name == safe_browsing_util::kMalwareList)
initial.commit09911bf2008-07-26 23:55:29543 found_malware = true;
544 }
545
546 // If we have an empty database, let the server know we want data for these
547 // lists.
548 if (!found_phishing)
[email protected]c3ff89492008-11-11 02:17:51549 list_data.append(FormatList(
550 SBListChunkRanges(safe_browsing_util::kPhishingList), use_mac));
initial.commit09911bf2008-07-26 23:55:29551
552 if (!found_malware)
[email protected]c3ff89492008-11-11 02:17:51553 list_data.append(FormatList(
554 SBListChunkRanges(safe_browsing_util::kMalwareList), use_mac));
initial.commit09911bf2008-07-26 23:55:29555
[email protected]894c4e82010-06-29 21:53:18556 GURL update_url = UpdateUrl(use_mac);
initial.commit09911bf2008-07-26 23:55:29557 request_.reset(new URLFetcher(update_url, URLFetcher::POST, this));
558 request_->set_load_flags(net::LOAD_DISABLE_CACHE);
[email protected]d11f5662009-11-12 20:52:56559 request_->set_request_context(request_context_getter_);
initial.commit09911bf2008-07-26 23:55:29560 request_->set_upload_data("text/plain", list_data);
561 request_->Start();
[email protected]a11c2c62009-08-07 22:47:56562
563 // Begin the update request timeout.
564 update_timer_.Start(TimeDelta::FromSeconds(kSbMaxUpdateWaitSec), this,
565 &SafeBrowsingProtocolManager::UpdateResponseTimeout);
566}
567
568// If we haven't heard back from the server with an update response, this method
569// will run. Close the current update session and schedule another update.
570void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
[email protected]894c4e82010-06-29 21:53:18571 DCHECK_EQ(request_type_, UPDATE_REQUEST);
[email protected]a11c2c62009-08-07 22:47:56572 request_.reset();
[email protected]a11c2c62009-08-07 22:47:56573 UpdateFinished(false);
[email protected]3c3f4ac52009-12-15 20:22:17574 ScheduleNextUpdate(false);
initial.commit09911bf2008-07-26 23:55:29575}
576
577void SafeBrowsingProtocolManager::OnChunkInserted() {
578 chunk_pending_to_write_ = false;
579
580 if (chunk_request_urls_.empty()) {
[email protected]484c57a2009-03-21 01:24:01581 UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_);
[email protected]6e3b12ff2009-01-06 22:17:57582 UpdateFinished(true);
initial.commit09911bf2008-07-26 23:55:29583 } else {
584 IssueChunkRequest();
585 }
586}
587
[email protected]dfdb0de72009-02-19 21:58:14588void SafeBrowsingProtocolManager::ReportMalware(const GURL& malware_url,
589 const GURL& page_url,
590 const GURL& referrer_url) {
[email protected]894c4e82010-06-29 21:53:18591 GURL report_url = MalwareReportUrl(malware_url, page_url, referrer_url);
[email protected]dfdb0de72009-02-19 21:58:14592 URLFetcher* report = new URLFetcher(report_url, URLFetcher::GET, this);
593 report->set_load_flags(net::LOAD_DISABLE_CACHE);
[email protected]d11f5662009-11-12 20:52:56594 report->set_request_context(request_context_getter_);
[email protected]dfdb0de72009-02-19 21:58:14595 report->Start();
596 malware_reports_.insert(report);
597}
598
initial.commit09911bf2008-07-26 23:55:29599// static
600std::string SafeBrowsingProtocolManager::FormatList(
601 const SBListChunkRanges& list, bool use_mac) {
602 std::string formatted_results;
603 formatted_results.append(list.name);
604 formatted_results.append(";");
605 if (!list.adds.empty()) {
606 formatted_results.append("a:" + list.adds);
607 if (!list.subs.empty() || use_mac)
608 formatted_results.append(":");
609 }
610 if (!list.subs.empty()) {
611 formatted_results.append("s:" + list.subs);
612 if (use_mac)
613 formatted_results.append(":");
614 }
615 if (use_mac)
616 formatted_results.append("mac");
617 formatted_results.append("\n");
618
619 return formatted_results;
620}
621
622void SafeBrowsingProtocolManager::HandleReKey() {
623 client_key_.clear();
624 wrapped_key_.clear();
625 IssueKeyRequest();
626}
627
[email protected]7bdc1bf2009-07-28 15:48:03628void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) {
initial.commit09911bf2008-07-26 23:55:29629 int next = GetNextBackOffTime(&gethash_error_count_, &gethash_back_off_mult_);
[email protected]7bdc1bf2009-07-28 15:48:03630 next_gethash_time_ = now + TimeDelta::FromSeconds(next);
initial.commit09911bf2008-07-26 23:55:29631}
[email protected]6e3b12ff2009-01-06 22:17:57632
633void SafeBrowsingProtocolManager::UpdateFinished(bool success) {
[email protected]553dba62009-02-24 19:08:23634 UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
[email protected]6e3b12ff2009-01-06 22:17:57635 update_size_ = 0;
636 sb_service_->UpdateFinished(success);
[email protected]7e242b52009-02-05 12:31:02637}
[email protected]894c4e82010-06-29 21:53:18638
639std::string SafeBrowsingProtocolManager::ComposeUrl(
640 const std::string& prefix, const std::string& method,
641 const std::string& client_name, const std::string& version,
642 const std::string& additional_query) {
643 DCHECK(!prefix.empty() && !method.empty() &&
644 !client_name.empty() && !version.empty());
645 std::string url = StringPrintf("%s/%s?client=%s&appver=%s&pver=2.2",
646 prefix.c_str(), method.c_str(),
647 client_name.c_str(), version.c_str());
648 if (!additional_query.empty()) {
649 url.append(additional_query);
650 }
651 return url;
652}
653
654GURL SafeBrowsingProtocolManager::UpdateUrl(bool use_mac) const {
655 std::string url = ComposeUrl(info_url_prefix_, "downloads", client_name_,
656 version_, additional_query_);
657 if (use_mac) {
658 url.append("&wrkey=");
659 url.append(wrapped_key_);
660 }
661 return GURL(url);
662}
663
664GURL SafeBrowsingProtocolManager::GetHashUrl(bool use_mac) const {
665 std::string url= ComposeUrl(info_url_prefix_, "gethash", client_name_,
666 version_, additional_query_);
667 if (use_mac) {
668 url.append("&wrkey=");
669 url.append(wrapped_key_);
670 }
671 return GURL(url);
672}
673
674GURL SafeBrowsingProtocolManager::MacKeyUrl() const {
675 return GURL(ComposeUrl(mackey_url_prefix_, "newkey", client_name_, version_,
676 additional_query_));
677}
678
679GURL SafeBrowsingProtocolManager::MalwareReportUrl(
680 const GURL& malware_url, const GURL& page_url,
681 const GURL& referrer_url) const {
682 std::string url = ComposeUrl(info_url_prefix_, "report", client_name_,
683 version_, additional_query_);
684 return GURL(StringPrintf("%s&evts=malblhit&evtd=%s&evtr=%s&evhr=%s",
685 url.c_str(), EscapeQueryParamValue(malware_url.spec(), true).c_str(),
686 EscapeQueryParamValue(page_url.spec(), true).c_str(),
687 EscapeQueryParamValue(referrer_url.spec(), true).c_str()));
688}
689
690GURL SafeBrowsingProtocolManager::NextChunkUrl(const std::string& url) const {
691 std::string next_url;
692 if (!StartsWithASCII(url, "http://", false) &&
693 !StartsWithASCII(url, "https://", false)) {
694 next_url = "http://" + url;
695 } else {
696 next_url = url;
697 }
698 if (!additional_query_.empty())
699 next_url += additional_query_;
700 return GURL(next_url);
701}