blob: c81e96a9d2d44430ad20513682243781f769c525 [file] [log] [blame]
license.botbf09a502008-08-24 00:55:551// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
initial.commit09911bf2008-07-26 23:55:294
5#include "chrome/browser/safe_browsing/protocol_manager.h"
6
[email protected]484fce42008-10-01 00:37:187#include "base/file_version_info.h"
initial.commit09911bf2008-07-26 23:55:298#include "base/histogram.h"
9#include "base/logging.h"
10#include "base/message_loop.h"
[email protected]05f9b682008-09-29 22:18:0111#include "base/rand_util.h"
[email protected]807204142009-05-05 03:31:4412#include "base/stl_util-inl.h"
initial.commit09911bf2008-07-26 23:55:2913#include "base/string_util.h"
[email protected]05f9b682008-09-29 22:18:0114#include "base/sys_info.h"
initial.commit09911bf2008-07-26 23:55:2915#include "base/task.h"
16#include "base/timer.h"
17#include "chrome/browser/profile.h"
18#include "chrome/browser/safe_browsing/protocol_parser.h"
initial.commit09911bf2008-07-26 23:55:2919#include "chrome/browser/safe_browsing/safe_browsing_service.h"
initial.commit09911bf2008-07-26 23:55:2920#include "chrome/common/env_vars.h"
initial.commit09911bf2008-07-26 23:55:2921#include "net/base/base64.h"
[email protected]dfdb0de72009-02-19 21:58:1422#include "net/base/escape.h"
initial.commit09911bf2008-07-26 23:55:2923#include "net/base/load_flags.h"
24
[email protected]e1acf6f2008-10-27 20:43:3325using base::Time;
26using base::TimeDelta;
initial.commit09911bf2008-07-26 23:55:2927
// Maximum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSec = 5 * 60;

// The maximum time, in seconds, to wait for a response to an update request.
static const int kSbMaxUpdateWaitSec = 10;

// Update URL for querying about the latest set of chunk updates.
// printf arguments: client name, client version.
static const char* const kSbUpdateUrl =
    "http://safebrowsing.clients.google.com/safebrowsing/downloads?client=%s"
    "&appver=%s&pver=2.2";

// GetHash request URL for retrieving full hashes.
// printf arguments: client name, client version.
static const char* const kSbGetHashUrl =
    "http://safebrowsing.clients.google.com/safebrowsing/gethash?client=%s"
    "&appver=%s&pver=2.2";

// New MAC client key requests URL. Sent over SSL because the response
// contains key material. printf arguments: client name, client version.
static const char* const kSbNewKeyUrl =
    "https://sb-ssl.google.com/safebrowsing/newkey?client=%s&appver=%s"
    "&pver=2.2";

// URL for reporting malware pages. printf arguments: malware URL, page URL,
// referrer URL, client name, client version (all URL query-escaped).
static const char* const kSbMalwareReportUrl =
    "http://safebrowsing.clients.google.com/safebrowsing/report?evts=malblhit"
    "&evtd=%s&evtr=%s&evhr=%s&client=%s&appver=%s";

// Client name reported to the SafeBrowsing servers, depending on the build.
#if defined(GOOGLE_CHROME_BUILD)
static const char* const kSbClientName = "googlechrome";
#else
static const char* const kSbClientName = "chromium";
#endif

// Maximum back off multiplier.
static const int kSbMaxBackOff = 8;
initial.commit09911bf2008-07-26 23:55:2964// SafeBrowsingProtocolManager implementation ----------------------------------
65
// Constructs the protocol manager. 'sb_service' must outlive this object;
// 'client_key'/'wrapped_key' are the persisted MAC keys (may be empty, in
// which case a key request is issued before the first update).
SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
    SafeBrowsingService* sb_service,
    MessageLoop* notify_loop,
    const std::string& client_key,
    const std::string& wrapped_key)
    : sb_service_(sb_service),
      request_type_(NO_REQUEST),
      update_error_count_(0),
      gethash_error_count_(0),
      update_back_off_mult_(1),
      gethash_back_off_mult_(1),
      next_update_sec_(-1),
      update_state_(FIRST_REQUEST),
      initial_request_(true),
      chunk_pending_to_write_(false),
      notify_loop_(notify_loop),
      client_key_(client_key),
      wrapped_key_(wrapped_key),
      update_size_(0) {
  // Set the backoff multiplier fuzz to a random value between 0 and 1, so
  // that clients don't back off in lockstep.
  back_off_fuzz_ = static_cast<float>(base::RandDouble());

  // The first update must happen between 1-5 minutes of start up.
  next_update_sec_ = base::RandInt(60, kSbTimerStartIntervalSec);

  // Record the product version for the 'appver' query parameter; fall back
  // to "0.1" when version info is unavailable.
  scoped_ptr<FileVersionInfo> version_info(
      FileVersionInfo::CreateFileVersionInfoForCurrentModule());
  if (!version_info.get())
    version_ = "0.1";
  else
    version_ = WideToASCII(version_info->product_version());
}
98
SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
  // Delete in-progress SafeBrowsing requests (we own the URLFetcher keys;
  // the SafeBrowsingCheck values are owned by the service).
  STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
                                      hash_requests_.end());
  hash_requests_.clear();

  // Delete in-progress malware reports (fire-and-forget fetchers we own).
  STLDeleteContainerPointers(malware_reports_.begin(), malware_reports_.end());
  malware_reports_.clear();
}
109
// Public API used by the SafeBrowsingService ----------------------------------

// We can only have one update or chunk request outstanding, but there may be
// multiple GetHash requests pending since we don't want to serialize them and
// slow down the user.
void SafeBrowsingProtocolManager::GetFullHash(
    SafeBrowsingService::SafeBrowsingCheck* check,
    const std::vector<SBPrefix>& prefixes) {
  // If we are in GetHash backoff, we need to check if we're past the next
  // allowed time. If we are, we can proceed with the request. If not, we are
  // required to return empty results (i.e. treat the page as safe).
  if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
    std::vector<SBFullHashResult> full_hashes;
    // 'false' means the (empty) result must not be cached.
    sb_service_->HandleGetHashResults(check, full_hashes, false);
    return;
  }

  std::string url = StringPrintf(kSbGetHashUrl,
                                 kSbClientName,
                                 version_.c_str());
  // Append the wrapped key so the server can MAC the response.
  if (!client_key_.empty()) {
    url.append("&wrkey=");
    url.append(wrapped_key_);
  }

  GURL gethash_url(url);
  URLFetcher* fetcher = new URLFetcher(gethash_url, URLFetcher::POST, this);
  // The fetcher is owned by hash_requests_ until OnURLFetchComplete runs.
  hash_requests_[fetcher] = check;

  // The POST body is the formatted list of prefixes to resolve.
  std::string get_hash;
  SafeBrowsingProtocolParser parser;
  parser.FormatGetHash(prefixes, &get_hash);

  fetcher->set_load_flags(net::LOAD_DISABLE_CACHE);
  fetcher->set_request_context(Profile::GetDefaultRequestContext());
  fetcher->set_upload_data("text/plain", get_hash);
  fetcher->Start();
}
148
149void SafeBrowsingProtocolManager::GetNextUpdate() {
150 if (initial_request_) {
151 if (client_key_.empty() || wrapped_key_.empty()) {
152 IssueKeyRequest();
153 return;
154 } else {
155 initial_request_ = false;
156 }
157 }
158
159 if (!request_.get())
160 IssueUpdateRequest();
161}
162
// URLFetcher::Delegate implementation -----------------------------------------

// All SafeBrowsing request responses are handled here.
// TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
//              chunk should retry the download and parse of that chunk (and
//              what back off / how many times to try), and if that affects the
//              update back off. For now, a failed parse of the chunk means we
//              drop it. This isn't so bad because the next UPDATE_REQUEST we
//              do will report all the chunks we have. If that chunk is still
//              required, the SafeBrowsing servers will tell us to get it again.
void SafeBrowsingProtocolManager::OnURLFetchComplete(
    const URLFetcher* source,
    const GURL& url,
    const URLRequestStatus& status,
    int response_code,
    const ResponseCookies& cookies,
    const std::string& data) {
  // Takes ownership of 'source' (or of request_) so the fetcher is deleted
  // when this method returns.
  scoped_ptr<const URLFetcher> fetcher;
  bool parsed_ok = true;
  bool must_back_off = false;  // Reduce SafeBrowsing service query frequency.

  // See if this is a malware report fetcher. We don't take any action for
  // the response to those.
  std::set<const URLFetcher*>::iterator mit = malware_reports_.find(source);
  if (mit != malware_reports_.end()) {
    const URLFetcher* report = *mit;
    malware_reports_.erase(mit);
    delete report;
    return;
  }

  HashRequests::iterator it = hash_requests_.find(source);
  if (it != hash_requests_.end()) {
    // GetHash response.
    fetcher.reset(it->first);
    SafeBrowsingService::SafeBrowsingCheck* check = it->second;
    std::vector<SBFullHashResult> full_hashes;
    bool can_cache = false;
    if (response_code == 200 || response_code == 204) {
      // For tracking our GetHash false positive (204) rate, compared to real
      // (200) responses.
      if (response_code == 200)
        UMA_HISTOGRAM_COUNTS("SB2.GetHash200", 1);
      else
        UMA_HISTOGRAM_COUNTS("SB2.GetHash204", 1);
      can_cache = true;
      // Any 2xx response resets the GetHash backoff state.
      gethash_error_count_ = 0;
      gethash_back_off_mult_ = 1;
      bool re_key = false;
      SafeBrowsingProtocolParser parser;
      parsed_ok = parser.ParseGetHash(data.data(),
                                      static_cast<int>(data.length()),
                                      client_key_,
                                      &re_key,
                                      &full_hashes);
      if (!parsed_ok) {
        // If we fail to parse it, we must still inform the SafeBrowsingService
        // so that it doesn't hold up the user's request indefinitely. Not sure
        // what to do at that point though!
        full_hashes.clear();
      } else {
        if (re_key)
          HandleReKey();
      }
    } else if (response_code >= 300) {
      // Server error: enter GetHash backoff.
      HandleGetHashError(Time::Now());
      SB_DLOG(INFO) << "SafeBrowsing GetHash request for: " << source->url()
                    << ", failed with error: " << response_code;
    }

    // Call back the SafeBrowsingService with full_hashes, even if there was a
    // parse error or an error response code (in which case full_hashes will be
    // empty). We can't block the user regardless of the error status.
    sb_service_->HandleGetHashResults(check, full_hashes, can_cache);

    hash_requests_.erase(it);
  } else {
    // Update, chunk or key response.
    fetcher.reset(request_.release());

    if (request_type_ == UPDATE_REQUEST) {
      if (!fetcher.get()) {
        // We've timed out waiting for an update response, so we've cancelled
        // the update request and scheduled a new one. Ignore this response.
        return;
      }

      // Cancel the update response timeout now that we have the response.
      update_timer_.Stop();
    }

    if (response_code == 200) {
      // We have data from the SafeBrowsing service.
      parsed_ok = HandleServiceResponse(source->url(),
                                        data.data(),
                                        static_cast<int>(data.length()));
      if (!parsed_ok) {
        SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
                      << "failed parse.";
        must_back_off = true;
        chunk_request_urls_.clear();
        UpdateFinished(false);
      }

      switch (request_type_) {
        case CHUNK_REQUEST:
          // Only advance past this chunk URL if it parsed; otherwise the
          // whole remaining chunk list was cleared above.
          if (parsed_ok)
            chunk_request_urls_.pop_front();
          break;
        case GETKEY_REQUEST:
          if (initial_request_) {
            // This is the first request we've made this session. Now that we
            // have the keys, do the regular update request.
            initial_request_ = false;
            GetNextUpdate();
            return;
          }
          break;
        case UPDATE_REQUEST:
          if (chunk_request_urls_.empty() && parsed_ok) {
            // We are up to date since the servers gave us nothing new, so we
            // are done with this update cycle.
            UpdateFinished(true);
          }
          break;
        default:
          NOTREACHED();
          break;
      }

    } else if (response_code >= 300) {
      // The SafeBrowsing service error: back off.
      must_back_off = true;
      if (request_type_ == CHUNK_REQUEST)
        chunk_request_urls_.clear();
      UpdateFinished(false);
      SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
                    << ", failed with error: " << response_code;
    }
  }

  // Schedule a new update request if we've finished retrieving all the chunks
  // from the previous update. We treat the update request and the chunk URLs it
  // contains as an atomic unit as far as back off is concerned.
  if (chunk_request_urls_.empty() &&
      (request_type_ == CHUNK_REQUEST || request_type_ == UPDATE_REQUEST))
    ScheduleNextUpdate(must_back_off);

  // Get the next chunk if available.
  IssueChunkRequest();
}
314
// Parses a 200 response body according to the current request_type_.
// Returns false on parse failure (callers then back off and abort the
// update cycle).
bool SafeBrowsingProtocolManager::HandleServiceResponse(const GURL& url,
                                                        const char* data,
                                                        int length) {
  SafeBrowsingProtocolParser parser;

  switch (request_type_) {
    case UPDATE_REQUEST: {
      int next_update_sec = -1;
      bool re_key = false;
      bool reset = false;
      // Heap-allocated because ownership may be transferred to the service
      // via HandleChunkDelete below.
      std::vector<SBChunkDelete>* chunk_deletes =
          new std::vector<SBChunkDelete>;
      std::vector<ChunkUrl> chunk_urls;
      if (!parser.ParseUpdate(data, length, client_key_,
                              &next_update_sec, &re_key,
                              &reset, chunk_deletes, &chunk_urls)) {
        delete chunk_deletes;
        return false;
      }

      last_update_ = Time::Now();

      // Advance the FIRST_REQUEST -> SECOND_REQUEST -> NORMAL_REQUEST
      // state machine.
      if (update_state_ == FIRST_REQUEST)
        update_state_ = SECOND_REQUEST;
      else if (update_state_ == SECOND_REQUEST)
        update_state_ = NORMAL_REQUEST;

      // New time for the next update.
      if (next_update_sec > 0) {
        next_update_sec_ = next_update_sec;
      } else if (update_state_ == SECOND_REQUEST) {
        // Server gave no interval on the second request: pick 15-45 minutes.
        next_update_sec_ = base::RandInt(15 * 60, 45 * 60);
      }

      // We need to request a new set of keys for MAC.
      if (re_key)
        HandleReKey();

      // New chunks to download.
      if (!chunk_urls.empty()) {
        UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size());
        for (size_t i = 0; i < chunk_urls.size(); ++i)
          chunk_request_urls_.push_back(chunk_urls[i]);
      }

      // Handle the case where the SafeBrowsing service tells us to dump our
      // database.
      if (reset) {
        sb_service_->ResetDatabase();
        delete chunk_deletes;
        return true;
      }

      // Chunks to delete from our storage. HandleChunkDelete takes ownership
      // of 'chunk_deletes'; otherwise we must free it here.
      if (!chunk_deletes->empty())
        sb_service_->HandleChunkDelete(chunk_deletes);
      else
        delete chunk_deletes;

      break;
    }
    case CHUNK_REQUEST: {
      UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
                          base::Time::Now() - chunk_request_start_);

      const ChunkUrl chunk_url = chunk_request_urls_.front();
      bool re_key = false;
      // Heap-allocated because ownership may be transferred to the service
      // via HandleChunk below.
      std::deque<SBChunk>* chunks = new std::deque<SBChunk>;
      UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length);
      update_size_ += length;
      if (!parser.ParseChunk(data, length,
                             client_key_, chunk_url.mac,
                             &re_key, chunks)) {
#ifndef NDEBUG
        // Dump the raw chunk (base64) to help diagnose parse failures.
        std::string data_str;
        data_str.assign(data, length);
        std::string encoded_chunk;
        net::Base64Encode(data, &encoded_chunk);
        SB_DLOG(INFO) << "ParseChunk error for chunk: " << chunk_url.url
                      << ", client_key: " << client_key_
                      << ", wrapped_key: " << wrapped_key_
                      << ", mac: " << chunk_url.mac
                      << ", Base64Encode(data): " << encoded_chunk
                      << ", length: " << length;
#endif
        safe_browsing_util::FreeChunks(chunks);
        delete chunks;
        return false;
      }

      if (re_key)
        HandleReKey();

      if (chunks->empty()) {
        delete chunks;
      } else {
        // The database thread calls OnChunkInserted when the write finishes.
        chunk_pending_to_write_ = true;
        sb_service_->HandleChunk(chunk_url.list_name, chunks);
      }

      break;
    }
    case GETKEY_REQUEST: {
      std::string client_key, wrapped_key;
      if (!parser.ParseNewKey(data, length, &client_key, &wrapped_key))
        return false;

      client_key_ = client_key;
      wrapped_key_ = wrapped_key;
      // Persist the new keys on the notify loop.
      notify_loop_->PostTask(FROM_HERE, NewRunnableMethod(
          sb_service_, &SafeBrowsingService::OnNewMacKeys, client_key_,
          wrapped_key_));
      break;
    }

    default:
      return false;
  }

  return true;
}
436
437void SafeBrowsingProtocolManager::Initialize() {
438 // Don't want to hit the safe browsing servers on build/chrome bots.
[email protected]05f9b682008-09-29 22:18:01439 if (base::SysInfo::HasEnvVar(env_vars::kHeadless))
initial.commit09911bf2008-07-26 23:55:29440 return;
441
442 ScheduleNextUpdate(false /* no back off */);
443}
444
// (Re)arms the one-shot update timer for GetNextUpdateTime(back_off) ms
// from now, cancelling any pending timer first.
void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
  DCHECK(next_update_sec_ > 0);

  // Unschedule any current timer.
  update_timer_.Stop();

  // Reschedule with the new update.
  const int next_update = GetNextUpdateTime(back_off);
  update_timer_.Start(TimeDelta::FromMilliseconds(next_update), this,
                      &SafeBrowsingProtocolManager::GetNextUpdate);
}
456
457// According to section 5 of the SafeBrowsing protocol specification, we must
458// back off after a certain number of errors. We only change 'next_update_sec_'
459// when we receive a response from the SafeBrowsing service.
460int SafeBrowsingProtocolManager::GetNextUpdateTime(bool back_off) {
461 int next = next_update_sec_;
462 if (back_off) {
463 next = GetNextBackOffTime(&update_error_count_, &update_back_off_mult_);
464 } else {
465 // Successful response means error reset.
466 update_error_count_ = 0;
467 update_back_off_mult_ = 1;
468 }
469 return next * 1000; // milliseconds
470}
471
472int SafeBrowsingProtocolManager::GetNextBackOffTime(int* error_count,
473 int* multiplier) {
474 DCHECK(multiplier && error_count);
475 (*error_count)++;
476 if (*error_count > 1 && *error_count < 6) {
477 int next = static_cast<int>(*multiplier * (1 + back_off_fuzz_) * 30 * 60);
478 *multiplier *= 2;
479 if (*multiplier > kSbMaxBackOff)
480 *multiplier = kSbMaxBackOff;
481 return next;
482 }
483
484 if (*error_count >= 6)
485 return 60 * 60 * 8; // 8 hours
486
487 return 60; // 1 minute
488}
489
// This request requires getting a list of all the chunks for each list from the
// database asynchronously. The request will be issued when we're called back in
// OnGetChunksComplete.
// TODO(paulg): We should get this at start up and maintain a ChunkRange cache
//              to avoid hitting the database with each update request. On the
//              other hand, this request will only occur every ~20-30 minutes so
//              there isn't that much overhead. Measure!
void SafeBrowsingProtocolManager::IssueUpdateRequest() {
  request_type_ = UPDATE_REQUEST;
  // Triggers the async chunk-list gather; see OnGetChunksComplete.
  sb_service_->UpdateStarted();
}
501
// Fetches the next queued chunk URL, if any, and if no other request is
// outstanding.
void SafeBrowsingProtocolManager::IssueChunkRequest() {
  // We are only allowed to have one request outstanding at any time. Also,
  // don't get the next url until the previous one has been written to disk so
  // that we don't use too much memory.
  if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
    return;

  ChunkUrl next_chunk = chunk_request_urls_.front();
  DCHECK(!next_chunk.url.empty());
  // The server may return scheme-less URLs; default them to http.
  if (!StartsWithASCII(next_chunk.url, "http://", false) &&
      !StartsWithASCII(next_chunk.url, "https://", false))
    next_chunk.url = "http://" + next_chunk.url;
  GURL chunk_url(next_chunk.url);
  request_type_ = CHUNK_REQUEST;
  request_.reset(new URLFetcher(chunk_url, URLFetcher::GET, this));
  request_->set_load_flags(net::LOAD_DISABLE_CACHE);
  request_->set_request_context(Profile::GetDefaultRequestContext());
  // Timestamp for the SB2.ChunkRequest latency histogram.
  chunk_request_start_ = base::Time::Now();
  request_->Start();
}
522
// Requests a new MAC key pair from the SafeBrowsing key server (over SSL).
// The response is handled in HandleServiceResponse (GETKEY_REQUEST).
void SafeBrowsingProtocolManager::IssueKeyRequest() {
  GURL key_url(StringPrintf(kSbNewKeyUrl,
                            kSbClientName,
                            version_.c_str()));
  request_type_ = GETKEY_REQUEST;
  request_.reset(new URLFetcher(key_url, URLFetcher::GET, this));
  request_->set_load_flags(net::LOAD_DISABLE_CACHE);
  request_->set_request_context(Profile::GetDefaultRequestContext());
  request_->Start();
}
533
// Called back with the database's current chunk ranges; formats and sends
// the actual update request. On database error, the update cycle is aborted
// and a new one is scheduled.
void SafeBrowsingProtocolManager::OnGetChunksComplete(
    const std::vector<SBListChunkRanges>& lists, bool database_error) {
  DCHECK(request_type_ == UPDATE_REQUEST);

  if (database_error) {
    ScheduleNextUpdate(false);
    UpdateFinished(false);
    return;
  }

  // MACs are only requested when we have a client key.
  const bool use_mac = !client_key_.empty();

  // Format our stored chunks:
  std::string list_data;
  bool found_malware = false;
  bool found_phishing = false;
  for (size_t i = 0; i < lists.size(); ++i) {
    list_data.append(FormatList(lists[i], use_mac));
    if (lists[i].name == safe_browsing_util::kPhishingList)
      found_phishing = true;

    if (lists[i].name == safe_browsing_util::kMalwareList)
      found_malware = true;
  }

  // If we have an empty database, let the server know we want data for these
  // lists.
  if (!found_phishing)
    list_data.append(FormatList(
        SBListChunkRanges(safe_browsing_util::kPhishingList), use_mac));

  if (!found_malware)
    list_data.append(FormatList(
        SBListChunkRanges(safe_browsing_util::kMalwareList), use_mac));

  std::string url = StringPrintf(kSbUpdateUrl,
                                 kSbClientName,
                                 version_.c_str());
  if (use_mac) {
    url.append("&wrkey=");
    url.append(wrapped_key_);
  }

  GURL update_url(url);
  request_.reset(new URLFetcher(update_url, URLFetcher::POST, this));
  request_->set_load_flags(net::LOAD_DISABLE_CACHE);
  request_->set_request_context(Profile::GetDefaultRequestContext());
  request_->set_upload_data("text/plain", list_data);
  request_->Start();

  // Begin the update request timeout; UpdateResponseTimeout fires if the
  // server doesn't answer within kSbMaxUpdateWaitSec.
  update_timer_.Start(TimeDelta::FromSeconds(kSbMaxUpdateWaitSec), this,
                      &SafeBrowsingProtocolManager::UpdateResponseTimeout);
}
588
// If we haven't heard back from the server with an update response, this method
// will run. Close the current update session and schedule another update.
void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
  DCHECK(request_type_ == UPDATE_REQUEST);
  // Cancel the outstanding fetch; a late response is then ignored in
  // OnURLFetchComplete (null request_).
  request_.reset();
  ScheduleNextUpdate(false);
  UpdateFinished(false);
}
597
598void SafeBrowsingProtocolManager::OnChunkInserted() {
599 chunk_pending_to_write_ = false;
600
601 if (chunk_request_urls_.empty()) {
[email protected]484c57a2009-03-21 01:24:01602 UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_);
[email protected]6e3b12ff2009-01-06 22:17:57603 UpdateFinished(true);
initial.commit09911bf2008-07-26 23:55:29604 } else {
605 IssueChunkRequest();
606 }
607}
608
// Sends a fire-and-forget report of a malware-blacklist hit. The response is
// discarded in OnURLFetchComplete; the fetcher is owned via malware_reports_.
void SafeBrowsingProtocolManager::ReportMalware(const GURL& malware_url,
                                                const GURL& page_url,
                                                const GURL& referrer_url) {
  std::string report_str = StringPrintf(
      kSbMalwareReportUrl,
      EscapeQueryParamValue(malware_url.spec()).c_str(),
      EscapeQueryParamValue(page_url.spec()).c_str(),
      EscapeQueryParamValue(referrer_url.spec()).c_str(),
      kSbClientName,
      version_.c_str());
  GURL report_url(report_str);
  URLFetcher* report = new URLFetcher(report_url, URLFetcher::GET, this);
  report->set_load_flags(net::LOAD_DISABLE_CACHE);
  report->set_request_context(Profile::GetDefaultRequestContext());
  report->Start();
  malware_reports_.insert(report);
}
626
initial.commit09911bf2008-07-26 23:55:29627// static
628std::string SafeBrowsingProtocolManager::FormatList(
629 const SBListChunkRanges& list, bool use_mac) {
630 std::string formatted_results;
631 formatted_results.append(list.name);
632 formatted_results.append(";");
633 if (!list.adds.empty()) {
634 formatted_results.append("a:" + list.adds);
635 if (!list.subs.empty() || use_mac)
636 formatted_results.append(":");
637 }
638 if (!list.subs.empty()) {
639 formatted_results.append("s:" + list.subs);
640 if (use_mac)
641 formatted_results.append(":");
642 }
643 if (use_mac)
644 formatted_results.append("mac");
645 formatted_results.append("\n");
646
647 return formatted_results;
648}
649
// Discards the current MAC keys (the server flagged them stale) and requests
// a fresh pair.
void SafeBrowsingProtocolManager::HandleReKey() {
  client_key_.clear();
  wrapped_key_.clear();
  IssueKeyRequest();
}
655
// Records a GetHash server error and computes the time before which no new
// GetHash request may be sent (see the backoff check in GetFullHash).
void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) {
  int next = GetNextBackOffTime(&gethash_error_count_, &gethash_back_off_mult_);
  next_gethash_time_ = now + TimeDelta::FromSeconds(next);
}
[email protected]6e3b12ff2009-01-06 22:17:57660
// Ends the current update cycle: records the total bytes downloaded and
// notifies the service of success/failure.
void SafeBrowsingProtocolManager::UpdateFinished(bool success) {
  UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
  update_size_ = 0;
  sb_service_->UpdateFinished(success);
}