blob: a720bcc28adc3b1ee51930f21c8928f7fabb60dd [file] [log] [blame]
license.botbf09a502008-08-24 00:55:551// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
initial.commit09911bf2008-07-26 23:55:294
5#include "chrome/browser/safe_browsing/protocol_manager.h"
6
[email protected]484fce42008-10-01 00:37:187#include "base/file_version_info.h"
initial.commit09911bf2008-07-26 23:55:298#include "base/histogram.h"
9#include "base/logging.h"
10#include "base/message_loop.h"
[email protected]05f9b682008-09-29 22:18:0111#include "base/rand_util.h"
[email protected]807204142009-05-05 03:31:4412#include "base/stl_util-inl.h"
initial.commit09911bf2008-07-26 23:55:2913#include "base/string_util.h"
[email protected]05f9b682008-09-29 22:18:0114#include "base/sys_info.h"
initial.commit09911bf2008-07-26 23:55:2915#include "base/task.h"
16#include "base/timer.h"
17#include "chrome/browser/profile.h"
18#include "chrome/browser/safe_browsing/protocol_parser.h"
initial.commit09911bf2008-07-26 23:55:2919#include "chrome/browser/safe_browsing/safe_browsing_service.h"
initial.commit09911bf2008-07-26 23:55:2920#include "chrome/common/env_vars.h"
initial.commit09911bf2008-07-26 23:55:2921#include "net/base/base64.h"
[email protected]dfdb0de72009-02-19 21:58:1422#include "net/base/escape.h"
initial.commit09911bf2008-07-26 23:55:2923#include "net/base/load_flags.h"
24
[email protected]e1acf6f2008-10-27 20:43:3325using base::Time;
26using base::TimeDelta;
initial.commit09911bf2008-07-26 23:55:2927
// Maximum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSec = 5 * 60;

// Update URL for querying about the latest set of chunk updates.
// Format arguments: client name, application version (pver is the protocol
// version).
static const char* const kSbUpdateUrl =
    "http://safebrowsing.clients.google.com/safebrowsing/downloads?client=%s"
    "&appver=%s&pver=2.2";

// GetHash request URL for retrieving full hashes.
// Format arguments: client name, application version.
static const char* const kSbGetHashUrl =
    "http://safebrowsing.clients.google.com/safebrowsing/gethash?client=%s"
    "&appver=%s&pver=2.2";

// New MAC client key requests URL. Served over SSL since it transports the
// client key. Format arguments: client name, application version.
static const char* const kSbNewKeyUrl =
    "https://sb-ssl.google.com/safebrowsing/newkey?client=%s&appver=%s"
    "&pver=2.2";

// URL for reporting malware pages. Format arguments: malware URL, page URL,
// referrer URL (all escaped), client name, application version.
static const char* const kSbMalwareReportUrl =
    "http://safebrowsing.clients.google.com/safebrowsing/report?evts=malblhit"
    "&evtd=%s&evtr=%s&evhr=%s&client=%s&appver=%s";

// Client name sent to the SafeBrowsing servers; depends on the build flavor.
#if defined(GOOGLE_CHROME_BUILD)
static const char* const kSbClientName = "googlechrome";
#else
static const char* const kSbClientName = "chromium";
#endif

// Maximum back off multiplier.
static const int kSbMaxBackOff = 8;
59
60
initial.commit09911bf2008-07-26 23:55:2961// SafeBrowsingProtocolManager implementation ----------------------------------
62
// Constructs the protocol manager. |sb_service| receives all parsed results;
// |notify_loop| is where new-MAC-key notifications are posted; |client_key| /
// |wrapped_key| are the persisted MAC keys (may be empty, in which case the
// first update will fetch new ones — see GetNextUpdate()).
SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
    SafeBrowsingService* sb_service,
    MessageLoop* notify_loop,
    const std::string& client_key,
    const std::string& wrapped_key)
    : sb_service_(sb_service),
      request_type_(NO_REQUEST),
      update_error_count_(0),
      gethash_error_count_(0),
      update_back_off_mult_(1),
      gethash_back_off_mult_(1),
      next_update_sec_(-1),
      update_state_(FIRST_REQUEST),
      initial_request_(true),
      chunk_pending_to_write_(false),
      notify_loop_(notify_loop),
      client_key_(client_key),
      wrapped_key_(wrapped_key),
      update_size_(0) {
  // Set the backoff multiplier fuzz to a random value between 0 and 1.
  back_off_fuzz_ = static_cast<float>(base::RandDouble());

  // The first update must happen between 1-5 minutes of start up.
  next_update_sec_ = base::RandInt(60, kSbTimerStartIntervalSec);

  // Product version reported in request URLs; fall back to "0.1" when no
  // version info is available (e.g. some developer builds).
  scoped_ptr<FileVersionInfo> version_info(
      FileVersionInfo::CreateFileVersionInfoForCurrentModule());
  if (!version_info.get())
    version_ = "0.1";
  else
    version_ = WideToASCII(version_info->product_version());
}
95
SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
  // Delete in-progress SafeBrowsing requests. The manager owns the URLFetcher
  // keys of hash_requests_; the mapped SafeBrowsingCheck values are owned by
  // the SafeBrowsingService and are not deleted here.
  STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
                                      hash_requests_.end());
  hash_requests_.clear();

  // Delete in-progress malware reports (fire-and-forget fetchers owned here).
  STLDeleteContainerPointers(malware_reports_.begin(), malware_reports_.end());
  malware_reports_.clear();
}
106
107// Public API used by the SafeBrowsingService ----------------------------------
108
109// We can only have one update or chunk request outstanding, but there may be
110// multiple GetHash requests pending since we don't want to serialize them and
111// slow down the user.
112void SafeBrowsingProtocolManager::GetFullHash(
113 SafeBrowsingService::SafeBrowsingCheck* check,
114 const std::vector<SBPrefix>& prefixes) {
115 // If we are in GetHash backoff, we need to check if we're past the next
116 // allowed time. If we are, we can proceed with the request. If not, we are
117 // required to return empty results (i.e. treat the page as safe).
118 if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
119 std::vector<SBFullHashResult> full_hashes;
[email protected]200abc32008-09-05 01:44:33120 sb_service_->HandleGetHashResults(check, full_hashes, false);
initial.commit09911bf2008-07-26 23:55:29121 return;
122 }
123
124 std::string url = StringPrintf(kSbGetHashUrl,
125 kSbClientName,
[email protected]484fce42008-10-01 00:37:18126 version_.c_str());
initial.commit09911bf2008-07-26 23:55:29127 if (!client_key_.empty()) {
128 url.append("&wrkey=");
129 url.append(wrapped_key_);
130 }
131
132 GURL gethash_url(url);
133 URLFetcher* fetcher = new URLFetcher(gethash_url, URLFetcher::POST, this);
134 hash_requests_[fetcher] = check;
135
136 std::string get_hash;
137 SafeBrowsingProtocolParser parser;
138 parser.FormatGetHash(prefixes, &get_hash);
139
140 fetcher->set_load_flags(net::LOAD_DISABLE_CACHE);
141 fetcher->set_request_context(Profile::GetDefaultRequestContext());
[email protected]d36e3c8e2008-08-29 23:42:20142 fetcher->set_upload_data("text/plain", get_hash);
initial.commit09911bf2008-07-26 23:55:29143 fetcher->Start();
144}
145
// Kicks off the next update cycle. On the first request of a session we must
// hold valid MAC keys; otherwise a key request is issued first and the update
// resumes from OnURLFetchComplete once the keys arrive.
void SafeBrowsingProtocolManager::GetNextUpdate() {
  if (initial_request_) {
    const bool have_keys = !client_key_.empty() && !wrapped_key_.empty();
    if (!have_keys) {
      IssueKeyRequest();
      return;
    }
    initial_request_ = false;
  }

  // Only one update/chunk/key request may be outstanding at any time.
  if (request_.get())
    return;
  IssueUpdateRequest();
}
159
160// URLFetcher::Delegate implementation -----------------------------------------
161
// All SafeBrowsing request responses are handled here.
// TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
//              chunk should retry the download and parse of that chunk (and
//              what back off / how many times to try), and if that effects the
//              update back off. For now, a failed parse of the chunk means we
//              drop it. This isn't so bad because the next UPDATE_REQUEST we
//              do will report all the chunks we have. If that chunk is still
//              required, the SafeBrowsing servers will tell us to get it again.
void SafeBrowsingProtocolManager::OnURLFetchComplete(
    const URLFetcher* source,
    const GURL& url,
    const URLRequestStatus& status,
    int response_code,
    const ResponseCookies& cookies,
    const std::string& data) {
  // Takes ownership of |source| below so the fetcher is deleted when this
  // handler returns, whichever path we exit through.
  scoped_ptr<const URLFetcher> fetcher;
  bool parsed_ok = true;
  bool must_back_off = false;  // Reduce SafeBrowsing service query frequency.

  // See if this is a malware report fetcher. We don't take any action for
  // the response to those.
  std::set<const URLFetcher*>::iterator mit = malware_reports_.find(source);
  if (mit != malware_reports_.end()) {
    const URLFetcher* report = *mit;
    malware_reports_.erase(mit);
    delete report;
    return;
  }

  HashRequests::iterator it = hash_requests_.find(source);
  if (it != hash_requests_.end()) {
    // GetHash response.
    fetcher.reset(it->first);
    SafeBrowsingService::SafeBrowsingCheck* check = it->second;
    std::vector<SBFullHashResult> full_hashes;
    bool can_cache = false;
    if (response_code == 200 || response_code == 204) {
      // For tracking our GetHash false positive (204) rate, compared to real
      // (200) responses.
      if (response_code == 200)
        UMA_HISTOGRAM_COUNTS("SB2.GetHash200", 1);
      else
        UMA_HISTOGRAM_COUNTS("SB2.GetHash204", 1);
      can_cache = true;
      // A successful response clears the GetHash back off state.
      gethash_error_count_ = 0;
      gethash_back_off_mult_ = 1;
      bool re_key = false;
      SafeBrowsingProtocolParser parser;
      parsed_ok = parser.ParseGetHash(data.data(),
                                      static_cast<int>(data.length()),
                                      client_key_,
                                      &re_key,
                                      &full_hashes);
      if (!parsed_ok) {
        // If we fail to parse it, we must still inform the SafeBrowsingService
        // so that it doesn't hold up the user's request indefinitely. Not sure
        // what to do at that point though!
        full_hashes.clear();
      } else {
        if (re_key)
          HandleReKey();
      }
    } else if (response_code >= 300) {
      // Server error: enter (or extend) GetHash back off.
      HandleGetHashError(Time::Now());
      SB_DLOG(INFO) << "SafeBrowsing GetHash request for: " << source->url()
                    << ", failed with error: " << response_code;
    }

    // Call back the SafeBrowsingService with full_hashes, even if there was a
    // parse error or an error response code (in which case full_hashes will be
    // empty). We can't block the user regardless of the error status.
    sb_service_->HandleGetHashResults(check, full_hashes, can_cache);

    hash_requests_.erase(it);
  } else {
    // Update, chunk or key response.
    DCHECK(source == request_.get());
    fetcher.reset(request_.release());

    if (response_code == 200) {
      // We have data from the SafeBrowsing service.
      parsed_ok = HandleServiceResponse(source->url(),
                                        data.data(),
                                        static_cast<int>(data.length()));
      if (!parsed_ok) {
        SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
                      << "failed parse.";
        must_back_off = true;
        chunk_request_urls_.clear();
        UpdateFinished(false);
      }

      switch (request_type_) {
        case CHUNK_REQUEST:
          // Pop the chunk URL we just handled; on parse failure it is kept
          // cleared above and the whole cycle is abandoned.
          if (parsed_ok)
            chunk_request_urls_.pop_front();
          break;
        case GETKEY_REQUEST:
          if (initial_request_) {
            // This is the first request we've made this session. Now that we
            // have the keys, do the regular update request.
            initial_request_ = false;
            GetNextUpdate();
            return;
          }
          break;
        case UPDATE_REQUEST:
          if (chunk_request_urls_.empty() && parsed_ok) {
            // We are up to date since the servers gave us nothing new, so we
            // are done with this update cycle.
            UpdateFinished(true);
          }
          break;
        default:
          NOTREACHED();
          break;
      }

    } else if (response_code >= 300) {
      // The SafeBrowsing service error: back off.
      must_back_off = true;
      if (request_type_ == CHUNK_REQUEST)
        chunk_request_urls_.clear();
      UpdateFinished(false);
      SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
                    << ", failed with error: " << response_code;
    }
  }

  // Schedule a new update request if we've finished retrieving all the chunks
  // from the previous update. We treat the update request and the chunk URLs it
  // contains as an atomic unit as far as back off is concerned.
  if (chunk_request_urls_.empty() &&
      (request_type_ == CHUNK_REQUEST || request_type_ == UPDATE_REQUEST))
    ScheduleNextUpdate(must_back_off);

  // Get the next chunk if available.
  IssueChunkRequest();
}
301
302bool SafeBrowsingProtocolManager::HandleServiceResponse(const GURL& url,
303 const char* data,
304 int length) {
305 SafeBrowsingProtocolParser parser;
306
307 switch (request_type_) {
308 case UPDATE_REQUEST: {
309 int next_update_sec = -1;
310 bool re_key = false;
311 bool reset = false;
312 std::vector<SBChunkDelete>* chunk_deletes =
313 new std::vector<SBChunkDelete>;
314 std::vector<ChunkUrl> chunk_urls;
315 if (!parser.ParseUpdate(data, length, client_key_,
316 &next_update_sec, &re_key,
317 &reset, chunk_deletes, &chunk_urls)) {
318 delete chunk_deletes;
319 return false;
320 }
321
322 last_update_ = Time::Now();
323
324 if (update_state_ == FIRST_REQUEST)
325 update_state_ = SECOND_REQUEST;
326 else if (update_state_ == SECOND_REQUEST)
327 update_state_ = NORMAL_REQUEST;
328
329 // New time for the next update.
330 if (next_update_sec > 0) {
331 next_update_sec_ = next_update_sec;
332 } else if (update_state_ == SECOND_REQUEST) {
[email protected]05f9b682008-09-29 22:18:01333 next_update_sec_ = base::RandInt(15 * 60, 45 * 60);
initial.commit09911bf2008-07-26 23:55:29334 }
335
336 // We need to request a new set of keys for MAC.
337 if (re_key)
338 HandleReKey();
339
340 // New chunks to download.
341 if (!chunk_urls.empty()) {
[email protected]553dba62009-02-24 19:08:23342 UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size());
initial.commit09911bf2008-07-26 23:55:29343 for (size_t i = 0; i < chunk_urls.size(); ++i)
344 chunk_request_urls_.push_back(chunk_urls[i]);
345 }
346
347 // Handle the case were the SafeBrowsing service tells us to dump our
348 // database.
349 if (reset) {
350 sb_service_->ResetDatabase();
[email protected]1d8f8b42008-11-21 22:22:41351 delete chunk_deletes;
initial.commit09911bf2008-07-26 23:55:29352 return true;
353 }
354
355 // Chunks to delete from our storage.
356 if (!chunk_deletes->empty())
357 sb_service_->HandleChunkDelete(chunk_deletes);
[email protected]1d8f8b42008-11-21 22:22:41358 else
359 delete chunk_deletes;
initial.commit09911bf2008-07-26 23:55:29360
361 break;
362 }
363 case CHUNK_REQUEST: {
[email protected]484c57a2009-03-21 01:24:01364 UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
365 base::Time::Now() - chunk_request_start_);
[email protected]22573822008-11-14 00:40:47366
initial.commit09911bf2008-07-26 23:55:29367 const ChunkUrl chunk_url = chunk_request_urls_.front();
initial.commit09911bf2008-07-26 23:55:29368 bool re_key = false;
369 std::deque<SBChunk>* chunks = new std::deque<SBChunk>;
[email protected]553dba62009-02-24 19:08:23370 UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length);
[email protected]6e3b12ff2009-01-06 22:17:57371 update_size_ += length;
initial.commit09911bf2008-07-26 23:55:29372 if (!parser.ParseChunk(data, length,
373 client_key_, chunk_url.mac,
374 &re_key, chunks)) {
375#ifndef NDEBUG
376 std::string data_str;
377 data_str.assign(data, length);
378 std::string encoded_chunk;
[email protected]a9bb6f692008-07-30 16:40:10379 net::Base64Encode(data, &encoded_chunk);
initial.commit09911bf2008-07-26 23:55:29380 SB_DLOG(INFO) << "ParseChunk error for chunk: " << chunk_url.url
381 << ", client_key: " << client_key_
382 << ", wrapped_key: " << wrapped_key_
383 << ", mac: " << chunk_url.mac
384 << ", Base64Encode(data): " << encoded_chunk
385 << ", length: " << length;
386#endif
387 safe_browsing_util::FreeChunks(chunks);
388 delete chunks;
389 return false;
390 }
391
392 if (re_key)
393 HandleReKey();
394
395 if (chunks->empty()) {
396 delete chunks;
397 } else {
398 chunk_pending_to_write_ = true;
[email protected]8b02bb8a2008-10-22 02:05:09399 sb_service_->HandleChunk(chunk_url.list_name, chunks);
initial.commit09911bf2008-07-26 23:55:29400 }
401
402 break;
403 }
404 case GETKEY_REQUEST: {
405 std::string client_key, wrapped_key;
406 if (!parser.ParseNewKey(data, length, &client_key, &wrapped_key))
407 return false;
408
409 client_key_ = client_key;
410 wrapped_key_ = wrapped_key;
411 notify_loop_->PostTask(FROM_HERE, NewRunnableMethod(
412 sb_service_, &SafeBrowsingService::OnNewMacKeys, client_key_,
413 wrapped_key_));
414 break;
415 }
416
417 default:
418 return false;
419 }
420
421 return true;
422}
423
424void SafeBrowsingProtocolManager::Initialize() {
425 // Don't want to hit the safe browsing servers on build/chrome bots.
[email protected]05f9b682008-09-29 22:18:01426 if (base::SysInfo::HasEnvVar(env_vars::kHeadless))
initial.commit09911bf2008-07-26 23:55:29427 return;
428
429 ScheduleNextUpdate(false /* no back off */);
430}
431
432void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
433 DCHECK(next_update_sec_ > 0);
434
[email protected]2d316662008-09-03 18:18:14435 // Unschedule any current timer.
436 update_timer_.Stop();
initial.commit09911bf2008-07-26 23:55:29437
438 // Reschedule with the new update.
439 const int next_update = GetNextUpdateTime(back_off);
[email protected]2d316662008-09-03 18:18:14440 update_timer_.Start(TimeDelta::FromMilliseconds(next_update), this,
441 &SafeBrowsingProtocolManager::GetNextUpdate);
initial.commit09911bf2008-07-26 23:55:29442}
443
444// According to section 5 of the SafeBrowsing protocol specification, we must
445// back off after a certain number of errors. We only change 'next_update_sec_'
446// when we receive a response from the SafeBrowsing service.
447int SafeBrowsingProtocolManager::GetNextUpdateTime(bool back_off) {
448 int next = next_update_sec_;
449 if (back_off) {
450 next = GetNextBackOffTime(&update_error_count_, &update_back_off_mult_);
451 } else {
452 // Successful response means error reset.
453 update_error_count_ = 0;
454 update_back_off_mult_ = 1;
455 }
456 return next * 1000; // milliseconds
457}
458
459int SafeBrowsingProtocolManager::GetNextBackOffTime(int* error_count,
460 int* multiplier) {
461 DCHECK(multiplier && error_count);
462 (*error_count)++;
463 if (*error_count > 1 && *error_count < 6) {
464 int next = static_cast<int>(*multiplier * (1 + back_off_fuzz_) * 30 * 60);
465 *multiplier *= 2;
466 if (*multiplier > kSbMaxBackOff)
467 *multiplier = kSbMaxBackOff;
468 return next;
469 }
470
471 if (*error_count >= 6)
472 return 60 * 60 * 8; // 8 hours
473
474 return 60; // 1 minute
475}
476
// This request requires getting a list of all the chunks for each list from the
// database asynchronously. The request will be issued when we're called back in
// OnGetChunksComplete.
// TODO(paulg): We should get this at start up and maintain a ChunkRange cache
//              to avoid hitting the database with each update request. On the
//              otherhand, this request will only occur ~20-30 minutes so there
//              isn't that much overhead. Measure!
void SafeBrowsingProtocolManager::IssueUpdateRequest() {
  // NOTE: request_type_ must be set before UpdateStarted(): the asynchronous
  // OnGetChunksComplete callback DCHECKs that an update request is active.
  request_type_ = UPDATE_REQUEST;
  sb_service_->UpdateStarted();
}
488
489void SafeBrowsingProtocolManager::IssueChunkRequest() {
490 // We are only allowed to have one request outstanding at any time. Also,
491 // don't get the next url until the previous one has been written to disk so
492 // that we don't use too much memory.
493 if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
494 return;
495
496 ChunkUrl next_chunk = chunk_request_urls_.front();
497 DCHECK(!next_chunk.url.empty());
498 if (!StartsWithASCII(next_chunk.url, "http://", false) &&
499 !StartsWithASCII(next_chunk.url, "https://", false))
500 next_chunk.url = "http://" + next_chunk.url;
501 GURL chunk_url(next_chunk.url);
502 request_type_ = CHUNK_REQUEST;
503 request_.reset(new URLFetcher(chunk_url, URLFetcher::GET, this));
504 request_->set_load_flags(net::LOAD_DISABLE_CACHE);
505 request_->set_request_context(Profile::GetDefaultRequestContext());
[email protected]22573822008-11-14 00:40:47506 chunk_request_start_ = base::Time::Now();
initial.commit09911bf2008-07-26 23:55:29507 request_->Start();
508}
509
510void SafeBrowsingProtocolManager::IssueKeyRequest() {
511 GURL key_url(StringPrintf(kSbNewKeyUrl,
512 kSbClientName,
[email protected]484fce42008-10-01 00:37:18513 version_.c_str()));
initial.commit09911bf2008-07-26 23:55:29514 request_type_ = GETKEY_REQUEST;
515 request_.reset(new URLFetcher(key_url, URLFetcher::GET, this));
516 request_->set_load_flags(net::LOAD_DISABLE_CACHE);
517 request_->set_request_context(Profile::GetDefaultRequestContext());
518 request_->Start();
519}
520
521void SafeBrowsingProtocolManager::OnGetChunksComplete(
522 const std::vector<SBListChunkRanges>& lists, bool database_error) {
523 DCHECK(request_type_ == UPDATE_REQUEST);
524
525 if (database_error) {
526 ScheduleNextUpdate(false);
527 return;
528 }
529
530 const bool use_mac = !client_key_.empty();
531
532 // Format our stored chunks:
533 std::string list_data;
534 bool found_malware = false;
535 bool found_phishing = false;
536 for (size_t i = 0; i < lists.size(); ++i) {
537 list_data.append(FormatList(lists[i], use_mac));
[email protected]c3ff89492008-11-11 02:17:51538 if (lists[i].name == safe_browsing_util::kPhishingList)
initial.commit09911bf2008-07-26 23:55:29539 found_phishing = true;
540
[email protected]c3ff89492008-11-11 02:17:51541 if (lists[i].name == safe_browsing_util::kMalwareList)
initial.commit09911bf2008-07-26 23:55:29542 found_malware = true;
543 }
544
545 // If we have an empty database, let the server know we want data for these
546 // lists.
547 if (!found_phishing)
[email protected]c3ff89492008-11-11 02:17:51548 list_data.append(FormatList(
549 SBListChunkRanges(safe_browsing_util::kPhishingList), use_mac));
initial.commit09911bf2008-07-26 23:55:29550
551 if (!found_malware)
[email protected]c3ff89492008-11-11 02:17:51552 list_data.append(FormatList(
553 SBListChunkRanges(safe_browsing_util::kMalwareList), use_mac));
initial.commit09911bf2008-07-26 23:55:29554
555 std::string url = StringPrintf(kSbUpdateUrl,
556 kSbClientName,
[email protected]484fce42008-10-01 00:37:18557 version_.c_str());
initial.commit09911bf2008-07-26 23:55:29558 if (use_mac) {
559 url.append("&wrkey=");
560 url.append(wrapped_key_);
561 }
562
563 GURL update_url(url);
564 request_.reset(new URLFetcher(update_url, URLFetcher::POST, this));
565 request_->set_load_flags(net::LOAD_DISABLE_CACHE);
566 request_->set_request_context(Profile::GetDefaultRequestContext());
567 request_->set_upload_data("text/plain", list_data);
568 request_->Start();
569}
570
571void SafeBrowsingProtocolManager::OnChunkInserted() {
572 chunk_pending_to_write_ = false;
573
574 if (chunk_request_urls_.empty()) {
[email protected]484c57a2009-03-21 01:24:01575 UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_);
[email protected]6e3b12ff2009-01-06 22:17:57576 UpdateFinished(true);
initial.commit09911bf2008-07-26 23:55:29577 } else {
578 IssueChunkRequest();
579 }
580}
581
[email protected]dfdb0de72009-02-19 21:58:14582void SafeBrowsingProtocolManager::ReportMalware(const GURL& malware_url,
583 const GURL& page_url,
584 const GURL& referrer_url) {
585 std::string report_str = StringPrintf(
586 kSbMalwareReportUrl,
587 EscapeQueryParamValue(malware_url.spec()).c_str(),
588 EscapeQueryParamValue(page_url.spec()).c_str(),
589 EscapeQueryParamValue(referrer_url.spec()).c_str(),
590 kSbClientName,
591 version_.c_str());
592 GURL report_url(report_str);
593 URLFetcher* report = new URLFetcher(report_url, URLFetcher::GET, this);
594 report->set_load_flags(net::LOAD_DISABLE_CACHE);
595 report->set_request_context(Profile::GetDefaultRequestContext());
596 report->Start();
597 malware_reports_.insert(report);
598}
599
initial.commit09911bf2008-07-26 23:55:29600// static
601std::string SafeBrowsingProtocolManager::FormatList(
602 const SBListChunkRanges& list, bool use_mac) {
603 std::string formatted_results;
604 formatted_results.append(list.name);
605 formatted_results.append(";");
606 if (!list.adds.empty()) {
607 formatted_results.append("a:" + list.adds);
608 if (!list.subs.empty() || use_mac)
609 formatted_results.append(":");
610 }
611 if (!list.subs.empty()) {
612 formatted_results.append("s:" + list.subs);
613 if (use_mac)
614 formatted_results.append(":");
615 }
616 if (use_mac)
617 formatted_results.append("mac");
618 formatted_results.append("\n");
619
620 return formatted_results;
621}
622
623void SafeBrowsingProtocolManager::HandleReKey() {
624 client_key_.clear();
625 wrapped_key_.clear();
626 IssueKeyRequest();
627}
628
[email protected]7bdc1bf2009-07-28 15:48:03629void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) {
initial.commit09911bf2008-07-26 23:55:29630 int next = GetNextBackOffTime(&gethash_error_count_, &gethash_back_off_mult_);
[email protected]7bdc1bf2009-07-28 15:48:03631 next_gethash_time_ = now + TimeDelta::FromSeconds(next);
initial.commit09911bf2008-07-26 23:55:29632}
[email protected]6e3b12ff2009-01-06 22:17:57633
void SafeBrowsingProtocolManager::UpdateFinished(bool success) {
  // Record the total chunk bytes downloaded this update cycle, then reset
  // the counter for the next cycle before notifying the service.
  UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
  update_size_ = 0;
  sb_service_->UpdateFinished(success);
}