blob: cd2c1beed322a5a5fa6f1fba4aa9d2678e366cc5 [file] [log] [blame]
license.botbf09a502008-08-24 00:55:551// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
initial.commit09911bf2008-07-26 23:55:294
5#include "chrome/browser/safe_browsing/protocol_manager.h"
6
[email protected]484fce42008-10-01 00:37:187#include "base/file_version_info.h"
initial.commit09911bf2008-07-26 23:55:298#include "base/histogram.h"
9#include "base/logging.h"
10#include "base/message_loop.h"
[email protected]05f9b682008-09-29 22:18:0111#include "base/rand_util.h"
initial.commit09911bf2008-07-26 23:55:2912#include "base/string_util.h"
[email protected]05f9b682008-09-29 22:18:0113#include "base/sys_info.h"
initial.commit09911bf2008-07-26 23:55:2914#include "base/task.h"
15#include "base/timer.h"
16#include "chrome/browser/profile.h"
17#include "chrome/browser/safe_browsing/protocol_parser.h"
initial.commit09911bf2008-07-26 23:55:2918#include "chrome/browser/safe_browsing/safe_browsing_service.h"
initial.commit09911bf2008-07-26 23:55:2919#include "chrome/common/env_vars.h"
initial.commit09911bf2008-07-26 23:55:2920#include "chrome/common/stl_util-inl.h"
21#include "net/base/base64.h"
22#include "net/base/load_flags.h"
23
[email protected]e1acf6f2008-10-27 20:43:3324using base::Time;
25using base::TimeDelta;
initial.commit09911bf2008-07-26 23:55:2926
27// Maximum time, in seconds, from start up before we must issue an update query.
[email protected]05f9b682008-09-29 22:18:0128static const int kSbTimerStartIntervalSec = 5 * 60;
initial.commit09911bf2008-07-26 23:55:2929
30// Update URL for querying about the latest set of chunk updates.
31static const char* const kSbUpdateUrl =
[email protected]9891b1342008-11-07 23:05:3332 "http://safebrowsing.clients.google.com/safebrowsing/downloads?client=%s&appver=%s&pver=2.2";
initial.commit09911bf2008-07-26 23:55:2933
34// GetHash request URL for retrieving full hashes.
35static const char* const kSbGetHashUrl =
[email protected]9891b1342008-11-07 23:05:3336 "http://safebrowsing.clients.google.com/safebrowsing/gethash?client=%s&appver=%s&pver=2.2";
initial.commit09911bf2008-07-26 23:55:2937
38// New MAC client key requests URL.
39static const char* const kSbNewKeyUrl =
[email protected]9891b1342008-11-07 23:05:3340 "https://sb-ssl.google.com/safebrowsing/newkey?client=%s&appver=%s&pver=2.2";
initial.commit09911bf2008-07-26 23:55:2941
[email protected]f1da1262008-08-31 23:03:5842#if defined(GOOGLE_CHROME_BUILD)
43static const char* const kSbClientName = "googlechrome";
44#else
45static const char* const kSbClientName = "chromium";
46#endif
initial.commit09911bf2008-07-26 23:55:2947
48// Maximum back off multiplier.
49static const int kSbMaxBackOff = 8;
50
51
initial.commit09911bf2008-07-26 23:55:2952// SafeBrowsingProtocolManager implementation ----------------------------------
53
54SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
55 SafeBrowsingService* sb_service,
56 MessageLoop* notify_loop,
57 const std::string& client_key,
58 const std::string& wrapped_key)
59 : sb_service_(sb_service),
60 request_type_(NO_REQUEST),
61 update_error_count_(0),
62 gethash_error_count_(0),
63 update_back_off_mult_(1),
64 gethash_back_off_mult_(1),
65 next_update_sec_(-1),
66 update_state_(FIRST_REQUEST),
67 initial_request_(true),
68 chunk_pending_to_write_(false),
69 notify_loop_(notify_loop),
70 client_key_(client_key),
71 wrapped_key_(wrapped_key) {
72 // Set the backoff multiplier fuzz to a random value between 0 and 1.
[email protected]05f9b682008-09-29 22:18:0173 back_off_fuzz_ = static_cast<float>(base::RandDouble());
initial.commit09911bf2008-07-26 23:55:2974
75 // The first update must happen between 0-5 minutes of start up.
[email protected]05f9b682008-09-29 22:18:0176 next_update_sec_ = base::RandInt(60, kSbTimerStartIntervalSec);
[email protected]484fce42008-10-01 00:37:1877
78 scoped_ptr<FileVersionInfo> version_info(
79 FileVersionInfo::CreateFileVersionInfoForCurrentModule());
80 if (!version_info.get())
81 version_ = "0.1";
82 else
83 version_ = WideToASCII(version_info->product_version());
initial.commit09911bf2008-07-26 23:55:2984}
85
86SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
initial.commit09911bf2008-07-26 23:55:2987 // Delete in-progress SafeBrowsing requests.
88 STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
89 hash_requests_.end());
90 hash_requests_.clear();
91}
92
93// Public API used by the SafeBrowsingService ----------------------------------
94
95// We can only have one update or chunk request outstanding, but there may be
96// multiple GetHash requests pending since we don't want to serialize them and
97// slow down the user.
98void SafeBrowsingProtocolManager::GetFullHash(
99 SafeBrowsingService::SafeBrowsingCheck* check,
100 const std::vector<SBPrefix>& prefixes) {
101 // If we are in GetHash backoff, we need to check if we're past the next
102 // allowed time. If we are, we can proceed with the request. If not, we are
103 // required to return empty results (i.e. treat the page as safe).
104 if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
105 std::vector<SBFullHashResult> full_hashes;
[email protected]200abc32008-09-05 01:44:33106 sb_service_->HandleGetHashResults(check, full_hashes, false);
initial.commit09911bf2008-07-26 23:55:29107 return;
108 }
109
110 std::string url = StringPrintf(kSbGetHashUrl,
111 kSbClientName,
[email protected]484fce42008-10-01 00:37:18112 version_.c_str());
initial.commit09911bf2008-07-26 23:55:29113 if (!client_key_.empty()) {
114 url.append("&wrkey=");
115 url.append(wrapped_key_);
116 }
117
118 GURL gethash_url(url);
119 URLFetcher* fetcher = new URLFetcher(gethash_url, URLFetcher::POST, this);
120 hash_requests_[fetcher] = check;
121
122 std::string get_hash;
123 SafeBrowsingProtocolParser parser;
124 parser.FormatGetHash(prefixes, &get_hash);
125
126 fetcher->set_load_flags(net::LOAD_DISABLE_CACHE);
127 fetcher->set_request_context(Profile::GetDefaultRequestContext());
[email protected]d36e3c8e2008-08-29 23:42:20128 fetcher->set_upload_data("text/plain", get_hash);
initial.commit09911bf2008-07-26 23:55:29129 fetcher->Start();
130}
131
132void SafeBrowsingProtocolManager::GetNextUpdate() {
133 if (initial_request_) {
134 if (client_key_.empty() || wrapped_key_.empty()) {
135 IssueKeyRequest();
136 return;
137 } else {
138 initial_request_ = false;
139 }
140 }
141
142 if (!request_.get())
143 IssueUpdateRequest();
144}
145
146// URLFetcher::Delegate implementation -----------------------------------------
147
// All SafeBrowsing request responses are handled here.
// TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
//              chunk should retry the download and parse of that chunk (and
//              what back off / how many times to try), and if that effects the
//              update back off. For now, a failed parse of the chunk means we
//              drop it. This isn't so bad because the next UPDATE_REQUEST we
//              do will report all the chunks we have. If that chunk is still
//              required, the SafeBrowsing servers will tell us to get it again.
void SafeBrowsingProtocolManager::OnURLFetchComplete(
    const URLFetcher* source,
    const GURL& url,
    const URLRequestStatus& status,
    int response_code,
    const ResponseCookies& cookies,
    const std::string& data) {
  // Takes ownership of |source| (every fetcher here was heap-allocated by
  // this class), guaranteeing it is destroyed on all exit paths.
  scoped_ptr<const URLFetcher> fetcher;
  bool parsed_ok = true;
  bool must_back_off = false;  // Reduce SafeBrowsing service query frequency.

  // A fetcher found in |hash_requests_| is a GetHash request; otherwise the
  // response belongs to the single outstanding update/chunk/key |request_|.
  HashRequests::iterator it = hash_requests_.find(source);
  if (it != hash_requests_.end()) {
    // GetHash response.
    fetcher.reset(it->first);
    SafeBrowsingService::SafeBrowsingCheck* check = it->second;
    std::vector<SBFullHashResult> full_hashes;
    bool can_cache = false;
    if (response_code == 200 || response_code == 204) {
      // Successful response (200 with hashes, or 204): clear the GetHash
      // back off state.
      can_cache = true;
      gethash_error_count_ = 0;
      gethash_back_off_mult_ = 1;
      bool re_key = false;
      SafeBrowsingProtocolParser parser;
      parsed_ok = parser.ParseGetHash(data.data(),
                                      static_cast<int>(data.length()),
                                      client_key_,
                                      &re_key,
                                      &full_hashes);
      if (!parsed_ok) {
        // If we fail to parse it, we must still inform the SafeBrowsingService
        // so that it doesn't hold up the user's request indefinitely. Not sure
        // what to do at that point though!
        full_hashes.clear();
      } else {
        // Server asked us to rotate the MAC keys.
        if (re_key)
          HandleReKey();
      }
    } else if (response_code >= 300) {
      // Server error: enter/extend GetHash back off.
      HandleGetHashError();
      SB_DLOG(INFO) << "SafeBrowsing GetHash request for: " << source->url()
                    << ", failed with error: " << response_code;
    }

    // Call back the SafeBrowsingService with full_hashes, even if there was a
    // parse error or an error response code (in which case full_hashes will be
    // empty). We can't block the user regardless of the error status.
    sb_service_->HandleGetHashResults(check, full_hashes, can_cache);

    hash_requests_.erase(it);
  } else {
    // Update, chunk or key response.
    DCHECK(source == request_.get());
    fetcher.reset(request_.release());

    if (response_code == 200) {
      // We have data from the SafeBrowsing service.
      parsed_ok = HandleServiceResponse(source->url(),
                                        data.data(),
                                        static_cast<int>(data.length()));
      if (!parsed_ok) {
        SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
                      << "failed parse.";
        must_back_off = true;
        chunk_request_urls_.clear();
        sb_service_->UpdateFinished(false);
      }

      if (request_type_ == CHUNK_REQUEST && parsed_ok) {
        // This chunk is done; the next one (if any) is issued below.
        chunk_request_urls_.pop_front();
      } else if (request_type_ == GETKEY_REQUEST && initial_request_) {
        // This is the first request we've made this session. Now that we have
        // the keys, do the regular update request.
        initial_request_ = false;
        GetNextUpdate();
        return;
      }
    } else if (response_code >= 300) {
      // The SafeBrowsing service error: back off.
      must_back_off = true;
      if (request_type_ == CHUNK_REQUEST)
        chunk_request_urls_.clear();
      sb_service_->UpdateFinished(false);
      SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url()
                    << ", failed with error: " << response_code;
    }
  }

  // Schedule a new update request if we've finished retrieving all the chunks
  // from the previous update. We treat the update request and the chunk URLs it
  // contains as an atomic unit as far as back off is concerned.
  if (chunk_request_urls_.empty() &&
      (request_type_ == CHUNK_REQUEST || request_type_ == UPDATE_REQUEST))
    ScheduleNextUpdate(must_back_off);

  // Get the next chunk if available.
  IssueChunkRequest();
}
254
255bool SafeBrowsingProtocolManager::HandleServiceResponse(const GURL& url,
256 const char* data,
257 int length) {
258 SafeBrowsingProtocolParser parser;
259
260 switch (request_type_) {
261 case UPDATE_REQUEST: {
262 int next_update_sec = -1;
263 bool re_key = false;
264 bool reset = false;
265 std::vector<SBChunkDelete>* chunk_deletes =
266 new std::vector<SBChunkDelete>;
267 std::vector<ChunkUrl> chunk_urls;
268 if (!parser.ParseUpdate(data, length, client_key_,
269 &next_update_sec, &re_key,
270 &reset, chunk_deletes, &chunk_urls)) {
271 delete chunk_deletes;
initial.commit09911bf2008-07-26 23:55:29272 return false;
273 }
274
275 last_update_ = Time::Now();
276
277 if (update_state_ == FIRST_REQUEST)
278 update_state_ = SECOND_REQUEST;
279 else if (update_state_ == SECOND_REQUEST)
280 update_state_ = NORMAL_REQUEST;
281
282 // New time for the next update.
283 if (next_update_sec > 0) {
284 next_update_sec_ = next_update_sec;
285 } else if (update_state_ == SECOND_REQUEST) {
[email protected]05f9b682008-09-29 22:18:01286 next_update_sec_ = base::RandInt(15 * 60, 45 * 60);
initial.commit09911bf2008-07-26 23:55:29287 }
288
289 // We need to request a new set of keys for MAC.
290 if (re_key)
291 HandleReKey();
292
293 // New chunks to download.
294 if (!chunk_urls.empty()) {
295 for (size_t i = 0; i < chunk_urls.size(); ++i)
296 chunk_request_urls_.push_back(chunk_urls[i]);
297 }
298
299 // Handle the case were the SafeBrowsing service tells us to dump our
300 // database.
301 if (reset) {
302 sb_service_->ResetDatabase();
[email protected]1d8f8b42008-11-21 22:22:41303 delete chunk_deletes;
initial.commit09911bf2008-07-26 23:55:29304 return true;
305 }
306
307 // Chunks to delete from our storage.
308 if (!chunk_deletes->empty())
309 sb_service_->HandleChunkDelete(chunk_deletes);
[email protected]1d8f8b42008-11-21 22:22:41310 else
311 delete chunk_deletes;
initial.commit09911bf2008-07-26 23:55:29312
313 break;
314 }
315 case CHUNK_REQUEST: {
[email protected]22573822008-11-14 00:40:47316 if (sb_service_->new_safe_browsing())
317 UMA_HISTOGRAM_TIMES(L"SB2.ChunkRequest",
318 base::Time::Now() - chunk_request_start_);
319
initial.commit09911bf2008-07-26 23:55:29320 const ChunkUrl chunk_url = chunk_request_urls_.front();
initial.commit09911bf2008-07-26 23:55:29321 bool re_key = false;
322 std::deque<SBChunk>* chunks = new std::deque<SBChunk>;
323 if (!parser.ParseChunk(data, length,
324 client_key_, chunk_url.mac,
325 &re_key, chunks)) {
326#ifndef NDEBUG
327 std::string data_str;
328 data_str.assign(data, length);
329 std::string encoded_chunk;
[email protected]a9bb6f692008-07-30 16:40:10330 net::Base64Encode(data, &encoded_chunk);
initial.commit09911bf2008-07-26 23:55:29331 SB_DLOG(INFO) << "ParseChunk error for chunk: " << chunk_url.url
332 << ", client_key: " << client_key_
333 << ", wrapped_key: " << wrapped_key_
334 << ", mac: " << chunk_url.mac
335 << ", Base64Encode(data): " << encoded_chunk
336 << ", length: " << length;
337#endif
338 safe_browsing_util::FreeChunks(chunks);
339 delete chunks;
340 return false;
341 }
342
343 if (re_key)
344 HandleReKey();
345
346 if (chunks->empty()) {
347 delete chunks;
348 } else {
349 chunk_pending_to_write_ = true;
[email protected]8b02bb8a2008-10-22 02:05:09350 sb_service_->HandleChunk(chunk_url.list_name, chunks);
initial.commit09911bf2008-07-26 23:55:29351 }
352
353 break;
354 }
355 case GETKEY_REQUEST: {
356 std::string client_key, wrapped_key;
357 if (!parser.ParseNewKey(data, length, &client_key, &wrapped_key))
358 return false;
359
360 client_key_ = client_key;
361 wrapped_key_ = wrapped_key;
362 notify_loop_->PostTask(FROM_HERE, NewRunnableMethod(
363 sb_service_, &SafeBrowsingService::OnNewMacKeys, client_key_,
364 wrapped_key_));
365 break;
366 }
367
368 default:
369 return false;
370 }
371
372 return true;
373}
374
375void SafeBrowsingProtocolManager::Initialize() {
376 // Don't want to hit the safe browsing servers on build/chrome bots.
[email protected]05f9b682008-09-29 22:18:01377 if (base::SysInfo::HasEnvVar(env_vars::kHeadless))
initial.commit09911bf2008-07-26 23:55:29378 return;
379
380 ScheduleNextUpdate(false /* no back off */);
381}
382
383void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
384 DCHECK(next_update_sec_ > 0);
385
[email protected]2d316662008-09-03 18:18:14386 // Unschedule any current timer.
387 update_timer_.Stop();
initial.commit09911bf2008-07-26 23:55:29388
389 // Reschedule with the new update.
390 const int next_update = GetNextUpdateTime(back_off);
[email protected]2d316662008-09-03 18:18:14391 update_timer_.Start(TimeDelta::FromMilliseconds(next_update), this,
392 &SafeBrowsingProtocolManager::GetNextUpdate);
initial.commit09911bf2008-07-26 23:55:29393}
394
395// According to section 5 of the SafeBrowsing protocol specification, we must
396// back off after a certain number of errors. We only change 'next_update_sec_'
397// when we receive a response from the SafeBrowsing service.
398int SafeBrowsingProtocolManager::GetNextUpdateTime(bool back_off) {
399 int next = next_update_sec_;
400 if (back_off) {
401 next = GetNextBackOffTime(&update_error_count_, &update_back_off_mult_);
402 } else {
403 // Successful response means error reset.
404 update_error_count_ = 0;
405 update_back_off_mult_ = 1;
406 }
407 return next * 1000; // milliseconds
408}
409
410int SafeBrowsingProtocolManager::GetNextBackOffTime(int* error_count,
411 int* multiplier) {
412 DCHECK(multiplier && error_count);
413 (*error_count)++;
414 if (*error_count > 1 && *error_count < 6) {
415 int next = static_cast<int>(*multiplier * (1 + back_off_fuzz_) * 30 * 60);
416 *multiplier *= 2;
417 if (*multiplier > kSbMaxBackOff)
418 *multiplier = kSbMaxBackOff;
419 return next;
420 }
421
422 if (*error_count >= 6)
423 return 60 * 60 * 8; // 8 hours
424
425 return 60; // 1 minute
426}
427
// This request requires getting a list of all the chunks for each list from the
// database asynchronously. The request will be issued when we're called back in
// OnGetChunksComplete.
// TODO(paulg): We should get this at start up and maintain a ChunkRange cache
//              to avoid hitting the database with each update request. On the
//              otherhand, this request will only occur ~20-30 minutes so there
//              isn't that much overhead. Measure!
void SafeBrowsingProtocolManager::IssueUpdateRequest() {
  request_type_ = UPDATE_REQUEST;
  // Kicks off the async chunk-range query; the network request itself is
  // built and sent from OnGetChunksComplete.
  sb_service_->UpdateStarted();
}
439
440void SafeBrowsingProtocolManager::IssueChunkRequest() {
441 // We are only allowed to have one request outstanding at any time. Also,
442 // don't get the next url until the previous one has been written to disk so
443 // that we don't use too much memory.
444 if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
445 return;
446
447 ChunkUrl next_chunk = chunk_request_urls_.front();
448 DCHECK(!next_chunk.url.empty());
449 if (!StartsWithASCII(next_chunk.url, "http://", false) &&
450 !StartsWithASCII(next_chunk.url, "https://", false))
451 next_chunk.url = "http://" + next_chunk.url;
452 GURL chunk_url(next_chunk.url);
453 request_type_ = CHUNK_REQUEST;
454 request_.reset(new URLFetcher(chunk_url, URLFetcher::GET, this));
455 request_->set_load_flags(net::LOAD_DISABLE_CACHE);
456 request_->set_request_context(Profile::GetDefaultRequestContext());
[email protected]22573822008-11-14 00:40:47457 chunk_request_start_ = base::Time::Now();
initial.commit09911bf2008-07-26 23:55:29458 request_->Start();
459}
460
461void SafeBrowsingProtocolManager::IssueKeyRequest() {
462 GURL key_url(StringPrintf(kSbNewKeyUrl,
463 kSbClientName,
[email protected]484fce42008-10-01 00:37:18464 version_.c_str()));
initial.commit09911bf2008-07-26 23:55:29465 request_type_ = GETKEY_REQUEST;
466 request_.reset(new URLFetcher(key_url, URLFetcher::GET, this));
467 request_->set_load_flags(net::LOAD_DISABLE_CACHE);
468 request_->set_request_context(Profile::GetDefaultRequestContext());
469 request_->Start();
470}
471
472void SafeBrowsingProtocolManager::OnGetChunksComplete(
473 const std::vector<SBListChunkRanges>& lists, bool database_error) {
474 DCHECK(request_type_ == UPDATE_REQUEST);
475
476 if (database_error) {
477 ScheduleNextUpdate(false);
478 return;
479 }
480
481 const bool use_mac = !client_key_.empty();
482
483 // Format our stored chunks:
484 std::string list_data;
485 bool found_malware = false;
486 bool found_phishing = false;
487 for (size_t i = 0; i < lists.size(); ++i) {
488 list_data.append(FormatList(lists[i], use_mac));
[email protected]c3ff89492008-11-11 02:17:51489 if (lists[i].name == safe_browsing_util::kPhishingList)
initial.commit09911bf2008-07-26 23:55:29490 found_phishing = true;
491
[email protected]c3ff89492008-11-11 02:17:51492 if (lists[i].name == safe_browsing_util::kMalwareList)
initial.commit09911bf2008-07-26 23:55:29493 found_malware = true;
494 }
495
496 // If we have an empty database, let the server know we want data for these
497 // lists.
498 if (!found_phishing)
[email protected]c3ff89492008-11-11 02:17:51499 list_data.append(FormatList(
500 SBListChunkRanges(safe_browsing_util::kPhishingList), use_mac));
initial.commit09911bf2008-07-26 23:55:29501
502 if (!found_malware)
[email protected]c3ff89492008-11-11 02:17:51503 list_data.append(FormatList(
504 SBListChunkRanges(safe_browsing_util::kMalwareList), use_mac));
initial.commit09911bf2008-07-26 23:55:29505
506 std::string url = StringPrintf(kSbUpdateUrl,
507 kSbClientName,
[email protected]484fce42008-10-01 00:37:18508 version_.c_str());
initial.commit09911bf2008-07-26 23:55:29509 if (use_mac) {
510 url.append("&wrkey=");
511 url.append(wrapped_key_);
512 }
513
514 GURL update_url(url);
515 request_.reset(new URLFetcher(update_url, URLFetcher::POST, this));
516 request_->set_load_flags(net::LOAD_DISABLE_CACHE);
517 request_->set_request_context(Profile::GetDefaultRequestContext());
518 request_->set_upload_data("text/plain", list_data);
519 request_->Start();
520}
521
522void SafeBrowsingProtocolManager::OnChunkInserted() {
523 chunk_pending_to_write_ = false;
524
525 if (chunk_request_urls_.empty()) {
[email protected]22573822008-11-14 00:40:47526 // Don't pollute old implementation histograms with new implemetation data.
527 if (sb_service_->new_safe_browsing())
528 UMA_HISTOGRAM_LONG_TIMES(L"SB2.Update", Time::Now() - last_update_);
529 else
530 UMA_HISTOGRAM_LONG_TIMES(L"SB.Update", Time::Now() - last_update_);
[email protected]613a03b2008-10-24 23:02:00531 sb_service_->UpdateFinished(true);
initial.commit09911bf2008-07-26 23:55:29532 } else {
533 IssueChunkRequest();
534 }
535}
536
537// static
538std::string SafeBrowsingProtocolManager::FormatList(
539 const SBListChunkRanges& list, bool use_mac) {
540 std::string formatted_results;
541 formatted_results.append(list.name);
542 formatted_results.append(";");
543 if (!list.adds.empty()) {
544 formatted_results.append("a:" + list.adds);
545 if (!list.subs.empty() || use_mac)
546 formatted_results.append(":");
547 }
548 if (!list.subs.empty()) {
549 formatted_results.append("s:" + list.subs);
550 if (use_mac)
551 formatted_results.append(":");
552 }
553 if (use_mac)
554 formatted_results.append("mac");
555 formatted_results.append("\n");
556
557 return formatted_results;
558}
559
560void SafeBrowsingProtocolManager::HandleReKey() {
561 client_key_.clear();
562 wrapped_key_.clear();
563 IssueKeyRequest();
564}
565
566void SafeBrowsingProtocolManager::HandleGetHashError() {
567 int next = GetNextBackOffTime(&gethash_error_count_, &gethash_back_off_mult_);
568 next_gethash_time_ = Time::Now() + TimeDelta::FromSeconds(next);
569}