[email protected] | c83dd91 | 2010-04-06 18:50:51 | [diff] [blame] | 1 | // Copyright (c) 2010 The Chromium Authors. All rights reserved. |
license.bot | bf09a50 | 2008-08-24 00:55:55 | [diff] [blame] | 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 4 | |
| 5 | #include "chrome/browser/safe_browsing/protocol_manager.h" |
| 6 | |
[email protected] | bd7e7044 | 2010-08-07 07:24:21 | [diff] [blame] | 7 | #ifndef NDEBUG |
[email protected] | 978df34 | 2009-11-24 06:21:53 | [diff] [blame] | 8 | #include "base/base64.h" |
[email protected] | bd7e7044 | 2010-08-07 07:24:21 | [diff] [blame] | 9 | #endif |
[email protected] | 76b90d31 | 2010-08-03 03:00:50 | [diff] [blame] | 10 | #include "base/environment.h" |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 11 | #include "base/histogram.h" |
| 12 | #include "base/logging.h" |
[email protected] | 05f9b68 | 2008-09-29 22:18:01 | [diff] [blame] | 13 | #include "base/rand_util.h" |
[email protected] | 80720414 | 2009-05-05 03:31:44 | [diff] [blame] | 14 | #include "base/stl_util-inl.h" |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 15 | #include "base/string_util.h" |
| 16 | #include "base/task.h" |
| 17 | #include "base/timer.h" |
[email protected] | d83d03aa | 2009-11-02 21:44:37 | [diff] [blame] | 18 | #include "chrome/browser/chrome_thread.h" |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 19 | #include "chrome/browser/profile.h" |
| 20 | #include "chrome/browser/safe_browsing/protocol_parser.h" |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 21 | #include "chrome/browser/safe_browsing/safe_browsing_service.h" |
[email protected] | 1eeb5e0 | 2010-07-20 23:02:11 | [diff] [blame] | 22 | #include "chrome/common/chrome_version_info.h" |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 23 | #include "chrome/common/env_vars.h" |
[email protected] | 68d2a05f | 2010-05-07 21:39:55 | [diff] [blame] | 24 | #include "chrome/common/net/url_request_context_getter.h" |
[email protected] | dfdb0de7 | 2009-02-19 21:58:14 | [diff] [blame] | 25 | #include "net/base/escape.h" |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 26 | #include "net/base/load_flags.h" |
[email protected] | 3c3f4ac5 | 2009-12-15 20:22:17 | [diff] [blame] | 27 | #include "net/url_request/url_request_status.h" |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 28 | |
[email protected] | e1acf6f | 2008-10-27 20:43:33 | [diff] [blame] | 29 | using base::Time; |
| 30 | using base::TimeDelta; |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 31 | |
// Maximum time, in seconds, from start up before we must issue an update
// query. The first update is scheduled at a random point inside this window
// (see the constructor).
static const int kSbTimerStartIntervalSec = 5 * 60;

// The maximum time, in seconds, to wait for a response to an update request
// before UpdateResponseTimeout() cancels the update cycle.
static const int kSbMaxUpdateWaitSec = 10;

// Maximum back off multiplier; caps the doubling done in GetNextBackOffTime().
static const int kSbMaxBackOff = 8;
| 40 | |
| 41 | |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 42 | // SafeBrowsingProtocolManager implementation ---------------------------------- |
| 43 | |
// Constructs the protocol manager. |sb_service| is stored unowned — assumed
// to outlive this object (TODO: confirm against owner). |client_key| /
// |wrapped_key| may be empty; in that case GetNextUpdate() issues a key
// request before the first update. |disable_auto_update| suppresses the
// periodic update timer (see ScheduleNextUpdate()).
SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
    SafeBrowsingService* sb_service,
    const std::string& client_name,
    const std::string& client_key,
    const std::string& wrapped_key,
    URLRequestContextGetter* request_context_getter,
    const std::string& info_url_prefix,
    const std::string& mackey_url_prefix,
    bool disable_auto_update)
    : sb_service_(sb_service),
      request_type_(NO_REQUEST),
      update_error_count_(0),
      gethash_error_count_(0),
      update_back_off_mult_(1),
      gethash_back_off_mult_(1),
      next_update_sec_(-1),
      update_state_(FIRST_REQUEST),
      initial_request_(true),
      chunk_pending_to_write_(false),
      client_key_(client_key),
      wrapped_key_(wrapped_key),
      update_size_(0),
      client_name_(client_name),
      request_context_getter_(request_context_getter),
      info_url_prefix_(info_url_prefix),
      mackey_url_prefix_(mackey_url_prefix),
      disable_auto_update_(disable_auto_update) {
  DCHECK(!info_url_prefix_.empty() && !mackey_url_prefix_.empty());

  // Set the backoff multiplier fuzz to a random value between 0 and 1.
  back_off_fuzz_ = static_cast<float>(base::RandDouble());
  // The first update must happen between 1-5 minutes of start up.
  next_update_sec_ = base::RandInt(60, kSbTimerStartIntervalSec);

  // Record the browser version for update requests; fall back to a
  // placeholder when VersionInfo is unavailable.
  chrome::VersionInfo version_info;
  if (!version_info.is_valid())
    version_ = "0.1";
  else
    version_ = version_info.Version();
}
| 84 | |
| 85 | SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() { |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 86 | // Delete in-progress SafeBrowsing requests. |
| 87 | STLDeleteContainerPairFirstPointers(hash_requests_.begin(), |
| 88 | hash_requests_.end()); |
| 89 | hash_requests_.clear(); |
[email protected] | dfdb0de7 | 2009-02-19 21:58:14 | [diff] [blame] | 90 | |
| 91 | // Delete in-progress malware reports. |
| 92 | STLDeleteContainerPointers(malware_reports_.begin(), malware_reports_.end()); |
| 93 | malware_reports_.clear(); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 94 | } |
| 95 | |
| 96 | // Public API used by the SafeBrowsingService ---------------------------------- |
| 97 | |
| 98 | // We can only have one update or chunk request outstanding, but there may be |
| 99 | // multiple GetHash requests pending since we don't want to serialize them and |
| 100 | // slow down the user. |
| 101 | void SafeBrowsingProtocolManager::GetFullHash( |
| 102 | SafeBrowsingService::SafeBrowsingCheck* check, |
| 103 | const std::vector<SBPrefix>& prefixes) { |
| 104 | // If we are in GetHash backoff, we need to check if we're past the next |
| 105 | // allowed time. If we are, we can proceed with the request. If not, we are |
| 106 | // required to return empty results (i.e. treat the page as safe). |
| 107 | if (gethash_error_count_ && Time::Now() <= next_gethash_time_) { |
| 108 | std::vector<SBFullHashResult> full_hashes; |
[email protected] | 200abc3 | 2008-09-05 01:44:33 | [diff] [blame] | 109 | sb_service_->HandleGetHashResults(check, full_hashes, false); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 110 | return; |
| 111 | } |
[email protected] | 894c4e8 | 2010-06-29 21:53:18 | [diff] [blame] | 112 | bool use_mac = !client_key_.empty(); |
| 113 | GURL gethash_url = GetHashUrl(use_mac); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 114 | URLFetcher* fetcher = new URLFetcher(gethash_url, URLFetcher::POST, this); |
| 115 | hash_requests_[fetcher] = check; |
| 116 | |
| 117 | std::string get_hash; |
| 118 | SafeBrowsingProtocolParser parser; |
| 119 | parser.FormatGetHash(prefixes, &get_hash); |
| 120 | |
| 121 | fetcher->set_load_flags(net::LOAD_DISABLE_CACHE); |
[email protected] | d11f566 | 2009-11-12 20:52:56 | [diff] [blame] | 122 | fetcher->set_request_context(request_context_getter_); |
[email protected] | d36e3c8e | 2008-08-29 23:42:20 | [diff] [blame] | 123 | fetcher->set_upload_data("text/plain", get_hash); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 124 | fetcher->Start(); |
| 125 | } |
| 126 | |
| 127 | void SafeBrowsingProtocolManager::GetNextUpdate() { |
| 128 | if (initial_request_) { |
| 129 | if (client_key_.empty() || wrapped_key_.empty()) { |
| 130 | IssueKeyRequest(); |
| 131 | return; |
| 132 | } else { |
| 133 | initial_request_ = false; |
| 134 | } |
| 135 | } |
| 136 | |
| 137 | if (!request_.get()) |
| 138 | IssueUpdateRequest(); |
| 139 | } |
| 140 | |
| 141 | // URLFetcher::Delegate implementation ----------------------------------------- |
| 142 | |
| 143 | // All SafeBrowsing request responses are handled here. |
| 144 | // TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a |
| 145 | // chunk should retry the download and parse of that chunk (and |
| 146 | // what back off / how many times to try), and if that effects the |
| 147 | // update back off. For now, a failed parse of the chunk means we |
| 148 | // drop it. This isn't so bad because the next UPDATE_REQUEST we |
| 149 | // do will report all the chunks we have. If that chunk is still |
| 150 | // required, the SafeBrowsing servers will tell us to get it again. |
| 151 | void SafeBrowsingProtocolManager::OnURLFetchComplete( |
| 152 | const URLFetcher* source, |
| 153 | const GURL& url, |
| 154 | const URLRequestStatus& status, |
| 155 | int response_code, |
| 156 | const ResponseCookies& cookies, |
| 157 | const std::string& data) { |
| 158 | scoped_ptr<const URLFetcher> fetcher; |
| 159 | bool parsed_ok = true; |
| 160 | bool must_back_off = false; // Reduce SafeBrowsing service query frequency. |
| 161 | |
[email protected] | dfdb0de7 | 2009-02-19 21:58:14 | [diff] [blame] | 162 | // See if this is a malware report fetcher. We don't take any action for |
| 163 | // the response to those. |
| 164 | std::set<const URLFetcher*>::iterator mit = malware_reports_.find(source); |
| 165 | if (mit != malware_reports_.end()) { |
| 166 | const URLFetcher* report = *mit; |
| 167 | malware_reports_.erase(mit); |
| 168 | delete report; |
| 169 | return; |
| 170 | } |
| 171 | |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 172 | HashRequests::iterator it = hash_requests_.find(source); |
| 173 | if (it != hash_requests_.end()) { |
| 174 | // GetHash response. |
| 175 | fetcher.reset(it->first); |
| 176 | SafeBrowsingService::SafeBrowsingCheck* check = it->second; |
| 177 | std::vector<SBFullHashResult> full_hashes; |
[email protected] | 200abc3 | 2008-09-05 01:44:33 | [diff] [blame] | 178 | bool can_cache = false; |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 179 | if (response_code == 200 || response_code == 204) { |
[email protected] | 682343d | 2009-04-17 19:51:40 | [diff] [blame] | 180 | // For tracking our GetHash false positive (204) rate, compared to real |
| 181 | // (200) responses. |
| 182 | if (response_code == 200) |
| 183 | UMA_HISTOGRAM_COUNTS("SB2.GetHash200", 1); |
| 184 | else |
| 185 | UMA_HISTOGRAM_COUNTS("SB2.GetHash204", 1); |
[email protected] | 200abc3 | 2008-09-05 01:44:33 | [diff] [blame] | 186 | can_cache = true; |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 187 | gethash_error_count_ = 0; |
| 188 | gethash_back_off_mult_ = 1; |
| 189 | bool re_key = false; |
| 190 | SafeBrowsingProtocolParser parser; |
| 191 | parsed_ok = parser.ParseGetHash(data.data(), |
| 192 | static_cast<int>(data.length()), |
| 193 | client_key_, |
| 194 | &re_key, |
| 195 | &full_hashes); |
| 196 | if (!parsed_ok) { |
| 197 | // If we fail to parse it, we must still inform the SafeBrowsingService |
| 198 | // so that it doesn't hold up the user's request indefinitely. Not sure |
| 199 | // what to do at that point though! |
| 200 | full_hashes.clear(); |
| 201 | } else { |
| 202 | if (re_key) |
| 203 | HandleReKey(); |
| 204 | } |
[email protected] | 3c3f4ac5 | 2009-12-15 20:22:17 | [diff] [blame] | 205 | } else { |
[email protected] | 7bdc1bf | 2009-07-28 15:48:03 | [diff] [blame] | 206 | HandleGetHashError(Time::Now()); |
[email protected] | 3c3f4ac5 | 2009-12-15 20:22:17 | [diff] [blame] | 207 | if (status.status() == URLRequestStatus::FAILED) { |
| 208 | SB_DLOG(INFO) << "SafeBrowsing GetHash request for: " << source->url() |
| 209 | << " failed with os error: " << status.os_error(); |
| 210 | } else { |
| 211 | SB_DLOG(INFO) << "SafeBrowsing GetHash request for: " << source->url() |
| 212 | << " failed with error: " << response_code; |
| 213 | } |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 214 | } |
| 215 | |
| 216 | // Call back the SafeBrowsingService with full_hashes, even if there was a |
| 217 | // parse error or an error response code (in which case full_hashes will be |
| 218 | // empty). We can't block the user regardless of the error status. |
[email protected] | 200abc3 | 2008-09-05 01:44:33 | [diff] [blame] | 219 | sb_service_->HandleGetHashResults(check, full_hashes, can_cache); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 220 | |
| 221 | hash_requests_.erase(it); |
| 222 | } else { |
| 223 | // Update, chunk or key response. |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 224 | fetcher.reset(request_.release()); |
| 225 | |
[email protected] | a11c2c6 | 2009-08-07 22:47:56 | [diff] [blame] | 226 | if (request_type_ == UPDATE_REQUEST) { |
| 227 | if (!fetcher.get()) { |
| 228 | // We've timed out waiting for an update response, so we've cancelled |
| 229 | // the update request and scheduled a new one. Ignore this response. |
| 230 | return; |
| 231 | } |
| 232 | |
| 233 | // Cancel the update response timeout now that we have the response. |
| 234 | update_timer_.Stop(); |
| 235 | } |
| 236 | |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 237 | if (response_code == 200) { |
| 238 | // We have data from the SafeBrowsing service. |
| 239 | parsed_ok = HandleServiceResponse(source->url(), |
| 240 | data.data(), |
| 241 | static_cast<int>(data.length())); |
| 242 | if (!parsed_ok) { |
| 243 | SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url() |
| 244 | << "failed parse."; |
[email protected] | 22717d1e | 2008-10-15 21:55:32 | [diff] [blame] | 245 | must_back_off = true; |
| 246 | chunk_request_urls_.clear(); |
[email protected] | 6e3b12ff | 2009-01-06 22:17:57 | [diff] [blame] | 247 | UpdateFinished(false); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 248 | } |
| 249 | |
[email protected] | cb1cdf49 | 2009-01-16 23:51:44 | [diff] [blame] | 250 | switch (request_type_) { |
| 251 | case CHUNK_REQUEST: |
| 252 | if (parsed_ok) |
| 253 | chunk_request_urls_.pop_front(); |
| 254 | break; |
| 255 | case GETKEY_REQUEST: |
| 256 | if (initial_request_) { |
| 257 | // This is the first request we've made this session. Now that we |
| 258 | // have the keys, do the regular update request. |
| 259 | initial_request_ = false; |
| 260 | GetNextUpdate(); |
| 261 | return; |
| 262 | } |
| 263 | break; |
| 264 | case UPDATE_REQUEST: |
| 265 | if (chunk_request_urls_.empty() && parsed_ok) { |
| 266 | // We are up to date since the servers gave us nothing new, so we |
| 267 | // are done with this update cycle. |
| 268 | UpdateFinished(true); |
| 269 | } |
| 270 | break; |
[email protected] | 7e242b5 | 2009-02-05 12:31:02 | [diff] [blame] | 271 | default: |
| 272 | NOTREACHED(); |
| 273 | break; |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 274 | } |
[email protected] | 3c3f4ac5 | 2009-12-15 20:22:17 | [diff] [blame] | 275 | } else { |
| 276 | // The SafeBrowsing service error, or very bad response code: back off. |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 277 | must_back_off = true; |
| 278 | if (request_type_ == CHUNK_REQUEST) |
| 279 | chunk_request_urls_.clear(); |
[email protected] | 6e3b12ff | 2009-01-06 22:17:57 | [diff] [blame] | 280 | UpdateFinished(false); |
[email protected] | 3c3f4ac5 | 2009-12-15 20:22:17 | [diff] [blame] | 281 | if (status.status() == URLRequestStatus::FAILED) { |
| 282 | SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url() |
| 283 | << " failed with os error: " << status.os_error(); |
| 284 | } else { |
| 285 | SB_DLOG(INFO) << "SafeBrowsing request for: " << source->url() |
| 286 | << " failed with error: " << response_code; |
| 287 | } |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 288 | } |
| 289 | } |
| 290 | |
| 291 | // Schedule a new update request if we've finished retrieving all the chunks |
| 292 | // from the previous update. We treat the update request and the chunk URLs it |
| 293 | // contains as an atomic unit as far as back off is concerned. |
| 294 | if (chunk_request_urls_.empty() && |
| 295 | (request_type_ == CHUNK_REQUEST || request_type_ == UPDATE_REQUEST)) |
| 296 | ScheduleNextUpdate(must_back_off); |
| 297 | |
| 298 | // Get the next chunk if available. |
| 299 | IssueChunkRequest(); |
| 300 | } |
| 301 | |
| 302 | bool SafeBrowsingProtocolManager::HandleServiceResponse(const GURL& url, |
| 303 | const char* data, |
| 304 | int length) { |
| 305 | SafeBrowsingProtocolParser parser; |
| 306 | |
| 307 | switch (request_type_) { |
| 308 | case UPDATE_REQUEST: { |
| 309 | int next_update_sec = -1; |
| 310 | bool re_key = false; |
| 311 | bool reset = false; |
[email protected] | 7b1e3710 | 2010-03-08 21:43:16 | [diff] [blame] | 312 | scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes( |
| 313 | new std::vector<SBChunkDelete>); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 314 | std::vector<ChunkUrl> chunk_urls; |
| 315 | if (!parser.ParseUpdate(data, length, client_key_, |
| 316 | &next_update_sec, &re_key, |
[email protected] | 7b1e3710 | 2010-03-08 21:43:16 | [diff] [blame] | 317 | &reset, chunk_deletes.get(), &chunk_urls)) { |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 318 | return false; |
| 319 | } |
| 320 | |
| 321 | last_update_ = Time::Now(); |
| 322 | |
| 323 | if (update_state_ == FIRST_REQUEST) |
| 324 | update_state_ = SECOND_REQUEST; |
| 325 | else if (update_state_ == SECOND_REQUEST) |
| 326 | update_state_ = NORMAL_REQUEST; |
| 327 | |
| 328 | // New time for the next update. |
| 329 | if (next_update_sec > 0) { |
| 330 | next_update_sec_ = next_update_sec; |
| 331 | } else if (update_state_ == SECOND_REQUEST) { |
[email protected] | 05f9b68 | 2008-09-29 22:18:01 | [diff] [blame] | 332 | next_update_sec_ = base::RandInt(15 * 60, 45 * 60); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 333 | } |
| 334 | |
| 335 | // We need to request a new set of keys for MAC. |
| 336 | if (re_key) |
| 337 | HandleReKey(); |
| 338 | |
| 339 | // New chunks to download. |
| 340 | if (!chunk_urls.empty()) { |
[email protected] | 553dba6 | 2009-02-24 19:08:23 | [diff] [blame] | 341 | UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size()); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 342 | for (size_t i = 0; i < chunk_urls.size(); ++i) |
| 343 | chunk_request_urls_.push_back(chunk_urls[i]); |
| 344 | } |
| 345 | |
| 346 | // Handle the case were the SafeBrowsing service tells us to dump our |
| 347 | // database. |
| 348 | if (reset) { |
| 349 | sb_service_->ResetDatabase(); |
| 350 | return true; |
| 351 | } |
| 352 | |
[email protected] | 7b1e3710 | 2010-03-08 21:43:16 | [diff] [blame] | 353 | // Chunks to delete from our storage. Pass ownership of |
| 354 | // |chunk_deletes|. |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 355 | if (!chunk_deletes->empty()) |
[email protected] | 7b1e3710 | 2010-03-08 21:43:16 | [diff] [blame] | 356 | sb_service_->HandleChunkDelete(chunk_deletes.release()); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 357 | |
| 358 | break; |
| 359 | } |
| 360 | case CHUNK_REQUEST: { |
[email protected] | 484c57a | 2009-03-21 01:24:01 | [diff] [blame] | 361 | UMA_HISTOGRAM_TIMES("SB2.ChunkRequest", |
| 362 | base::Time::Now() - chunk_request_start_); |
[email protected] | 2257382 | 2008-11-14 00:40:47 | [diff] [blame] | 363 | |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 364 | const ChunkUrl chunk_url = chunk_request_urls_.front(); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 365 | bool re_key = false; |
[email protected] | 7b1e3710 | 2010-03-08 21:43:16 | [diff] [blame] | 366 | scoped_ptr<SBChunkList> chunks(new SBChunkList); |
[email protected] | 553dba6 | 2009-02-24 19:08:23 | [diff] [blame] | 367 | UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length); |
[email protected] | 6e3b12ff | 2009-01-06 22:17:57 | [diff] [blame] | 368 | update_size_ += length; |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 369 | if (!parser.ParseChunk(data, length, |
| 370 | client_key_, chunk_url.mac, |
[email protected] | 7b1e3710 | 2010-03-08 21:43:16 | [diff] [blame] | 371 | &re_key, chunks.get())) { |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 372 | #ifndef NDEBUG |
| 373 | std::string data_str; |
| 374 | data_str.assign(data, length); |
| 375 | std::string encoded_chunk; |
[email protected] | 978df34 | 2009-11-24 06:21:53 | [diff] [blame] | 376 | base::Base64Encode(data, &encoded_chunk); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 377 | SB_DLOG(INFO) << "ParseChunk error for chunk: " << chunk_url.url |
| 378 | << ", client_key: " << client_key_ |
| 379 | << ", wrapped_key: " << wrapped_key_ |
| 380 | << ", mac: " << chunk_url.mac |
| 381 | << ", Base64Encode(data): " << encoded_chunk |
| 382 | << ", length: " << length; |
| 383 | #endif |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 384 | return false; |
| 385 | } |
| 386 | |
| 387 | if (re_key) |
| 388 | HandleReKey(); |
| 389 | |
[email protected] | 7b1e3710 | 2010-03-08 21:43:16 | [diff] [blame] | 390 | // Chunks to add to storage. Pass ownership of |chunks|. |
| 391 | if (!chunks->empty()) { |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 392 | chunk_pending_to_write_ = true; |
[email protected] | 7b1e3710 | 2010-03-08 21:43:16 | [diff] [blame] | 393 | sb_service_->HandleChunk(chunk_url.list_name, chunks.release()); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 394 | } |
| 395 | |
| 396 | break; |
| 397 | } |
| 398 | case GETKEY_REQUEST: { |
| 399 | std::string client_key, wrapped_key; |
| 400 | if (!parser.ParseNewKey(data, length, &client_key, &wrapped_key)) |
| 401 | return false; |
| 402 | |
| 403 | client_key_ = client_key; |
| 404 | wrapped_key_ = wrapped_key; |
[email protected] | d83d03aa | 2009-11-02 21:44:37 | [diff] [blame] | 405 | ChromeThread::PostTask( |
| 406 | ChromeThread::UI, FROM_HERE, |
| 407 | NewRunnableMethod( |
| 408 | sb_service_, &SafeBrowsingService::OnNewMacKeys, client_key_, |
| 409 | wrapped_key_)); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 410 | break; |
| 411 | } |
| 412 | |
| 413 | default: |
| 414 | return false; |
| 415 | } |
| 416 | |
| 417 | return true; |
| 418 | } |
| 419 | |
| 420 | void SafeBrowsingProtocolManager::Initialize() { |
| 421 | // Don't want to hit the safe browsing servers on build/chrome bots. |
[email protected] | 76b90d31 | 2010-08-03 03:00:50 | [diff] [blame] | 422 | scoped_ptr<base::Environment> env(base::Environment::Create()); |
[email protected] | 9432ade | 2010-08-04 23:43:20 | [diff] [blame] | 423 | if (env->HasVar(env_vars::kHeadless)) |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 424 | return; |
| 425 | |
| 426 | ScheduleNextUpdate(false /* no back off */); |
| 427 | } |
| 428 | |
| 429 | void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) { |
[email protected] | 894c4e8 | 2010-06-29 21:53:18 | [diff] [blame] | 430 | DCHECK_GT(next_update_sec_, 0); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 431 | |
[email protected] | 894c4e8 | 2010-06-29 21:53:18 | [diff] [blame] | 432 | if (disable_auto_update_) { |
| 433 | // Unschedule any current timer. |
| 434 | update_timer_.Stop(); |
| 435 | return; |
| 436 | } |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 437 | // Reschedule with the new update. |
| 438 | const int next_update = GetNextUpdateTime(back_off); |
[email protected] | 894c4e8 | 2010-06-29 21:53:18 | [diff] [blame] | 439 | ForceScheduleNextUpdate(next_update); |
| 440 | } |
| 441 | |
// Unconditionally (re)schedules GetNextUpdate() to run |next_update_msec|
// milliseconds from now, replacing any previously scheduled run.
void SafeBrowsingProtocolManager::ForceScheduleNextUpdate(
    const int next_update_msec) {
  DCHECK_GE(next_update_msec, 0);
  // Unschedule any current timer.
  update_timer_.Stop();
  update_timer_.Start(TimeDelta::FromMilliseconds(next_update_msec), this,
                      &SafeBrowsingProtocolManager::GetNextUpdate);
}
| 450 | |
| 451 | // According to section 5 of the SafeBrowsing protocol specification, we must |
| 452 | // back off after a certain number of errors. We only change 'next_update_sec_' |
| 453 | // when we receive a response from the SafeBrowsing service. |
| 454 | int SafeBrowsingProtocolManager::GetNextUpdateTime(bool back_off) { |
| 455 | int next = next_update_sec_; |
| 456 | if (back_off) { |
| 457 | next = GetNextBackOffTime(&update_error_count_, &update_back_off_mult_); |
| 458 | } else { |
| 459 | // Successful response means error reset. |
| 460 | update_error_count_ = 0; |
| 461 | update_back_off_mult_ = 1; |
| 462 | } |
| 463 | return next * 1000; // milliseconds |
| 464 | } |
| 465 | |
| 466 | int SafeBrowsingProtocolManager::GetNextBackOffTime(int* error_count, |
| 467 | int* multiplier) { |
| 468 | DCHECK(multiplier && error_count); |
| 469 | (*error_count)++; |
| 470 | if (*error_count > 1 && *error_count < 6) { |
| 471 | int next = static_cast<int>(*multiplier * (1 + back_off_fuzz_) * 30 * 60); |
| 472 | *multiplier *= 2; |
| 473 | if (*multiplier > kSbMaxBackOff) |
| 474 | *multiplier = kSbMaxBackOff; |
| 475 | return next; |
| 476 | } |
| 477 | |
| 478 | if (*error_count >= 6) |
| 479 | return 60 * 60 * 8; // 8 hours |
| 480 | |
| 481 | return 60; // 1 minute |
| 482 | } |
| 483 | |
// This request requires getting a list of all the chunks for each list from
// the database asynchronously. The actual network request will be issued when
// we're called back in OnGetChunksComplete.
// TODO(paulg): We should get this at start up and maintain a ChunkRange cache
//              to avoid hitting the database with each update request. On the
//              other hand, this request only occurs every ~20-30 minutes so
//              there isn't that much overhead. Measure!
void SafeBrowsingProtocolManager::IssueUpdateRequest() {
  request_type_ = UPDATE_REQUEST;
  sb_service_->UpdateStarted();
}
| 495 | |
| 496 | void SafeBrowsingProtocolManager::IssueChunkRequest() { |
| 497 | // We are only allowed to have one request outstanding at any time. Also, |
| 498 | // don't get the next url until the previous one has been written to disk so |
| 499 | // that we don't use too much memory. |
| 500 | if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_) |
| 501 | return; |
| 502 | |
| 503 | ChunkUrl next_chunk = chunk_request_urls_.front(); |
| 504 | DCHECK(!next_chunk.url.empty()); |
[email protected] | 894c4e8 | 2010-06-29 21:53:18 | [diff] [blame] | 505 | GURL chunk_url = NextChunkUrl(next_chunk.url); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 506 | request_type_ = CHUNK_REQUEST; |
| 507 | request_.reset(new URLFetcher(chunk_url, URLFetcher::GET, this)); |
| 508 | request_->set_load_flags(net::LOAD_DISABLE_CACHE); |
[email protected] | d11f566 | 2009-11-12 20:52:56 | [diff] [blame] | 509 | request_->set_request_context(request_context_getter_); |
[email protected] | 2257382 | 2008-11-14 00:40:47 | [diff] [blame] | 510 | chunk_request_start_ = base::Time::Now(); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 511 | request_->Start(); |
| 512 | } |
| 513 | |
| 514 | void SafeBrowsingProtocolManager::IssueKeyRequest() { |
[email protected] | 894c4e8 | 2010-06-29 21:53:18 | [diff] [blame] | 515 | GURL key_url = MacKeyUrl(); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 516 | request_type_ = GETKEY_REQUEST; |
| 517 | request_.reset(new URLFetcher(key_url, URLFetcher::GET, this)); |
| 518 | request_->set_load_flags(net::LOAD_DISABLE_CACHE); |
[email protected] | d11f566 | 2009-11-12 20:52:56 | [diff] [blame] | 519 | request_->set_request_context(request_context_getter_); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 520 | request_->Start(); |
| 521 | } |
| 522 | |
| 523 | void SafeBrowsingProtocolManager::OnGetChunksComplete( |
| 524 | const std::vector<SBListChunkRanges>& lists, bool database_error) { |
[email protected] | 894c4e8 | 2010-06-29 21:53:18 | [diff] [blame] | 525 | DCHECK_EQ(request_type_, UPDATE_REQUEST); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 526 | if (database_error) { |
[email protected] | a11c2c6 | 2009-08-07 22:47:56 | [diff] [blame] | 527 | UpdateFinished(false); |
[email protected] | 3c3f4ac5 | 2009-12-15 20:22:17 | [diff] [blame] | 528 | ScheduleNextUpdate(false); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 529 | return; |
| 530 | } |
| 531 | |
| 532 | const bool use_mac = !client_key_.empty(); |
| 533 | |
| 534 | // Format our stored chunks: |
| 535 | std::string list_data; |
| 536 | bool found_malware = false; |
| 537 | bool found_phishing = false; |
| 538 | for (size_t i = 0; i < lists.size(); ++i) { |
| 539 | list_data.append(FormatList(lists[i], use_mac)); |
[email protected] | c3ff8949 | 2008-11-11 02:17:51 | [diff] [blame] | 540 | if (lists[i].name == safe_browsing_util::kPhishingList) |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 541 | found_phishing = true; |
| 542 | |
[email protected] | c3ff8949 | 2008-11-11 02:17:51 | [diff] [blame] | 543 | if (lists[i].name == safe_browsing_util::kMalwareList) |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 544 | found_malware = true; |
| 545 | } |
| 546 | |
| 547 | // If we have an empty database, let the server know we want data for these |
| 548 | // lists. |
| 549 | if (!found_phishing) |
[email protected] | c3ff8949 | 2008-11-11 02:17:51 | [diff] [blame] | 550 | list_data.append(FormatList( |
| 551 | SBListChunkRanges(safe_browsing_util::kPhishingList), use_mac)); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 552 | |
| 553 | if (!found_malware) |
[email protected] | c3ff8949 | 2008-11-11 02:17:51 | [diff] [blame] | 554 | list_data.append(FormatList( |
| 555 | SBListChunkRanges(safe_browsing_util::kMalwareList), use_mac)); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 556 | |
[email protected] | 894c4e8 | 2010-06-29 21:53:18 | [diff] [blame] | 557 | GURL update_url = UpdateUrl(use_mac); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 558 | request_.reset(new URLFetcher(update_url, URLFetcher::POST, this)); |
| 559 | request_->set_load_flags(net::LOAD_DISABLE_CACHE); |
[email protected] | d11f566 | 2009-11-12 20:52:56 | [diff] [blame] | 560 | request_->set_request_context(request_context_getter_); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 561 | request_->set_upload_data("text/plain", list_data); |
| 562 | request_->Start(); |
[email protected] | a11c2c6 | 2009-08-07 22:47:56 | [diff] [blame] | 563 | |
| 564 | // Begin the update request timeout. |
| 565 | update_timer_.Start(TimeDelta::FromSeconds(kSbMaxUpdateWaitSec), this, |
| 566 | &SafeBrowsingProtocolManager::UpdateResponseTimeout); |
| 567 | } |
| 568 | |
| 569 | // If we haven't heard back from the server with an update response, this method |
| 570 | // will run. Close the current update session and schedule another update. |
| 571 | void SafeBrowsingProtocolManager::UpdateResponseTimeout() { |
[email protected] | 894c4e8 | 2010-06-29 21:53:18 | [diff] [blame] | 572 | DCHECK_EQ(request_type_, UPDATE_REQUEST); |
[email protected] | a11c2c6 | 2009-08-07 22:47:56 | [diff] [blame] | 573 | request_.reset(); |
[email protected] | a11c2c6 | 2009-08-07 22:47:56 | [diff] [blame] | 574 | UpdateFinished(false); |
[email protected] | 3c3f4ac5 | 2009-12-15 20:22:17 | [diff] [blame] | 575 | ScheduleNextUpdate(false); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 576 | } |
| 577 | |
| 578 | void SafeBrowsingProtocolManager::OnChunkInserted() { |
| 579 | chunk_pending_to_write_ = false; |
| 580 | |
| 581 | if (chunk_request_urls_.empty()) { |
[email protected] | 484c57a | 2009-03-21 01:24:01 | [diff] [blame] | 582 | UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_); |
[email protected] | 6e3b12ff | 2009-01-06 22:17:57 | [diff] [blame] | 583 | UpdateFinished(true); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 584 | } else { |
| 585 | IssueChunkRequest(); |
| 586 | } |
| 587 | } |
| 588 | |
[email protected] | dfdb0de7 | 2009-02-19 21:58:14 | [diff] [blame] | 589 | void SafeBrowsingProtocolManager::ReportMalware(const GURL& malware_url, |
| 590 | const GURL& page_url, |
[email protected] | ee4d3e80 | 2010-08-12 23:59:51 | [diff] [blame] | 591 | const GURL& referrer_url, |
| 592 | bool is_subresource) { |
| 593 | GURL report_url = MalwareReportUrl(malware_url, page_url, referrer_url, |
| 594 | is_subresource); |
[email protected] | dfdb0de7 | 2009-02-19 21:58:14 | [diff] [blame] | 595 | URLFetcher* report = new URLFetcher(report_url, URLFetcher::GET, this); |
| 596 | report->set_load_flags(net::LOAD_DISABLE_CACHE); |
[email protected] | d11f566 | 2009-11-12 20:52:56 | [diff] [blame] | 597 | report->set_request_context(request_context_getter_); |
[email protected] | dfdb0de7 | 2009-02-19 21:58:14 | [diff] [blame] | 598 | report->Start(); |
| 599 | malware_reports_.insert(report); |
| 600 | } |
| 601 | |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 602 | // static |
| 603 | std::string SafeBrowsingProtocolManager::FormatList( |
| 604 | const SBListChunkRanges& list, bool use_mac) { |
| 605 | std::string formatted_results; |
| 606 | formatted_results.append(list.name); |
| 607 | formatted_results.append(";"); |
| 608 | if (!list.adds.empty()) { |
| 609 | formatted_results.append("a:" + list.adds); |
| 610 | if (!list.subs.empty() || use_mac) |
| 611 | formatted_results.append(":"); |
| 612 | } |
| 613 | if (!list.subs.empty()) { |
| 614 | formatted_results.append("s:" + list.subs); |
| 615 | if (use_mac) |
| 616 | formatted_results.append(":"); |
| 617 | } |
| 618 | if (use_mac) |
| 619 | formatted_results.append("mac"); |
| 620 | formatted_results.append("\n"); |
| 621 | |
| 622 | return formatted_results; |
| 623 | } |
| 624 | |
| 625 | void SafeBrowsingProtocolManager::HandleReKey() { |
| 626 | client_key_.clear(); |
| 627 | wrapped_key_.clear(); |
| 628 | IssueKeyRequest(); |
| 629 | } |
| 630 | |
[email protected] | 7bdc1bf | 2009-07-28 15:48:03 | [diff] [blame] | 631 | void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) { |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 632 | int next = GetNextBackOffTime(&gethash_error_count_, &gethash_back_off_mult_); |
[email protected] | 7bdc1bf | 2009-07-28 15:48:03 | [diff] [blame] | 633 | next_gethash_time_ = now + TimeDelta::FromSeconds(next); |
initial.commit | 09911bf | 2008-07-26 23:55:29 | [diff] [blame] | 634 | } |
[email protected] | 6e3b12ff | 2009-01-06 22:17:57 | [diff] [blame] | 635 | |
| 636 | void SafeBrowsingProtocolManager::UpdateFinished(bool success) { |
[email protected] | 553dba6 | 2009-02-24 19:08:23 | [diff] [blame] | 637 | UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_); |
[email protected] | 6e3b12ff | 2009-01-06 22:17:57 | [diff] [blame] | 638 | update_size_ = 0; |
| 639 | sb_service_->UpdateFinished(success); |
[email protected] | 7e242b5 | 2009-02-05 12:31:02 | [diff] [blame] | 640 | } |
[email protected] | 894c4e8 | 2010-06-29 21:53:18 | [diff] [blame] | 641 | |
| 642 | std::string SafeBrowsingProtocolManager::ComposeUrl( |
| 643 | const std::string& prefix, const std::string& method, |
| 644 | const std::string& client_name, const std::string& version, |
| 645 | const std::string& additional_query) { |
| 646 | DCHECK(!prefix.empty() && !method.empty() && |
| 647 | !client_name.empty() && !version.empty()); |
| 648 | std::string url = StringPrintf("%s/%s?client=%s&appver=%s&pver=2.2", |
| 649 | prefix.c_str(), method.c_str(), |
| 650 | client_name.c_str(), version.c_str()); |
| 651 | if (!additional_query.empty()) { |
| 652 | url.append(additional_query); |
| 653 | } |
| 654 | return url; |
| 655 | } |
| 656 | |
| 657 | GURL SafeBrowsingProtocolManager::UpdateUrl(bool use_mac) const { |
| 658 | std::string url = ComposeUrl(info_url_prefix_, "downloads", client_name_, |
| 659 | version_, additional_query_); |
| 660 | if (use_mac) { |
| 661 | url.append("&wrkey="); |
| 662 | url.append(wrapped_key_); |
| 663 | } |
| 664 | return GURL(url); |
| 665 | } |
| 666 | |
| 667 | GURL SafeBrowsingProtocolManager::GetHashUrl(bool use_mac) const { |
| 668 | std::string url= ComposeUrl(info_url_prefix_, "gethash", client_name_, |
| 669 | version_, additional_query_); |
| 670 | if (use_mac) { |
| 671 | url.append("&wrkey="); |
| 672 | url.append(wrapped_key_); |
| 673 | } |
| 674 | return GURL(url); |
| 675 | } |
| 676 | |
| 677 | GURL SafeBrowsingProtocolManager::MacKeyUrl() const { |
| 678 | return GURL(ComposeUrl(mackey_url_prefix_, "newkey", client_name_, version_, |
| 679 | additional_query_)); |
| 680 | } |
| 681 | |
| 682 | GURL SafeBrowsingProtocolManager::MalwareReportUrl( |
| 683 | const GURL& malware_url, const GURL& page_url, |
[email protected] | ee4d3e80 | 2010-08-12 23:59:51 | [diff] [blame] | 684 | const GURL& referrer_url, bool is_subresource) const { |
[email protected] | 894c4e8 | 2010-06-29 21:53:18 | [diff] [blame] | 685 | std::string url = ComposeUrl(info_url_prefix_, "report", client_name_, |
| 686 | version_, additional_query_); |
[email protected] | ee4d3e80 | 2010-08-12 23:59:51 | [diff] [blame] | 687 | return GURL(StringPrintf("%s&evts=malblhit&evtd=%s&evtr=%s&evhr=%s&evtb=%d", |
[email protected] | 894c4e8 | 2010-06-29 21:53:18 | [diff] [blame] | 688 | url.c_str(), EscapeQueryParamValue(malware_url.spec(), true).c_str(), |
| 689 | EscapeQueryParamValue(page_url.spec(), true).c_str(), |
[email protected] | ee4d3e80 | 2010-08-12 23:59:51 | [diff] [blame] | 690 | EscapeQueryParamValue(referrer_url.spec(), true).c_str(), |
| 691 | is_subresource)); |
[email protected] | 894c4e8 | 2010-06-29 21:53:18 | [diff] [blame] | 692 | } |
| 693 | |
| 694 | GURL SafeBrowsingProtocolManager::NextChunkUrl(const std::string& url) const { |
| 695 | std::string next_url; |
| 696 | if (!StartsWithASCII(url, "http://", false) && |
| 697 | !StartsWithASCII(url, "https://", false)) { |
| 698 | next_url = "http://" + url; |
| 699 | } else { |
| 700 | next_url = url; |
| 701 | } |
| 702 | if (!additional_query_.empty()) |
| 703 | next_url += additional_query_; |
| 704 | return GURL(next_url); |
| 705 | } |