[email protected] | 5a3b4d3 | 2011-03-17 01:24:05 | [diff] [blame] | 1 | // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
license.bot | bf09a50 | 2008-08-24 00:55:55 | [diff] [blame] | 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 4 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 5 | #include "net/url_request/url_request_http_job.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 6 | |
[email protected] | 4ed2755f | 2008-12-15 09:01:33 | [diff] [blame] | 7 | #include "base/base_switches.h" |
| 8 | #include "base/command_line.h" |
[email protected] | 39ce5c0 | 2008-08-22 04:03:44 | [diff] [blame] | 9 | #include "base/compiler_specific.h" |
[email protected] | 6088942 | 2008-09-23 01:18:16 | [diff] [blame] | 10 | #include "base/file_util.h" |
| 11 | #include "base/file_version_info.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 12 | #include "base/message_loop.h" |
[email protected] | 8684a881 | 2011-03-22 13:59:38 | [diff] [blame] | 13 | #include "base/metrics/field_trial.h" |
[email protected] | ec23f52 | 2011-02-22 21:01:38 | [diff] [blame] | 14 | #include "base/metrics/histogram.h" |
[email protected] | 5b90b5d | 2009-04-30 23:06:01 | [diff] [blame] | 15 | #include "base/rand_util.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 16 | #include "base/string_util.h" |
[email protected] | dd29bcd7 | 2011-03-24 00:03:44 | [diff] [blame] | 17 | #include "base/time.h" |
[email protected] | a9cea754 | 2009-05-20 04:30:23 | [diff] [blame] | 18 | #include "net/base/cert_status_flags.h" |
[email protected] | 9349cfb | 2010-08-31 18:00:53 | [diff] [blame] | 19 | #include "net/base/cookie_store.h" |
[email protected] | 423041b | 2008-10-27 17:39:28 | [diff] [blame] | 20 | #include "net/base/filter.h" |
[email protected] | 6d81b48 | 2011-02-22 19:47:19 | [diff] [blame] | 21 | #include "net/base/host_port_pair.h" |
[email protected] | b843072 | 2008-09-17 20:05:44 | [diff] [blame] | 22 | #include "net/base/load_flags.h" |
[email protected] | dd29bcd7 | 2011-03-24 00:03:44 | [diff] [blame] | 23 | #include "net/base/mime_util.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 24 | #include "net/base/net_errors.h" |
| 25 | #include "net/base/net_util.h" |
[email protected] | 636eccd | 2011-06-28 12:28:01 | [diff] [blame] | 26 | #include "net/base/network_delegate.h" |
[email protected] | 6088942 | 2008-09-23 01:18:16 | [diff] [blame] | 27 | #include "net/base/sdch_manager.h" |
[email protected] | 0b45559b | 2009-06-12 21:45:11 | [diff] [blame] | 28 | #include "net/base/ssl_cert_request_info.h" |
[email protected] | ee1edb47 | 2011-05-05 23:31:46 | [diff] [blame] | 29 | #include "net/base/ssl_config_service.h" |
[email protected] | 6d81b48 | 2011-02-22 19:47:19 | [diff] [blame] | 30 | #include "net/base/transport_security_state.h" |
[email protected] | 87c99b6a | 2011-05-13 20:06:48 | [diff] [blame] | 31 | #include "net/http/http_mac_signature.h" |
[email protected] | 8c76ae2 | 2010-04-20 22:15:43 | [diff] [blame] | 32 | #include "net/http/http_request_headers.h" |
[email protected] | 319d9e6f | 2009-02-18 19:47:21 | [diff] [blame] | 33 | #include "net/http/http_response_headers.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 34 | #include "net/http/http_response_info.h" |
| 35 | #include "net/http/http_transaction.h" |
| 36 | #include "net/http/http_transaction_factory.h" |
[email protected] | 0757e770 | 2009-03-27 04:00:22 | [diff] [blame] | 37 | #include "net/http/http_util.h" |
[email protected] | be4d55fe | 2010-06-01 13:40:02 | [diff] [blame] | 38 | #include "net/url_request/https_prober.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 39 | #include "net/url_request/url_request.h" |
[email protected] | 319d9e6f | 2009-02-18 19:47:21 | [diff] [blame] | 40 | #include "net/url_request/url_request_context.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 41 | #include "net/url_request/url_request_error_job.h" |
[email protected] | 06965e0 | 2009-09-04 21:36:42 | [diff] [blame] | 42 | #include "net/url_request/url_request_redirect_job.h" |
[email protected] | 6b3f964 | 2010-11-25 02:29:06 | [diff] [blame] | 43 | #include "net/url_request/url_request_throttler_header_adapter.h" |
| 44 | #include "net/url_request/url_request_throttler_manager.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 45 | |
[email protected] | 8c76ae2 | 2010-04-20 22:15:43 | [diff] [blame] | 46 | static const char kAvailDictionaryHeader[] = "Avail-Dictionary"; |
| 47 | |
[email protected] | dd29bcd7 | 2011-03-24 00:03:44 | [diff] [blame] | 48 | // When histogramming results related to SDCH and/or an SDCH latency test, the |
| 49 | // number of packets for which we need to record arrival times so as to |
| 50 | // calculate interpacket latencies. We currently are only looking at the |
| 51 | // first few packets, as we're monitoring the impact of the initial TCP |
| 52 | // congestion window on stalling of transmissions. |
| 53 | static const size_t kSdchPacketHistogramCount = 5; |
| 54 | |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 55 | namespace net { |
| 56 | |
| 57 | namespace { |
| 58 | |
[email protected] | 87c99b6a | 2011-05-13 20:06:48 | [diff] [blame] | 59 | void AddAuthorizationHeader( |
| 60 | const std::vector<CookieStore::CookieInfo>& cookie_infos, |
| 61 | HttpRequestInfo* request_info) { |
| 62 | const GURL& url = request_info->url; |
| 63 | const std::string& method = request_info->method; |
| 64 | std::string request_uri = HttpUtil::PathForRequest(url); |
| 65 | const std::string& host = url.host(); |
| 66 | int port = url.EffectiveIntPort(); |
| 67 | for (size_t i = 0; i < cookie_infos.size(); ++i) { |
| 68 | HttpMacSignature signature; |
| 69 | if (!signature.AddStateInfo(cookie_infos[i].name, |
[email protected] | c416fa8 | 2011-05-16 22:18:41 | [diff] [blame] | 70 | cookie_infos[i].creation_date, |
[email protected] | 87c99b6a | 2011-05-13 20:06:48 | [diff] [blame] | 71 | cookie_infos[i].mac_key, |
| 72 | cookie_infos[i].mac_algorithm)) { |
| 73 | continue; |
| 74 | } |
| 75 | if (!signature.AddHttpInfo(method, request_uri, host, port)) |
| 76 | continue; |
| 77 | request_info->extra_headers.SetHeader( |
| 78 | HttpRequestHeaders::kAuthorization, |
| 79 | signature.GenerateAuthorizationHeader()); |
| 80 | return; // Only add the first valid header. |
| 81 | } |
| 82 | } |
| 83 | |
// Self-deleting delegate for an HTTPS probe.  On a successful probe it
// opportunistically enables transport security for |host_|; in all cases it
// frees itself after the single ProbeComplete() callback.
class HTTPSProberDelegateImpl : public HTTPSProberDelegate {
 public:
  HTTPSProberDelegateImpl(const std::string& host, int max_age,
                          bool include_subdomains,
                          TransportSecurityState* sts)
      : host_(host),
        max_age_(max_age),
        include_subdomains_(include_subdomains),
        sts_(sts) { }

  virtual void ProbeComplete(bool result) {
    if (result) {
      base::Time current_time(base::Time::Now());
      base::TimeDelta max_age_delta = base::TimeDelta::FromSeconds(max_age_);

      // Record an opportunistic (not strict) upgrade that expires |max_age_|
      // seconds from now.
      TransportSecurityState::DomainState domain_state;
      domain_state.expiry = current_time + max_age_delta;
      domain_state.mode =
          TransportSecurityState::DomainState::MODE_OPPORTUNISTIC;
      domain_state.include_subdomains = include_subdomains_;

      sts_->EnableHost(host_, domain_state);
    }

    // The prober does not own its delegate, so we delete ourselves once the
    // one-shot callback has fired.
    delete this;
  }

 private:
  const std::string host_;
  const int max_age_;
  const bool include_subdomains_;
  scoped_refptr<TransportSecurityState> sts_;  // Ref held to keep it alive.
};
| 117 | |
| 118 | } // namespace |
| 119 | |
// Adapts a URLRequestHttpJob to the FilterContext interface so that content
// filters (gzip/deflate/SDCH) can query request and response state without
// depending directly on the job type.
class URLRequestHttpJob::HttpFilterContext : public FilterContext {
 public:
  explicit HttpFilterContext(URLRequestHttpJob* job);
  virtual ~HttpFilterContext();

  // FilterContext implementation.
  virtual bool GetMimeType(std::string* mime_type) const;
  virtual bool GetURL(GURL* gurl) const;
  virtual base::Time GetRequestTime() const;
  virtual bool IsCachedContent() const;
  virtual bool IsDownload() const;
  virtual bool IsSdchResponse() const;
  virtual int64 GetByteReadCount() const;
  virtual int GetResponseCode() const;
  virtual void RecordPacketStats(StatisticSelector statistic) const;

  // Method to allow us to reset filter context for a response that should have
  // been SDCH encoded when there is an update due to an explicit HTTP header.
  void ResetSdchResponseToFalse();

 private:
  URLRequestHttpJob* job_;  // Not owned; the job owns this context.

  DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
};
| 145 | |
[email protected] | fc01f23 | 2011-03-17 19:06:01 | [diff] [blame] | 146 | URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job) |
| 147 | : job_(job) { |
| 148 | DCHECK(job_); |
| 149 | } |
| 150 | |
| 151 | URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() { |
| 152 | } |
| 153 | |
| 154 | bool URLRequestHttpJob::HttpFilterContext::GetMimeType( |
| 155 | std::string* mime_type) const { |
| 156 | return job_->GetMimeType(mime_type); |
| 157 | } |
| 158 | |
| 159 | bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const { |
| 160 | if (!job_->request()) |
| 161 | return false; |
| 162 | *gurl = job_->request()->url(); |
| 163 | return true; |
| 164 | } |
| 165 | |
| 166 | base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const { |
| 167 | return job_->request() ? job_->request()->request_time() : base::Time(); |
| 168 | } |
| 169 | |
| 170 | bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const { |
[email protected] | dd29bcd7 | 2011-03-24 00:03:44 | [diff] [blame] | 171 | return job_->is_cached_content_; |
[email protected] | fc01f23 | 2011-03-17 19:06:01 | [diff] [blame] | 172 | } |
| 173 | |
| 174 | bool URLRequestHttpJob::HttpFilterContext::IsDownload() const { |
| 175 | return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0; |
| 176 | } |
| 177 | |
[email protected] | 46668fe5 | 2011-05-04 19:03:23 | [diff] [blame] | 178 | void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() { |
| 179 | DCHECK(job_->sdch_dictionary_advertised_); |
| 180 | job_->sdch_dictionary_advertised_ = false; |
| 181 | } |
| 182 | |
[email protected] | fc01f23 | 2011-03-17 19:06:01 | [diff] [blame] | 183 | bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const { |
| 184 | return job_->sdch_dictionary_advertised_; |
| 185 | } |
| 186 | |
| 187 | int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const { |
| 188 | return job_->filter_input_byte_count(); |
| 189 | } |
| 190 | |
| 191 | int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const { |
| 192 | return job_->GetResponseCode(); |
| 193 | } |
| 194 | |
| 195 | void URLRequestHttpJob::HttpFilterContext::RecordPacketStats( |
| 196 | StatisticSelector statistic) const { |
| 197 | job_->RecordPacketStats(statistic); |
| 198 | } |
| 199 | |
// TODO(darin): make sure the port blocking code is not lost
// Creates the job for an http/https request.  May instead return an error
// job (unsafe port, missing context) or a redirect job (HSTS upgrade).
// static
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https");

  // Refuse ports that are blocked by default and not explicitly overridden.
  int port = request->url().IntPort();
  if (!IsPortAllowedByDefault(port) && !IsPortAllowedByOverride(port))
    return new URLRequestErrorJob(request, ERR_UNSAFE_PORT);

  if (!request->context() ||
      !request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(request, ERR_INVALID_ARGUMENT);
  }

  TransportSecurityState::DomainState domain_state;
  // If the host has transport security (HSTS) enabled for plain http,
  // upgrade the request to https via an internal redirect.
  if (scheme == "http" &&
      request->context()->transport_security_state() &&
      request->context()->transport_security_state()->IsEnabledForHost(
          &domain_state,
          request->url().host(),
          SSLConfigService::IsSNIAvailable(
              request->context()->ssl_config_service()))) {
    if (domain_state.mode ==
        TransportSecurityState::DomainState::MODE_STRICT) {
      DCHECK_EQ(request->url().scheme(), "http");
      url_canon::Replacements<char> replacements;
      static const char kNewScheme[] = "https";
      replacements.SetScheme(kNewScheme,
                             url_parse::Component(0, strlen(kNewScheme)));
      GURL new_location = request->url().ReplaceComponents(replacements);
      return new URLRequestRedirectJob(request, new_location);
    } else {
      // TODO(agl): implement opportunistic HTTPS upgrade.
    }
  }

  return new URLRequestHttpJob(request);
}
| 240 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 241 | |
// Every member is initialized in the initializer list so the job is in a
// consistent state before any transaction work begins.
URLRequestHttpJob::URLRequestHttpJob(URLRequest* request)
    : URLRequestJob(request),
      response_info_(NULL),
      response_cookies_save_index_(0),
      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      ALLOW_THIS_IN_INITIALIZER_LIST(start_callback_(
          this, &URLRequestHttpJob::OnStartCompleted)),
      ALLOW_THIS_IN_INITIALIZER_LIST(read_callback_(
          this, &URLRequestHttpJob::OnReadCompleted)),
      ALLOW_THIS_IN_INITIALIZER_LIST(notify_before_headers_sent_callback_(
          this, &URLRequestHttpJob::NotifyBeforeSendHeadersCallback)),
      read_in_progress_(false),
      transaction_(NULL),
      // Register with the throttler up front so exponential back-off state
      // is tracked for this URL even if the transaction never starts.
      throttling_entry_(URLRequestThrottlerManager::GetInstance()->
          RegisterRequestUrl(request->url())),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false),
      request_creation_time_(),
      packet_timing_enabled_(false),
      done_(false),
      bytes_observed_in_packets_(0),
      packet_times_(),
      request_time_snapshot_(),
      final_packet_time_(),
      observed_packet_count_(0),
      ALLOW_THIS_IN_INITIALIZER_LIST(
          filter_context_(new HttpFilterContext(this))),
      ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)) {
  ResetTimer();
}
| 275 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 276 | void URLRequestHttpJob::NotifyHeadersComplete() { |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 277 | DCHECK(!response_info_); |
| 278 | |
| 279 | response_info_ = transaction_->GetResponseInfo(); |
| 280 | |
[email protected] | d8fd513 | 2009-05-15 01:06:53 | [diff] [blame] | 281 | // Save boolean, as we'll need this info at destruction time, and filters may |
| 282 | // also need this info. |
| 283 | is_cached_content_ = response_info_->was_cached; |
| 284 | |
[email protected] | 6b3f964 | 2010-11-25 02:29:06 | [diff] [blame] | 285 | if (!is_cached_content_) { |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 286 | URLRequestThrottlerHeaderAdapter response_adapter( |
[email protected] | 6b3f964 | 2010-11-25 02:29:06 | [diff] [blame] | 287 | response_info_->headers); |
[email protected] | 2fd33ee9 | 2011-03-25 22:30:21 | [diff] [blame] | 288 | throttling_entry_->UpdateWithResponse(request_info_.url.host(), |
| 289 | &response_adapter); |
[email protected] | 6b3f964 | 2010-11-25 02:29:06 | [diff] [blame] | 290 | } |
| 291 | |
[email protected] | 77f6fb43 | 2009-09-05 14:21:09 | [diff] [blame] | 292 | ProcessStrictTransportSecurityHeader(); |
[email protected] | a9cea754 | 2009-05-20 04:30:23 | [diff] [blame] | 293 | |
[email protected] | fe21987 | 2008-09-23 02:17:00 | [diff] [blame] | 294 | if (SdchManager::Global() && |
| 295 | SdchManager::Global()->IsInSupportedDomain(request_->url())) { |
[email protected] | c7bef94c | 2011-06-21 18:05:51 | [diff] [blame] | 296 | static const std::string name = "Get-Dictionary"; |
[email protected] | 6088942 | 2008-09-23 01:18:16 | [diff] [blame] | 297 | std::string url_text; |
| 298 | void* iter = NULL; |
| 299 | // TODO(jar): We need to not fetch dictionaries the first time they are |
| 300 | // seen, but rather wait until we can justify their usefulness. |
| 301 | // For now, we will only fetch the first dictionary, which will at least |
| 302 | // require multiple suggestions before we get additional ones for this site. |
| 303 | // Eventually we should wait until a dictionary is requested several times |
| 304 | // before we even download it (so that we don't waste memory or bandwidth). |
[email protected] | c7bef94c | 2011-06-21 18:05:51 | [diff] [blame] | 305 | if (response_info_->headers->EnumerateHeader(&iter, name, &url_text)) { |
[email protected] | d55ad15d | 2009-02-17 19:40:50 | [diff] [blame] | 306 | // request_->url() won't be valid in the destructor, so we use an |
| 307 | // alternate copy. |
[email protected] | dd29bcd7 | 2011-03-24 00:03:44 | [diff] [blame] | 308 | DCHECK_EQ(request_->url(), request_info_.url); |
[email protected] | d55ad15d | 2009-02-17 19:40:50 | [diff] [blame] | 309 | // Resolve suggested URL relative to request url. |
| 310 | sdch_dictionary_url_ = request_info_.url.Resolve(url_text); |
[email protected] | 6088942 | 2008-09-23 01:18:16 | [diff] [blame] | 311 | } |
| 312 | } |
| 313 | |
[email protected] | 0757e770 | 2009-03-27 04:00:22 | [diff] [blame] | 314 | // The HTTP transaction may be restarted several times for the purposes |
| 315 | // of sending authorization information. Each time it restarts, we get |
| 316 | // notified of the headers completion so that we can update the cookie store. |
| 317 | if (transaction_->IsReadyToRestartForAuth()) { |
| 318 | DCHECK(!response_info_->auth_challenge.get()); |
[email protected] | 87a09a9 | 2011-07-14 15:50:50 | [diff] [blame^] | 319 | // TODO(battre): This breaks the webrequest API for |
| 320 | // URLRequestTestHTTP.BasicAuthWithCookies |
| 321 | // where OnBeforeSendHeaders -> OnRequestSent -> OnBeforeSendHeaders |
| 322 | // occurs. |
[email protected] | 13c8a09 | 2010-07-29 06:15:44 | [diff] [blame] | 323 | RestartTransactionWithAuth(string16(), string16()); |
[email protected] | 0757e770 | 2009-03-27 04:00:22 | [diff] [blame] | 324 | return; |
| 325 | } |
| 326 | |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 327 | URLRequestJob::NotifyHeadersComplete(); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 328 | } |
| 329 | |
void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
  // Record final request stats before the base class notifies observers.
  DoneWithRequest(FINISHED);
  URLRequestJob::NotifyDone(status);
}
| 334 | |
// Tears down the transaction, recording the request as aborted, and drops
// the pointers that referred to transaction-owned or transaction-scoped data.
void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  DoneWithRequest(ABORTED);
  transaction_.reset();
  // |response_info_| pointed into the transaction; it is now dangling unless
  // cleared.
  response_info_ = NULL;
  // Release the context reference that was held for the transaction's
  // lifetime.
  context_ = NULL;
}
| 343 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 344 | void URLRequestHttpJob::StartTransaction() { |
[email protected] | 636eccd | 2011-06-28 12:28:01 | [diff] [blame] | 345 | if (request_->context() && request_->context()->network_delegate()) { |
| 346 | int rv = request_->context()->network_delegate()->NotifyBeforeSendHeaders( |
| 347 | request_, ¬ify_before_headers_sent_callback_, |
| 348 | &request_info_.extra_headers); |
| 349 | // If an extension blocks the request, we rely on the callback to |
| 350 | // StartTransactionInternal(). |
| 351 | if (rv == ERR_IO_PENDING) { |
| 352 | request_->net_log().BeginEvent( |
| 353 | NetLog::TYPE_URL_REQUEST_BLOCKED_ON_DELEGATE, NULL); |
| 354 | return; |
| 355 | } |
| 356 | } |
| 357 | StartTransactionInternal(); |
| 358 | } |
| 359 | |
| 360 | void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) { |
| 361 | request_->net_log().EndEvent( |
| 362 | NetLog::TYPE_URL_REQUEST_BLOCKED_ON_DELEGATE, NULL); |
| 363 | |
| 364 | if (result == OK) { |
| 365 | StartTransactionInternal(); |
| 366 | } else { |
| 367 | // TODO(battre): Allow passing information of the extension that canceled |
| 368 | // the event. |
| 369 | request_->net_log().AddEvent(NetLog::TYPE_CANCELLED, |
| 370 | make_scoped_refptr(new NetLogStringParameter("source", "delegate"))); |
| 371 | NotifyCanceled(); |
| 372 | } |
| 373 | } |
| 374 | |
// Creates (or restarts, for auth) the HTTP transaction and starts it.
// On synchronous completion, dispatches OnStartCompleted through the message
// loop so the URLRequest delegate is always notified asynchronously.
void URLRequestHttpJob::StartTransactionInternal() {
  // NOTE: This method assumes that request_info_ is already setup properly.

  // If we already have a transaction, then we should restart the transaction
  // with auth provided by username_ and password_.

  int rv;

  if (transaction_.get()) {
    rv = transaction_->RestartWithAuth(username_, password_, &start_callback_);
    // Clear credentials after use so they are not resent on a later restart.
    username_.clear();
    password_.clear();
  } else {
    DCHECK(request_->context());
    DCHECK(request_->context()->http_transaction_factory());

    rv = request_->context()->http_transaction_factory()->CreateTransaction(
        &transaction_);
    if (rv == OK) {
      // Only start if throttling is off or this URL is not currently in
      // exponential back-off.
      if (!URLRequestThrottlerManager::GetInstance()->enforce_throttling() ||
          !throttling_entry_->IsDuringExponentialBackoff()) {
        rv = transaction_->Start(
            &request_info_, &start_callback_, request_->net_log());
        start_time_ = base::TimeTicks::Now();
      } else {
        // Special error code for the exponential back-off module.
        rv = ERR_TEMPORARILY_THROTTLED;
      }
      // Make sure the context is alive for the duration of the
      // transaction.
      context_ = request_->context();
    }
  }

  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      method_factory_.NewRunnableMethod(
          &URLRequestHttpJob::OnStartCompleted, rv));
}
| 419 | |
// Fills in Accept-Encoding (including the SDCH experiment logic and
// dictionary advertisement) plus default Accept-Language / Accept-Charset
// headers, without overriding values the caller already set.
void URLRequestHttpJob::AddExtraHeaders() {
  // Supply Accept-Encoding field only if it is not already provided.
  // It should be provided IF the content is known to have restrictions on
  // potential encoding, such as streaming multi-media.
  // For details see bug 47381.
  // TODO(jar, enal): jpeg files etc. should set up a request header if
  // possible. Right now it is done only by buffered_resource_loader and
  // simple_data_source.
  if (!request_info_.extra_headers.HasHeader(
      HttpRequestHeaders::kAcceptEncoding)) {
    bool advertise_sdch = SdchManager::Global() &&
        SdchManager::Global()->IsInSupportedDomain(request_->url());
    std::string avail_dictionaries;
    if (advertise_sdch) {
      SdchManager::Global()->GetAvailDictionaryList(request_->url(),
                                                    &avail_dictionaries);

      // The AllowLatencyExperiment() is only true if we've successfully done a
      // full SDCH compression recently in this browser session for this host.
      // Note that for this path, there might be no applicable dictionaries,
      // and hence we can't participate in the experiment.
      if (!avail_dictionaries.empty() &&
          SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
        // We are participating in the test (or control), and hence we'll
        // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
        // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
        packet_timing_enabled_ = true;
        if (base::RandDouble() < .01) {
          sdch_test_control_ = true;  // 1% probability.
          advertise_sdch = false;     // Holdback: don't advertise SDCH.
        } else {
          sdch_test_activated_ = true;
        }
      }
    }

    // Supply Accept-Encoding headers first so that it is more likely that they
    // will be in the first transmitted packet. This can sometimes make it
    // easier to filter and analyze the streams to assure that a proxy has not
    // damaged these headers. Some proxies deliberately corrupt Accept-Encoding
    // headers.
    if (!advertise_sdch) {
      // Tell the server what compression formats we support (other than SDCH).
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
    } else {
      // Include SDCH in acceptable list.
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
      if (!avail_dictionaries.empty()) {
        request_info_.extra_headers.SetHeader(
            kAvailDictionaryHeader,
            avail_dictionaries);
        sdch_dictionary_advertised_ = true;
        // Since we're tagging this transaction as advertising a dictionary,
        // we'll definitely employ an SDCH filter (or tentative sdch filter)
        // when we get a response. When done, we'll record histograms via
        // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet
        // arrival times.
        packet_timing_enabled_ = true;
      }
    }
  }

  const URLRequestContext* context = request_->context();
  if (context) {
    // Only add default Accept-Language and Accept-Charset if the request
    // didn't have them specified.
    if (!context->accept_language().empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptLanguage,
          context->accept_language());
    }
    if (!context->accept_charset().empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptCharset,
          context->accept_charset());
    }
  }
}
| 500 | |
[email protected] | 3460228 | 2010-02-03 22:14:15 | [diff] [blame] | 501 | void URLRequestHttpJob::AddCookieHeaderAndStart() { |
| 502 | // No matter what, we want to report our status as IO pending since we will |
| 503 | // be notifying our consumer asynchronously via OnStartCompleted. |
| 504 | SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); |
[email protected] | 861fcd5 | 2009-08-26 02:33:46 | [diff] [blame] | 505 | |
[email protected] | ed24fad | 2011-05-10 22:44:01 | [diff] [blame] | 506 | // If the request was destroyed, then there is no more work to do. |
| 507 | if (!request_) |
| 508 | return; |
[email protected] | 3460228 | 2010-02-03 22:14:15 | [diff] [blame] | 509 | |
[email protected] | ed24fad | 2011-05-10 22:44:01 | [diff] [blame] | 510 | bool allow = true; |
[email protected] | a83dd33 | 2011-07-13 10:41:01 | [diff] [blame] | 511 | if ((request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) || |
| 512 | !CanGetCookies()) { |
[email protected] | ed24fad | 2011-05-10 22:44:01 | [diff] [blame] | 513 | allow = false; |
[email protected] | 0757e770 | 2009-03-27 04:00:22 | [diff] [blame] | 514 | } |
[email protected] | 3460228 | 2010-02-03 22:14:15 | [diff] [blame] | 515 | |
[email protected] | 9025016c | 2011-05-12 15:51:23 | [diff] [blame] | 516 | if (request_->context()->cookie_store() && allow) { |
| 517 | CookieOptions options; |
| 518 | options.set_include_httponly(); |
[email protected] | 87c99b6a | 2011-05-13 20:06:48 | [diff] [blame] | 519 | std::string cookie_line; |
| 520 | std::vector<CookieStore::CookieInfo> cookie_infos; |
| 521 | request_->context()->cookie_store()->GetCookiesWithInfo( |
| 522 | request_->url(), options, &cookie_line, &cookie_infos); |
| 523 | if (!cookie_line.empty()) { |
[email protected] | 9025016c | 2011-05-12 15:51:23 | [diff] [blame] | 524 | request_info_.extra_headers.SetHeader( |
[email protected] | 87c99b6a | 2011-05-13 20:06:48 | [diff] [blame] | 525 | HttpRequestHeaders::kCookie, cookie_line); |
[email protected] | 9025016c | 2011-05-12 15:51:23 | [diff] [blame] | 526 | } |
[email protected] | 87c99b6a | 2011-05-13 20:06:48 | [diff] [blame] | 527 | if (URLRequest::AreMacCookiesEnabled()) |
| 528 | AddAuthorizationHeader(cookie_infos, &request_info_); |
[email protected] | 9025016c | 2011-05-12 15:51:23 | [diff] [blame] | 529 | } |
| 530 | // We may have been canceled within CanGetCookies. |
| 531 | if (GetStatus().is_success()) { |
| 532 | StartTransaction(); |
| 533 | } else { |
| 534 | NotifyCanceled(); |
| 535 | } |
[email protected] | 0757e770 | 2009-03-27 04:00:22 | [diff] [blame] | 536 | } |
| 537 | |
[email protected] | 3460228 | 2010-02-03 22:14:15 | [diff] [blame] | 538 | void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete() { |
| 539 | DCHECK(transaction_.get()); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 540 | |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 541 | const HttpResponseInfo* response_info = transaction_->GetResponseInfo(); |
[email protected] | 3460228 | 2010-02-03 22:14:15 | [diff] [blame] | 542 | DCHECK(response_info); |
| 543 | |
| 544 | response_cookies_.clear(); |
| 545 | response_cookies_save_index_ = 0; |
| 546 | |
| 547 | FetchResponseCookies(response_info, &response_cookies_); |
| 548 | |
| 549 | // Now, loop over the response cookies, and attempt to persist each. |
| 550 | SaveNextCookie(); |
| 551 | } |
| 552 | |
| 553 | void URLRequestHttpJob::SaveNextCookie() { |
| 554 | if (response_cookies_save_index_ == response_cookies_.size()) { |
| 555 | response_cookies_.clear(); |
| 556 | response_cookies_save_index_ = 0; |
| 557 | SetStatus(URLRequestStatus()); // Clear the IO_PENDING status |
| 558 | NotifyHeadersComplete(); |
| 559 | return; |
| 560 | } |
| 561 | |
| 562 | // No matter what, we want to report our status as IO pending since we will |
| 563 | // be notifying our consumer asynchronously via OnStartCompleted. |
| 564 | SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); |
| 565 | |
[email protected] | ed24fad | 2011-05-10 22:44:01 | [diff] [blame] | 566 | CookieOptions options; |
[email protected] | 7c75b4c | 2011-07-02 14:38:05 | [diff] [blame] | 567 | if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) && |
[email protected] | a83dd33 | 2011-07-13 10:41:01 | [diff] [blame] | 568 | request_->context()->cookie_store()) { |
[email protected] | ed24fad | 2011-05-10 22:44:01 | [diff] [blame] | 569 | CookieOptions options; |
| 570 | options.set_include_httponly(); |
[email protected] | a83dd33 | 2011-07-13 10:41:01 | [diff] [blame] | 571 | if (CanSetCookie( |
| 572 | response_cookies_[response_cookies_save_index_], &options)) { |
[email protected] | ed24fad | 2011-05-10 22:44:01 | [diff] [blame] | 573 | request_->context()->cookie_store()->SetCookieWithOptions( |
| 574 | request_->url(), response_cookies_[response_cookies_save_index_], |
| 575 | options); |
| 576 | } |
[email protected] | 3460228 | 2010-02-03 22:14:15 | [diff] [blame] | 577 | } |
| 578 | |
[email protected] | 9025016c | 2011-05-12 15:51:23 | [diff] [blame] | 579 | response_cookies_save_index_++; |
| 580 | // We may have been canceled within OnSetCookie. |
| 581 | if (GetStatus().is_success()) { |
| 582 | SaveNextCookie(); |
| 583 | } else { |
| 584 | NotifyCanceled(); |
| 585 | } |
[email protected] | 3460228 | 2010-02-03 22:14:15 | [diff] [blame] | 586 | } |
| 587 | |
| 588 | void URLRequestHttpJob::FetchResponseCookies( |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 589 | const HttpResponseInfo* response_info, |
[email protected] | 3460228 | 2010-02-03 22:14:15 | [diff] [blame] | 590 | std::vector<std::string>* cookies) { |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 591 | std::string name = "Set-Cookie"; |
| 592 | std::string value; |
| 593 | |
| 594 | void* iter = NULL; |
[email protected] | 2adf288 | 2010-09-27 08:30:37 | [diff] [blame] | 595 | while (response_info->headers->EnumerateHeader(&iter, name, &value)) { |
| 596 | if (!value.empty()) |
| 597 | cookies->push_back(value); |
| 598 | } |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 599 | } |
[email protected] | a9cea754 | 2009-05-20 04:30:23 | [diff] [blame] | 600 | |
[email protected] | 77f6fb43 | 2009-09-05 14:21:09 | [diff] [blame] | 601 | void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() { |
[email protected] | a9cea754 | 2009-05-20 04:30:23 | [diff] [blame] | 602 | DCHECK(response_info_); |
| 603 | |
[email protected] | ede9666 | 2011-07-14 12:34:18 | [diff] [blame] | 604 | const URLRequestContext* ctx = request_->context(); |
[email protected] | 326e679 | 2009-12-11 21:04:42 | [diff] [blame] | 605 | if (!ctx || !ctx->transport_security_state()) |
[email protected] | a9cea754 | 2009-05-20 04:30:23 | [diff] [blame] | 606 | return; |
| 607 | |
[email protected] | 326e679 | 2009-12-11 21:04:42 | [diff] [blame] | 608 | const bool https = response_info_->ssl_info.is_valid(); |
| 609 | const bool valid_https = |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 610 | https && !IsCertStatusError(response_info_->ssl_info.cert_status); |
[email protected] | 326e679 | 2009-12-11 21:04:42 | [diff] [blame] | 611 | |
[email protected] | 77f6fb43 | 2009-09-05 14:21:09 | [diff] [blame] | 612 | std::string name = "Strict-Transport-Security"; |
[email protected] | a9cea754 | 2009-05-20 04:30:23 | [diff] [blame] | 613 | std::string value; |
| 614 | |
[email protected] | 326e679 | 2009-12-11 21:04:42 | [diff] [blame] | 615 | int max_age; |
| 616 | bool include_subdomains; |
| 617 | |
[email protected] | a9cea754 | 2009-05-20 04:30:23 | [diff] [blame] | 618 | void* iter = NULL; |
[email protected] | 77f6fb43 | 2009-09-05 14:21:09 | [diff] [blame] | 619 | while (response_info_->headers->EnumerateHeader(&iter, name, &value)) { |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 620 | const bool ok = TransportSecurityState::ParseHeader( |
[email protected] | 326e679 | 2009-12-11 21:04:42 | [diff] [blame] | 621 | value, &max_age, &include_subdomains); |
| 622 | if (!ok) |
| 623 | continue; |
| 624 | // We will only accept strict mode if we saw the header from an HTTPS |
| 625 | // connection with no certificate problems. |
| 626 | if (!valid_https) |
| 627 | continue; |
| 628 | base::Time current_time(base::Time::Now()); |
| 629 | base::TimeDelta max_age_delta = base::TimeDelta::FromSeconds(max_age); |
| 630 | |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 631 | TransportSecurityState::DomainState domain_state; |
[email protected] | 326e679 | 2009-12-11 21:04:42 | [diff] [blame] | 632 | domain_state.expiry = current_time + max_age_delta; |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 633 | domain_state.mode = TransportSecurityState::DomainState::MODE_STRICT; |
[email protected] | 326e679 | 2009-12-11 21:04:42 | [diff] [blame] | 634 | domain_state.include_subdomains = include_subdomains; |
| 635 | |
| 636 | ctx->transport_security_state()->EnableHost(request_info_.url.host(), |
| 637 | domain_state); |
| 638 | } |
| 639 | |
| 640 | // TODO(agl): change this over when we have fixed things at the server end. |
| 641 | // The string should be "Opportunistic-Transport-Security"; |
| 642 | name = "X-Bodge-Transport-Security"; |
| 643 | |
| 644 | while (response_info_->headers->EnumerateHeader(&iter, name, &value)) { |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 645 | const bool ok = TransportSecurityState::ParseHeader( |
[email protected] | 326e679 | 2009-12-11 21:04:42 | [diff] [blame] | 646 | value, &max_age, &include_subdomains); |
| 647 | if (!ok) |
| 648 | continue; |
| 649 | // If we saw an opportunistic request over HTTPS, then clearly we can make |
| 650 | // HTTPS connections to the host so we should remember this. |
| 651 | if (https) { |
| 652 | base::Time current_time(base::Time::Now()); |
| 653 | base::TimeDelta max_age_delta = base::TimeDelta::FromSeconds(max_age); |
| 654 | |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 655 | TransportSecurityState::DomainState domain_state; |
[email protected] | 326e679 | 2009-12-11 21:04:42 | [diff] [blame] | 656 | domain_state.expiry = current_time + max_age_delta; |
| 657 | domain_state.mode = |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 658 | TransportSecurityState::DomainState::MODE_SPDY_ONLY; |
[email protected] | 326e679 | 2009-12-11 21:04:42 | [diff] [blame] | 659 | domain_state.include_subdomains = include_subdomains; |
| 660 | |
| 661 | ctx->transport_security_state()->EnableHost(request_info_.url.host(), |
| 662 | domain_state); |
| 663 | continue; |
| 664 | } |
| 665 | |
| 666 | if (!request()) |
| 667 | break; |
| 668 | |
| 669 | // At this point, we have a request for opportunistic encryption over HTTP. |
| 670 | // In this case we need to probe to check that we can make HTTPS |
| 671 | // connections to that host. |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 672 | HTTPSProber* const prober = HTTPSProber::GetInstance(); |
[email protected] | 326e679 | 2009-12-11 21:04:42 | [diff] [blame] | 673 | if (prober->HaveProbed(request_info_.url.host()) || |
| 674 | prober->InFlight(request_info_.url.host())) { |
| 675 | continue; |
| 676 | } |
| 677 | |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 678 | HTTPSProberDelegateImpl* delegate = |
| 679 | new HTTPSProberDelegateImpl(request_info_.url.host(), max_age, |
| 680 | include_subdomains, |
| 681 | ctx->transport_security_state()); |
[email protected] | 326e679 | 2009-12-11 21:04:42 | [diff] [blame] | 682 | if (!prober->ProbeHost(request_info_.url.host(), request()->context(), |
| 683 | delegate)) { |
| 684 | delete delegate; |
| 685 | } |
[email protected] | 77f6fb43 | 2009-09-05 14:21:09 | [diff] [blame] | 686 | } |
[email protected] | a9cea754 | 2009-05-20 04:30:23 | [diff] [blame] | 687 | } |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 688 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 689 | void URLRequestHttpJob::OnStartCompleted(int result) { |
[email protected] | ec23f52 | 2011-02-22 21:01:38 | [diff] [blame] | 690 | RecordTimer(); |
| 691 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 692 | // If the request was destroyed, then there is no more work to do. |
[email protected] | a83dd33 | 2011-07-13 10:41:01 | [diff] [blame] | 693 | if (!request_) |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 694 | return; |
| 695 | |
| 696 | // If the transaction was destroyed, then the job was cancelled, and |
| 697 | // we can just ignore this notification. |
| 698 | if (!transaction_.get()) |
| 699 | return; |
| 700 | |
| 701 | // Clear the IO_PENDING status |
| 702 | SetStatus(URLRequestStatus()); |
| 703 | |
[email protected] | 381e885 | 2011-04-14 14:30:58 | [diff] [blame] | 704 | // Take care of any mandates for public key pinning. |
| 705 | // TODO(agl): we might have an issue here where a request for foo.example.com |
| 706 | // merges into a SPDY connection to www.example.com, and gets a different |
| 707 | // certificate. |
| 708 | const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info; |
| 709 | if (result == OK && |
| 710 | ssl_info.is_valid() && |
[email protected] | a84396e | 2011-04-28 20:26:58 | [diff] [blame] | 711 | ssl_info.is_issued_by_known_root && |
[email protected] | 381e885 | 2011-04-14 14:30:58 | [diff] [blame] | 712 | context_->transport_security_state()) { |
| 713 | TransportSecurityState::DomainState domain_state; |
[email protected] | dee9ae9 | 2011-04-26 03:58:30 | [diff] [blame] | 714 | if (context_->transport_security_state()->HasPinsForHost( |
[email protected] | 381e885 | 2011-04-14 14:30:58 | [diff] [blame] | 715 | &domain_state, |
| 716 | request_->url().host(), |
[email protected] | 2b83813 | 2011-05-05 22:00:24 | [diff] [blame] | 717 | SSLConfigService::IsSNIAvailable( |
| 718 | context_->ssl_config_service()))) { |
[email protected] | a84396e | 2011-04-28 20:26:58 | [diff] [blame] | 719 | if (!domain_state.IsChainOfPublicKeysPermitted( |
| 720 | ssl_info.public_key_hashes)) { |
| 721 | result = ERR_CERT_INVALID; |
| 722 | UMA_HISTOGRAM_BOOLEAN("Net.CertificatePinSuccess", false); |
| 723 | } else { |
| 724 | UMA_HISTOGRAM_BOOLEAN("Net.CertificatePinSuccess", true); |
| 725 | } |
[email protected] | 381e885 | 2011-04-14 14:30:58 | [diff] [blame] | 726 | } |
| 727 | } |
| 728 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 729 | if (result == OK) { |
| 730 | SaveCookiesAndNotifyHeadersComplete(); |
| 731 | } else if (ShouldTreatAsCertificateError(result)) { |
| 732 | // We encountered an SSL certificate error. Ask our delegate to decide |
| 733 | // what we should do. |
| 734 | // TODO(wtc): also pass ssl_info.cert_status, or just pass the whole |
| 735 | // ssl_info. |
[email protected] | a83dd33 | 2011-07-13 10:41:01 | [diff] [blame] | 736 | NotifySSLCertificateError( |
| 737 | result, transaction_->GetResponseInfo()->ssl_info.cert); |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 738 | } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) { |
[email protected] | a83dd33 | 2011-07-13 10:41:01 | [diff] [blame] | 739 | NotifyCertificateRequested( |
| 740 | transaction_->GetResponseInfo()->cert_request_info); |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 741 | } else { |
| 742 | NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); |
| 743 | } |
| 744 | } |
| 745 | |
| 746 | void URLRequestHttpJob::OnReadCompleted(int result) { |
| 747 | read_in_progress_ = false; |
| 748 | |
[email protected] | 85c1dce | 2011-07-06 12:01:29 | [diff] [blame] | 749 | if (ShouldFixMismatchedContentLength(result)) |
| 750 | result = 0; |
| 751 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 752 | if (result == 0) { |
| 753 | NotifyDone(URLRequestStatus()); |
| 754 | } else if (result < 0) { |
| 755 | NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); |
| 756 | } else { |
| 757 | // Clear the IO_PENDING status |
| 758 | SetStatus(URLRequestStatus()); |
| 759 | } |
| 760 | |
| 761 | NotifyReadComplete(result); |
| 762 | } |
| 763 | |
| 764 | bool URLRequestHttpJob::ShouldTreatAsCertificateError(int result) { |
| 765 | if (!IsCertificateError(result)) |
| 766 | return false; |
| 767 | |
[email protected] | 047c272 | 2011-03-28 15:07:22 | [diff] [blame] | 768 | // Revocation check failures are always certificate errors, even if the host |
| 769 | // is using Strict-Transport-Security. |
[email protected] | 9e72e8c | 2011-03-28 15:03:16 | [diff] [blame] | 770 | if (result == ERR_CERT_UNABLE_TO_CHECK_REVOCATION) |
| 771 | return true; |
| 772 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 773 | // Check whether our context is using Strict-Transport-Security. |
| 774 | if (!context_->transport_security_state()) |
| 775 | return true; |
| 776 | |
| 777 | TransportSecurityState::DomainState domain_state; |
| 778 | // TODO(agl): don't ignore opportunistic mode. |
| 779 | const bool r = context_->transport_security_state()->IsEnabledForHost( |
[email protected] | 2b83813 | 2011-05-05 22:00:24 | [diff] [blame] | 780 | &domain_state, request_info_.url.host(), |
| 781 | SSLConfigService::IsSNIAvailable(context_->ssl_config_service())); |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 782 | |
| 783 | return !r || domain_state.mode == |
| 784 | TransportSecurityState::DomainState::MODE_OPPORTUNISTIC; |
| 785 | } |
| 786 | |
| 787 | void URLRequestHttpJob::RestartTransactionWithAuth( |
| 788 | const string16& username, |
| 789 | const string16& password) { |
| 790 | username_ = username; |
| 791 | password_ = password; |
| 792 | |
| 793 | // These will be reset in OnStartCompleted. |
| 794 | response_info_ = NULL; |
| 795 | response_cookies_.clear(); |
| 796 | |
[email protected] | ec23f52 | 2011-02-22 21:01:38 | [diff] [blame] | 797 | ResetTimer(); |
| 798 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 799 | // Update the cookies, since the cookie store may have been updated from the |
| 800 | // headers in the 401/407. Since cookies were already appended to |
| 801 | // extra_headers, we need to strip them out before adding them again. |
| 802 | request_info_.extra_headers.RemoveHeader( |
| 803 | HttpRequestHeaders::kCookie); |
| 804 | |
| 805 | AddCookieHeaderAndStart(); |
| 806 | } |
| 807 | |
// Sets the request body.  Must be called before the transaction starts.
void URLRequestHttpJob::SetUpload(UploadData* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data = upload;
}
| 812 | |
// Replaces the extra request headers.  Must be called before the transaction
// starts; later stages (referrer, UA, cookies) may add to or override these.
void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}
| 818 | |
| 819 | void URLRequestHttpJob::Start() { |
| 820 | DCHECK(!transaction_.get()); |
| 821 | |
| 822 | // Ensure that we do not send username and password fields in the referrer. |
| 823 | GURL referrer(request_->GetSanitizedReferrer()); |
| 824 | |
| 825 | request_info_.url = request_->url(); |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 826 | request_info_.method = request_->method(); |
| 827 | request_info_.load_flags = request_->load_flags(); |
| 828 | request_info_.priority = request_->priority(); |
[email protected] | 4875ba1 | 2011-03-30 22:31:51 | [diff] [blame] | 829 | request_info_.request_id = request_->identifier(); |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 830 | |
[email protected] | c1045010 | 2011-06-27 09:06:16 | [diff] [blame] | 831 | // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins |
| 832 | // from overriding headers that are controlled using other means. Otherwise a |
| 833 | // plugin could set a referrer although sending the referrer is inhibited. |
| 834 | request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer); |
| 835 | |
| 836 | // Our consumer should have made sure that this is a safe referrer. See for |
| 837 | // instance WebCore::FrameLoader::HideReferrer. |
| 838 | if (referrer.is_valid()) { |
| 839 | request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer, |
| 840 | referrer.spec()); |
| 841 | } |
| 842 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 843 | if (request_->context()) { |
[email protected] | 50ba404b | 2011-01-21 13:38:45 | [diff] [blame] | 844 | request_info_.extra_headers.SetHeaderIfMissing( |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 845 | HttpRequestHeaders::kUserAgent, |
| 846 | request_->context()->GetUserAgent(request_->url())); |
| 847 | } |
| 848 | |
| 849 | AddExtraHeaders(); |
| 850 | AddCookieHeaderAndStart(); |
| 851 | } |
| 852 | |
| 853 | void URLRequestHttpJob::Kill() { |
| 854 | if (!transaction_.get()) |
| 855 | return; |
| 856 | |
| 857 | DestroyTransaction(); |
| 858 | URLRequestJob::Kill(); |
| 859 | } |
| 860 | |
| 861 | LoadState URLRequestHttpJob::GetLoadState() const { |
| 862 | return transaction_.get() ? |
| 863 | transaction_->GetLoadState() : LOAD_STATE_IDLE; |
| 864 | } |
| 865 | |
| 866 | uint64 URLRequestHttpJob::GetUploadProgress() const { |
| 867 | return transaction_.get() ? transaction_->GetUploadProgress() : 0; |
| 868 | } |
| 869 | |
| 870 | bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const { |
| 871 | DCHECK(transaction_.get()); |
| 872 | |
| 873 | if (!response_info_) |
| 874 | return false; |
| 875 | |
| 876 | return response_info_->headers->GetMimeType(mime_type); |
| 877 | } |
| 878 | |
| 879 | bool URLRequestHttpJob::GetCharset(std::string* charset) { |
| 880 | DCHECK(transaction_.get()); |
| 881 | |
| 882 | if (!response_info_) |
| 883 | return false; |
| 884 | |
| 885 | return response_info_->headers->GetCharset(charset); |
| 886 | } |
| 887 | |
| 888 | void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) { |
| 889 | DCHECK(request_); |
| 890 | DCHECK(transaction_.get()); |
| 891 | |
| 892 | if (response_info_) |
| 893 | *info = *response_info_; |
| 894 | } |
| 895 | |
| 896 | bool URLRequestHttpJob::GetResponseCookies( |
| 897 | std::vector<std::string>* cookies) { |
| 898 | DCHECK(transaction_.get()); |
| 899 | |
| 900 | if (!response_info_) |
| 901 | return false; |
| 902 | |
| 903 | // TODO(darin): Why are we extracting response cookies again? Perhaps we |
| 904 | // should just leverage response_cookies_. |
| 905 | |
| 906 | cookies->clear(); |
| 907 | FetchResponseCookies(response_info_, cookies); |
| 908 | return true; |
| 909 | } |
| 910 | |
| 911 | int URLRequestHttpJob::GetResponseCode() const { |
| 912 | DCHECK(transaction_.get()); |
| 913 | |
| 914 | if (!response_info_) |
| 915 | return -1; |
| 916 | |
| 917 | return response_info_->headers->response_code(); |
| 918 | } |
| 919 | |
[email protected] | 5a3b4d3 | 2011-03-17 01:24:05 | [diff] [blame] | 920 | Filter* URLRequestHttpJob::SetupFilter() const { |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 921 | DCHECK(transaction_.get()); |
| 922 | if (!response_info_) |
[email protected] | 5a3b4d3 | 2011-03-17 01:24:05 | [diff] [blame] | 923 | return NULL; |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 924 | |
[email protected] | 5a3b4d3 | 2011-03-17 01:24:05 | [diff] [blame] | 925 | std::vector<Filter::FilterType> encoding_types; |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 926 | std::string encoding_type; |
| 927 | void* iter = NULL; |
| 928 | while (response_info_->headers->EnumerateHeader(&iter, "Content-Encoding", |
| 929 | &encoding_type)) { |
[email protected] | 5a3b4d3 | 2011-03-17 01:24:05 | [diff] [blame] | 930 | encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type)); |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 931 | } |
| 932 | |
[email protected] | 46668fe5 | 2011-05-04 19:03:23 | [diff] [blame] | 933 | if (filter_context_->IsSdchResponse()) { |
| 934 | // We are wary of proxies that discard or damage SDCH encoding. If a server |
| 935 | // explicitly states that this is not SDCH content, then we can correct our |
| 936 | // assumption that this is an SDCH response, and avoid the need to recover |
| 937 | // as though the content is corrupted (when we discover it is not SDCH |
| 938 | // encoded). |
| 939 | std::string sdch_response_status; |
| 940 | iter = NULL; |
| 941 | while (response_info_->headers->EnumerateHeader(&iter, "X-Sdch-Encode", |
| 942 | &sdch_response_status)) { |
| 943 | if (sdch_response_status == "0") { |
| 944 | filter_context_->ResetSdchResponseToFalse(); |
| 945 | break; |
| 946 | } |
| 947 | } |
| 948 | } |
| 949 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 950 | // Even if encoding types are empty, there is a chance that we need to add |
| 951 | // some decoding, as some proxies strip encoding completely. In such cases, |
| 952 | // we may need to add (for example) SDCH filtering (when the context suggests |
| 953 | // it is appropriate). |
[email protected] | 2e92354c | 2011-03-25 20:49:53 | [diff] [blame] | 954 | Filter::FixupEncodingTypes(*filter_context_, &encoding_types); |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 955 | |
[email protected] | 5a3b4d3 | 2011-03-17 01:24:05 | [diff] [blame] | 956 | return !encoding_types.empty() |
[email protected] | 2e92354c | 2011-03-25 20:49:53 | [diff] [blame] | 957 | ? Filter::Factory(encoding_types, *filter_context_) : NULL; |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 958 | } |
| 959 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 960 | bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) { |
| 961 | // We only allow redirects to certain "safe" protocols. This does not |
| 962 | // restrict redirects to externally handled protocols. Our consumer would |
| 963 | // need to take care of those. |
| 964 | |
| 965 | if (!URLRequest::IsHandledURL(location)) |
| 966 | return true; |
| 967 | |
| 968 | static const char* kSafeSchemes[] = { |
| 969 | "http", |
| 970 | "https", |
| 971 | "ftp" |
| 972 | }; |
| 973 | |
| 974 | for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) { |
| 975 | if (location.SchemeIs(kSafeSchemes[i])) |
| 976 | return true; |
| 977 | } |
| 978 | |
| 979 | return false; |
| 980 | } |
| 981 | |
| 982 | bool URLRequestHttpJob::NeedsAuth() { |
| 983 | int code = GetResponseCode(); |
| 984 | if (code == -1) |
| 985 | return false; |
| 986 | |
| 987 | // Check if we need either Proxy or WWW Authentication. This could happen |
| 988 | // because we either provided no auth info, or provided incorrect info. |
| 989 | switch (code) { |
| 990 | case 407: |
| 991 | if (proxy_auth_state_ == AUTH_STATE_CANCELED) |
| 992 | return false; |
| 993 | proxy_auth_state_ = AUTH_STATE_NEED_AUTH; |
| 994 | return true; |
| 995 | case 401: |
| 996 | if (server_auth_state_ == AUTH_STATE_CANCELED) |
| 997 | return false; |
| 998 | server_auth_state_ = AUTH_STATE_NEED_AUTH; |
| 999 | return true; |
| 1000 | } |
| 1001 | return false; |
| 1002 | } |
| 1003 | |
// Hands the consumer the auth challenge parsed from the 401/407 response.
void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // sanity checks: a challenge must be pending, and the response code must
  // match one of the two auth-challenge statuses.
  DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == AUTH_STATE_NEED_AUTH);
  DCHECK(response_info_->headers->response_code() == 401 ||
         response_info_->headers->response_code() == 407);

  *result = response_info_->auth_challenge;
}
| 1017 | |
| 1018 | void URLRequestHttpJob::SetAuth(const string16& username, |
| 1019 | const string16& password) { |
| 1020 | DCHECK(transaction_.get()); |
| 1021 | |
| 1022 | // Proxy gets set first, then WWW. |
| 1023 | if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { |
| 1024 | proxy_auth_state_ = AUTH_STATE_HAVE_AUTH; |
| 1025 | } else { |
[email protected] | dd29bcd7 | 2011-03-24 00:03:44 | [diff] [blame] | 1026 | DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH); |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 1027 | server_auth_state_ = AUTH_STATE_HAVE_AUTH; |
| 1028 | } |
| 1029 | |
| 1030 | RestartTransactionWithAuth(username, password); |
| 1031 | } |
| 1032 | |
| 1033 | void URLRequestHttpJob::CancelAuth() { |
| 1034 | // Proxy gets set first, then WWW. |
| 1035 | if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { |
| 1036 | proxy_auth_state_ = AUTH_STATE_CANCELED; |
| 1037 | } else { |
[email protected] | dd29bcd7 | 2011-03-24 00:03:44 | [diff] [blame] | 1038 | DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH); |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 1039 | server_auth_state_ = AUTH_STATE_CANCELED; |
| 1040 | } |
| 1041 | |
| 1042 | // These will be reset in OnStartCompleted. |
| 1043 | response_info_ = NULL; |
| 1044 | response_cookies_.clear(); |
| 1045 | |
[email protected] | ec23f52 | 2011-02-22 21:01:38 | [diff] [blame] | 1046 | ResetTimer(); |
| 1047 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 1048 | // OK, let the consumer read the error page... |
| 1049 | // |
| 1050 | // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false, |
| 1051 | // which will cause the consumer to receive OnResponseStarted instead of |
| 1052 | // OnAuthRequired. |
| 1053 | // |
| 1054 | // We have to do this via InvokeLater to avoid "recursing" the consumer. |
| 1055 | // |
| 1056 | MessageLoop::current()->PostTask( |
| 1057 | FROM_HERE, |
| 1058 | method_factory_.NewRunnableMethod( |
| 1059 | &URLRequestHttpJob::OnStartCompleted, OK)); |
| 1060 | } |
| 1061 | |
| 1062 | void URLRequestHttpJob::ContinueWithCertificate( |
| 1063 | X509Certificate* client_cert) { |
| 1064 | DCHECK(transaction_.get()); |
| 1065 | |
| 1066 | DCHECK(!response_info_) << "should not have a response yet"; |
| 1067 | |
[email protected] | ec23f52 | 2011-02-22 21:01:38 | [diff] [blame] | 1068 | ResetTimer(); |
| 1069 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 1070 | // No matter what, we want to report our status as IO pending since we will |
| 1071 | // be notifying our consumer asynchronously via OnStartCompleted. |
| 1072 | SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); |
| 1073 | |
| 1074 | int rv = transaction_->RestartWithCertificate(client_cert, &start_callback_); |
| 1075 | if (rv == ERR_IO_PENDING) |
| 1076 | return; |
| 1077 | |
| 1078 | // The transaction started synchronously, but we need to notify the |
| 1079 | // URLRequest delegate via the message loop. |
| 1080 | MessageLoop::current()->PostTask( |
| 1081 | FROM_HERE, |
| 1082 | method_factory_.NewRunnableMethod( |
| 1083 | &URLRequestHttpJob::OnStartCompleted, rv)); |
| 1084 | } |
| 1085 | |
| 1086 | void URLRequestHttpJob::ContinueDespiteLastError() { |
| 1087 | // If the transaction was destroyed, then the job was cancelled. |
| 1088 | if (!transaction_.get()) |
| 1089 | return; |
| 1090 | |
| 1091 | DCHECK(!response_info_) << "should not have a response yet"; |
| 1092 | |
[email protected] | ec23f52 | 2011-02-22 21:01:38 | [diff] [blame] | 1093 | ResetTimer(); |
| 1094 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 1095 | // No matter what, we want to report our status as IO pending since we will |
| 1096 | // be notifying our consumer asynchronously via OnStartCompleted. |
| 1097 | SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); |
| 1098 | |
| 1099 | int rv = transaction_->RestartIgnoringLastError(&start_callback_); |
| 1100 | if (rv == ERR_IO_PENDING) |
| 1101 | return; |
| 1102 | |
| 1103 | // The transaction started synchronously, but we need to notify the |
| 1104 | // URLRequest delegate via the message loop. |
| 1105 | MessageLoop::current()->PostTask( |
| 1106 | FROM_HERE, |
| 1107 | method_factory_.NewRunnableMethod( |
| 1108 | &URLRequestHttpJob::OnStartCompleted, rv)); |
| 1109 | } |
| 1110 | |
[email protected] | 85c1dce | 2011-07-06 12:01:29 | [diff] [blame] | 1111 | bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const { |
| 1112 | // Some servers send the body compressed, but specify the content length as |
| 1113 | // the uncompressed size. Although this violates the HTTP spec we want to |
| 1114 | // support it (as IE and FireFox do), but *only* for an exact match. |
| 1115 | // See http://crbug.com/79694. |
| 1116 | if (rv == net::ERR_CONNECTION_CLOSED) { |
| 1117 | if (request_ && request_->response_headers()) { |
| 1118 | int64 expected_length = request_->response_headers()->GetContentLength(); |
| 1119 | VLOG(1) << __FUNCTION__ << "() " |
| 1120 | << "\"" << request_->url().spec() << "\"" |
| 1121 | << " content-length = " << expected_length |
| 1122 | << " pre total = " << prefilter_bytes_read() |
| 1123 | << " post total = " << postfilter_bytes_read(); |
| 1124 | if (postfilter_bytes_read() == expected_length) { |
| 1125 | // Clear the error. |
| 1126 | return true; |
| 1127 | } |
| 1128 | } |
| 1129 | } |
| 1130 | return false; |
| 1131 | } |
| 1132 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 1133 | bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size, |
| 1134 | int *bytes_read) { |
| 1135 | DCHECK_NE(buf_size, 0); |
| 1136 | DCHECK(bytes_read); |
| 1137 | DCHECK(!read_in_progress_); |
| 1138 | |
| 1139 | int rv = transaction_->Read(buf, buf_size, &read_callback_); |
[email protected] | 85c1dce | 2011-07-06 12:01:29 | [diff] [blame] | 1140 | |
| 1141 | if (ShouldFixMismatchedContentLength(rv)) |
| 1142 | rv = 0; |
| 1143 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 1144 | if (rv >= 0) { |
| 1145 | *bytes_read = rv; |
[email protected] | bbaea8f | 2011-06-24 00:11:01 | [diff] [blame] | 1146 | if (!rv) |
| 1147 | DoneWithRequest(FINISHED); |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 1148 | return true; |
| 1149 | } |
| 1150 | |
| 1151 | if (rv == ERR_IO_PENDING) { |
| 1152 | read_in_progress_ = true; |
| 1153 | SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); |
| 1154 | } else { |
| 1155 | NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); |
| 1156 | } |
| 1157 | |
| 1158 | return false; |
| 1159 | } |
| 1160 | |
| 1161 | void URLRequestHttpJob::StopCaching() { |
| 1162 | if (transaction_.get()) |
| 1163 | transaction_->StopCaching(); |
| 1164 | } |
| 1165 | |
[email protected] | 6d81b48 | 2011-02-22 19:47:19 | [diff] [blame] | 1166 | HostPortPair URLRequestHttpJob::GetSocketAddress() const { |
| 1167 | return response_info_ ? response_info_->socket_address : HostPortPair(); |
| 1168 | } |
| 1169 | |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 1170 | URLRequestHttpJob::~URLRequestHttpJob() { |
| 1171 | DCHECK(!sdch_test_control_ || !sdch_test_activated_); |
[email protected] | dd29bcd7 | 2011-03-24 00:03:44 | [diff] [blame] | 1172 | if (!is_cached_content_) { |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 1173 | if (sdch_test_control_) |
[email protected] | fc01f23 | 2011-03-17 19:06:01 | [diff] [blame] | 1174 | RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK); |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 1175 | if (sdch_test_activated_) |
[email protected] | fc01f23 | 2011-03-17 19:06:01 | [diff] [blame] | 1176 | RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE); |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 1177 | } |
[email protected] | dd29bcd7 | 2011-03-24 00:03:44 | [diff] [blame] | 1178 | // Make sure SDCH filters are told to emit histogram data while |
| 1179 | // filter_context_ is still alive. |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 1180 | DestroyFilters(); |
| 1181 | |
| 1182 | if (sdch_dictionary_url_.is_valid()) { |
| 1183 | // Prior to reaching the destructor, request_ has been set to a NULL |
| 1184 | // pointer, so request_->url() is no longer valid in the destructor, and we |
| 1185 | // use an alternate copy |request_info_.url|. |
| 1186 | SdchManager* manager = SdchManager::Global(); |
| 1187 | // To be extra safe, since this is a "different time" from when we decided |
| 1188 | // to get the dictionary, we'll validate that an SdchManager is available. |
| 1189 | // At shutdown time, care is taken to be sure that we don't delete this |
| 1190 | // globally useful instance "too soon," so this check is just defensive |
| 1191 | // coding to assure that IF the system is shutting down, we don't have any |
| 1192 | // problem if the manager was deleted ahead of time. |
| 1193 | if (manager) // Defensive programming. |
| 1194 | manager->FetchDictionary(request_info_.url, sdch_dictionary_url_); |
| 1195 | } |
[email protected] | bbaea8f | 2011-06-24 00:11:01 | [diff] [blame] | 1196 | DoneWithRequest(ABORTED); |
[email protected] | 5394e42 | 2011-01-20 22:07:43 | [diff] [blame] | 1197 | } |
| 1198 | |
[email protected] | ec23f52 | 2011-02-22 21:01:38 | [diff] [blame] | 1199 | void URLRequestHttpJob::RecordTimer() { |
| 1200 | if (request_creation_time_.is_null()) { |
| 1201 | NOTREACHED() |
| 1202 | << "The same transaction shouldn't start twice without new timing."; |
| 1203 | return; |
| 1204 | } |
| 1205 | |
[email protected] | 320a29f1 | 2011-03-21 14:47:41 | [diff] [blame] | 1206 | base::TimeDelta to_start = base::Time::Now() - request_creation_time_; |
[email protected] | ec23f52 | 2011-02-22 21:01:38 | [diff] [blame] | 1207 | request_creation_time_ = base::Time(); |
[email protected] | 8684a881 | 2011-03-22 13:59:38 | [diff] [blame] | 1208 | |
| 1209 | static const bool use_prefetch_histogram = |
[email protected] | edafd4c | 2011-05-10 17:18:53 | [diff] [blame] | 1210 | base::FieldTrialList::TrialExists("Prefetch"); |
[email protected] | 8684a881 | 2011-03-22 13:59:38 | [diff] [blame] | 1211 | |
[email protected] | ec23f52 | 2011-02-22 21:01:38 | [diff] [blame] | 1212 | UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start); |
[email protected] | 8684a881 | 2011-03-22 13:59:38 | [diff] [blame] | 1213 | if (use_prefetch_histogram) { |
| 1214 | UMA_HISTOGRAM_MEDIUM_TIMES( |
| 1215 | base::FieldTrial::MakeName("Net.HttpTimeToFirstByte", |
| 1216 | "Prefetch"), |
| 1217 | to_start); |
| 1218 | } |
| 1219 | |
[email protected] | 61a99dd8 | 2011-05-24 19:19:47 | [diff] [blame] | 1220 | const bool is_prerender = !!(request_info_.load_flags & LOAD_PRERENDERING); |
[email protected] | 8684a881 | 2011-03-22 13:59:38 | [diff] [blame] | 1221 | if (is_prerender) { |
| 1222 | UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte_Prerender", |
| 1223 | to_start); |
| 1224 | if (use_prefetch_histogram) { |
| 1225 | UMA_HISTOGRAM_MEDIUM_TIMES( |
| 1226 | base::FieldTrial::MakeName("Net.HttpTimeToFirstByte_Prerender", |
| 1227 | "Prefetch"), |
| 1228 | to_start); |
| 1229 | } |
| 1230 | } else { |
| 1231 | UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte_NonPrerender", |
| 1232 | to_start); |
| 1233 | if (use_prefetch_histogram) { |
| 1234 | UMA_HISTOGRAM_MEDIUM_TIMES( |
| 1235 | base::FieldTrial::MakeName("Net.HttpTimeToFirstByte_NonPrerender", |
| 1236 | "Prefetch"), |
| 1237 | to_start); |
| 1238 | } |
| 1239 | } |
[email protected] | ec23f52 | 2011-02-22 21:01:38 | [diff] [blame] | 1240 | } |
| 1241 | |
| 1242 | void URLRequestHttpJob::ResetTimer() { |
| 1243 | if (!request_creation_time_.is_null()) { |
| 1244 | NOTREACHED() |
| 1245 | << "The timer was reset before it was recorded."; |
| 1246 | return; |
| 1247 | } |
| 1248 | request_creation_time_ = base::Time::Now(); |
| 1249 | } |
| 1250 | |
[email protected] | dd29bcd7 | 2011-03-24 00:03:44 | [diff] [blame] | 1251 | void URLRequestHttpJob::UpdatePacketReadTimes() { |
| 1252 | if (!packet_timing_enabled_) |
| 1253 | return; |
| 1254 | |
| 1255 | if (filter_input_byte_count() <= bytes_observed_in_packets_) { |
| 1256 | DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_); |
| 1257 | return; // No new bytes have arrived. |
| 1258 | } |
| 1259 | |
| 1260 | if (!bytes_observed_in_packets_) |
| 1261 | request_time_snapshot_ = request_ ? request_->request_time() : base::Time(); |
| 1262 | |
| 1263 | final_packet_time_ = base::Time::Now(); |
| 1264 | const size_t kTypicalPacketSize = 1430; |
| 1265 | while (filter_input_byte_count() > bytes_observed_in_packets_) { |
| 1266 | ++observed_packet_count_; |
| 1267 | if (packet_times_.size() < kSdchPacketHistogramCount) { |
| 1268 | packet_times_.push_back(final_packet_time_); |
| 1269 | DCHECK_EQ(static_cast<size_t>(observed_packet_count_), |
| 1270 | packet_times_.size()); |
| 1271 | } |
| 1272 | bytes_observed_in_packets_ += kTypicalPacketSize; |
| 1273 | } |
| 1274 | // Since packets may not be full, we'll remember the number of bytes we've |
| 1275 | // accounted for in packets thus far. |
| 1276 | bytes_observed_in_packets_ = filter_input_byte_count(); |
| 1277 | } |
| 1278 | |
| 1279 | void URLRequestHttpJob::RecordPacketStats( |
| 1280 | FilterContext::StatisticSelector statistic) const { |
| 1281 | if (!packet_timing_enabled_ || (final_packet_time_ == base::Time())) |
| 1282 | return; |
| 1283 | |
| 1284 | base::TimeDelta duration = final_packet_time_ - request_time_snapshot_; |
| 1285 | switch (statistic) { |
| 1286 | case FilterContext::SDCH_DECODE: { |
| 1287 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_Latency_F_a", duration, |
| 1288 | base::TimeDelta::FromMilliseconds(20), |
| 1289 | base::TimeDelta::FromMinutes(10), 100); |
| 1290 | UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Decode_Packets_b", |
| 1291 | static_cast<int>(observed_packet_count_)); |
| 1292 | UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b", |
| 1293 | static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100); |
| 1294 | if (packet_times_.empty()) |
| 1295 | return; |
| 1296 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_Last_a", |
| 1297 | final_packet_time_ - packet_times_[0], |
| 1298 | base::TimeDelta::FromMilliseconds(20), |
| 1299 | base::TimeDelta::FromMinutes(10), 100); |
| 1300 | |
| 1301 | DCHECK_GT(kSdchPacketHistogramCount, 4u); |
| 1302 | if (packet_times_.size() <= 4) |
| 1303 | return; |
| 1304 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_2nd_c", |
| 1305 | packet_times_[1] - packet_times_[0], |
| 1306 | base::TimeDelta::FromMilliseconds(1), |
| 1307 | base::TimeDelta::FromSeconds(10), 100); |
| 1308 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_2nd_To_3rd_c", |
| 1309 | packet_times_[2] - packet_times_[1], |
| 1310 | base::TimeDelta::FromMilliseconds(1), |
| 1311 | base::TimeDelta::FromSeconds(10), 100); |
| 1312 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_3rd_To_4th_c", |
| 1313 | packet_times_[3] - packet_times_[2], |
| 1314 | base::TimeDelta::FromMilliseconds(1), |
| 1315 | base::TimeDelta::FromSeconds(10), 100); |
| 1316 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_4th_To_5th_c", |
| 1317 | packet_times_[4] - packet_times_[3], |
| 1318 | base::TimeDelta::FromMilliseconds(1), |
| 1319 | base::TimeDelta::FromSeconds(10), 100); |
| 1320 | return; |
| 1321 | } |
| 1322 | case FilterContext::SDCH_PASSTHROUGH: { |
| 1323 | // Despite advertising a dictionary, we handled non-sdch compressed |
| 1324 | // content. |
| 1325 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_Latency_F_a", |
| 1326 | duration, |
| 1327 | base::TimeDelta::FromMilliseconds(20), |
| 1328 | base::TimeDelta::FromMinutes(10), 100); |
| 1329 | UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Pass-through_Packets_b", |
| 1330 | observed_packet_count_); |
| 1331 | if (packet_times_.empty()) |
| 1332 | return; |
| 1333 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_Last_a", |
| 1334 | final_packet_time_ - packet_times_[0], |
| 1335 | base::TimeDelta::FromMilliseconds(20), |
| 1336 | base::TimeDelta::FromMinutes(10), 100); |
| 1337 | DCHECK_GT(kSdchPacketHistogramCount, 4u); |
| 1338 | if (packet_times_.size() <= 4) |
| 1339 | return; |
| 1340 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_2nd_c", |
| 1341 | packet_times_[1] - packet_times_[0], |
| 1342 | base::TimeDelta::FromMilliseconds(1), |
| 1343 | base::TimeDelta::FromSeconds(10), 100); |
| 1344 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_2nd_To_3rd_c", |
| 1345 | packet_times_[2] - packet_times_[1], |
| 1346 | base::TimeDelta::FromMilliseconds(1), |
| 1347 | base::TimeDelta::FromSeconds(10), 100); |
| 1348 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_3rd_To_4th_c", |
| 1349 | packet_times_[3] - packet_times_[2], |
| 1350 | base::TimeDelta::FromMilliseconds(1), |
| 1351 | base::TimeDelta::FromSeconds(10), 100); |
| 1352 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_4th_To_5th_c", |
| 1353 | packet_times_[4] - packet_times_[3], |
| 1354 | base::TimeDelta::FromMilliseconds(1), |
| 1355 | base::TimeDelta::FromSeconds(10), 100); |
| 1356 | return; |
| 1357 | } |
| 1358 | |
| 1359 | case FilterContext::SDCH_EXPERIMENT_DECODE: { |
| 1360 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Decode", |
| 1361 | duration, |
| 1362 | base::TimeDelta::FromMilliseconds(20), |
| 1363 | base::TimeDelta::FromMinutes(10), 100); |
| 1364 | // We already provided interpacket histograms above in the SDCH_DECODE |
| 1365 | // case, so we don't need them here. |
| 1366 | return; |
| 1367 | } |
| 1368 | case FilterContext::SDCH_EXPERIMENT_HOLDBACK: { |
| 1369 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback", |
| 1370 | duration, |
| 1371 | base::TimeDelta::FromMilliseconds(20), |
| 1372 | base::TimeDelta::FromMinutes(10), 100); |
| 1373 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_Last_a", |
| 1374 | final_packet_time_ - packet_times_[0], |
| 1375 | base::TimeDelta::FromMilliseconds(20), |
| 1376 | base::TimeDelta::FromMinutes(10), 100); |
| 1377 | |
| 1378 | DCHECK_GT(kSdchPacketHistogramCount, 4u); |
| 1379 | if (packet_times_.size() <= 4) |
| 1380 | return; |
| 1381 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_2nd_c", |
| 1382 | packet_times_[1] - packet_times_[0], |
| 1383 | base::TimeDelta::FromMilliseconds(1), |
| 1384 | base::TimeDelta::FromSeconds(10), 100); |
| 1385 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_2nd_To_3rd_c", |
| 1386 | packet_times_[2] - packet_times_[1], |
| 1387 | base::TimeDelta::FromMilliseconds(1), |
| 1388 | base::TimeDelta::FromSeconds(10), 100); |
| 1389 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_3rd_To_4th_c", |
| 1390 | packet_times_[3] - packet_times_[2], |
| 1391 | base::TimeDelta::FromMilliseconds(1), |
| 1392 | base::TimeDelta::FromSeconds(10), 100); |
| 1393 | UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_4th_To_5th_c", |
| 1394 | packet_times_[4] - packet_times_[3], |
| 1395 | base::TimeDelta::FromMilliseconds(1), |
| 1396 | base::TimeDelta::FromSeconds(10), 100); |
| 1397 | return; |
| 1398 | } |
| 1399 | default: |
| 1400 | NOTREACHED(); |
| 1401 | return; |
| 1402 | } |
| 1403 | } |
| 1404 | |
// The common type of histogram we use for all compression-tracking histograms.
// Records |sample| (a byte count) into "Net.Compress.<name>" with a custom
// counts range of 500..1,000,000 and 100 buckets. |name| must be a string
// literal: it is concatenated onto the prefix at compile time.
#define COMPRESSION_HISTOGRAM(name, sample) \
    do { \
      UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
                                  500, 1000000, 100); \
    } while(0)
| 1411 | |
| 1412 | void URLRequestHttpJob::RecordCompressionHistograms() { |
| 1413 | DCHECK(request_); |
| 1414 | if (!request_) |
| 1415 | return; |
| 1416 | |
| 1417 | if (is_cached_content_ || // Don't record cached content |
| 1418 | !GetStatus().is_success() || // Don't record failed content |
| 1419 | !IsCompressibleContent() || // Only record compressible content |
| 1420 | !prefilter_bytes_read()) // Zero-byte responses aren't useful. |
| 1421 | return; |
| 1422 | |
| 1423 | // Miniature requests aren't really compressible. Don't count them. |
| 1424 | const int kMinSize = 16; |
| 1425 | if (prefilter_bytes_read() < kMinSize) |
| 1426 | return; |
| 1427 | |
| 1428 | // Only record for http or https urls. |
| 1429 | bool is_http = request_->url().SchemeIs("http"); |
| 1430 | bool is_https = request_->url().SchemeIs("https"); |
| 1431 | if (!is_http && !is_https) |
| 1432 | return; |
| 1433 | |
| 1434 | int compressed_B = prefilter_bytes_read(); |
| 1435 | int decompressed_B = postfilter_bytes_read(); |
| 1436 | bool was_filtered = HasFilter(); |
| 1437 | |
| 1438 | // We want to record how often downloaded resources are compressed. |
| 1439 | // But, we recognize that different protocols may have different |
| 1440 | // properties. So, for each request, we'll put it into one of 3 |
| 1441 | // groups: |
| 1442 | // a) SSL resources |
| 1443 | // Proxies cannot tamper with compression headers with SSL. |
| 1444 | // b) Non-SSL, loaded-via-proxy resources |
| 1445 | // In this case, we know a proxy might have interfered. |
| 1446 | // c) Non-SSL, loaded-without-proxy resources |
| 1447 | // In this case, we know there was no explicit proxy. However, |
| 1448 | // it is possible that a transparent proxy was still interfering. |
| 1449 | // |
| 1450 | // For each group, we record the same 3 histograms. |
| 1451 | |
| 1452 | if (is_https) { |
| 1453 | if (was_filtered) { |
| 1454 | COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B); |
| 1455 | COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B); |
| 1456 | } else { |
| 1457 | COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B); |
| 1458 | } |
| 1459 | return; |
| 1460 | } |
| 1461 | |
| 1462 | if (request_->was_fetched_via_proxy()) { |
| 1463 | if (was_filtered) { |
| 1464 | COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B); |
| 1465 | COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B); |
| 1466 | } else { |
| 1467 | COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B); |
| 1468 | } |
| 1469 | return; |
| 1470 | } |
| 1471 | |
| 1472 | if (was_filtered) { |
| 1473 | COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B); |
| 1474 | COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B); |
| 1475 | } else { |
| 1476 | COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B); |
| 1477 | } |
| 1478 | } |
| 1479 | |
| 1480 | bool URLRequestHttpJob::IsCompressibleContent() const { |
| 1481 | std::string mime_type; |
| 1482 | return GetMimeType(&mime_type) && |
| 1483 | (IsSupportedJavascriptMimeType(mime_type.c_str()) || |
| 1484 | IsSupportedNonImageMimeType(mime_type.c_str())); |
| 1485 | } |
| 1486 | |
[email protected] | bbaea8f | 2011-06-24 00:11:01 | [diff] [blame] | 1487 | void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) { |
| 1488 | if (start_time_.is_null()) |
| 1489 | return; |
| 1490 | |
| 1491 | base::TimeDelta total_time = base::TimeTicks::Now() - start_time_; |
| 1492 | UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time); |
| 1493 | |
| 1494 | if (reason == FINISHED) { |
| 1495 | UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time); |
| 1496 | } else { |
| 1497 | UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time); |
| 1498 | } |
| 1499 | |
| 1500 | static bool cache_experiment = false; |
| 1501 | if (!cache_experiment) |
| 1502 | cache_experiment = base::FieldTrialList::TrialExists("CacheListSize"); |
| 1503 | if (cache_experiment) { |
| 1504 | UMA_HISTOGRAM_TIMES( |
| 1505 | base::FieldTrial::MakeName("Net.HttpJob.TotalTime", "CacheListSize"), |
| 1506 | total_time); |
| 1507 | if (reason == FINISHED) { |
| 1508 | UMA_HISTOGRAM_TIMES( |
| 1509 | base::FieldTrial::MakeName("Net.HttpJob.TotalTimeSuccess", |
| 1510 | "CacheListSize"), |
| 1511 | total_time); |
| 1512 | } else { |
| 1513 | UMA_HISTOGRAM_TIMES( |
| 1514 | base::FieldTrial::MakeName("Net.HttpJob.TotalTimeCancel", |
| 1515 | "CacheListSize"), |
| 1516 | total_time); |
| 1517 | } |
| 1518 | } |
| 1519 | |
| 1520 | start_time_ = base::TimeTicks(); |
| 1521 | } |
| 1522 | |
| 1523 | void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) { |
| 1524 | if (done_) |
| 1525 | return; |
| 1526 | done_ = true; |
| 1527 | |
| 1528 | RecordPerfHistograms(reason); |
| 1529 | if (reason == FINISHED) |
| 1530 | RecordCompressionHistograms(); |
| 1531 | } |
| 1532 | |
[email protected] | 4f5656c6 | 2010-12-13 10:47:09 | [diff] [blame] | 1533 | } // namespace net |