// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_http_job.h"

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/file_util.h"
#include "base/file_version_info.h"
#include "base/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/string_util.h"
#include "base/time.h"
#include "net/base/cert_status_flags.h"
#include "net/base/cookie_monster.h"
#include "net/base/filter.h"
#include "net/base/host_port_pair.h"
#include "net/base/load_flags.h"
#include "net/base/mime_util.h"
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/base/network_delegate.h"
#include "net/base/sdch_manager.h"
#include "net/base/ssl_cert_request_info.h"
#include "net/base/ssl_config_service.h"
#include "net/http/http_mac_signature.h"
#include "net/http/http_request_headers.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
#include "net/http/http_transaction.h"
#include "net/http/http_transaction_factory.h"
#include "net/http/http_util.h"
#include "net/url_request/fraudulent_certificate_reporter.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_error_job.h"
#include "net/url_request/url_request_redirect_job.h"
#include "net/url_request/url_request_throttler_header_adapter.h"
#include "net/url_request/url_request_throttler_manager.h"

static const char kAvailDictionaryHeader[] = "Avail-Dictionary";

namespace net {

namespace {

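// Builds an Authorization header from MAC credentials carried alongside
// cookies (name, creation date, MAC key, and MAC algorithm), binding the
// signature to the request method, path, host, and port via HttpMacSignature.
// Only the first cookie that yields a valid signature is used.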
void AddAuthorizationHeader(
    const std::vector<CookieStore::CookieInfo>& cookie_infos,
    HttpRequestInfo* request_info) {
  const GURL& url = request_info->url;
  const std::string& method = request_info->method;
  std::string request_uri = HttpUtil::PathForRequest(url);
  const std::string& host = url.host();
  int port = url.EffectiveIntPort();
  for (size_t i = 0; i < cookie_infos.size(); ++i) {
    HttpMacSignature signature;
    if (!signature.AddStateInfo(cookie_infos[i].name,
                                cookie_infos[i].creation_date,
                                cookie_infos[i].mac_key,
                                cookie_infos[i].mac_algorithm)) {
      continue;
    }
    if (!signature.AddHttpInfo(method, request_uri, host, port))
      continue;
    std::string authorization_header;
    if (!signature.GenerateAuthorizationHeader(&authorization_header))
      continue;
    request_info->extra_headers.SetHeader(HttpRequestHeaders::kAuthorization,
                                          authorization_header);
    return;  // Only add the first valid header.
  }
}

}  // namespace

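// Adapts URLRequestHttpJob to the FilterContext interface that the content
// decoding filters (gzip, deflate, SDCH) query for request metadata such as
// the MIME type, URL, response code, and observed byte counts.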
class URLRequestHttpJob::HttpFilterContext : public FilterContext {
 public:
  explicit HttpFilterContext(URLRequestHttpJob* job);
  virtual ~HttpFilterContext();

  // FilterContext implementation.
  virtual bool GetMimeType(std::string* mime_type) const;
  virtual bool GetURL(GURL* gurl) const;
  virtual base::Time GetRequestTime() const;
  virtual bool IsCachedContent() const;
  virtual bool IsDownload() const;
  virtual bool IsSdchResponse() const;
  virtual int64 GetByteReadCount() const;
  virtual int GetResponseCode() const;
  virtual void RecordPacketStats(StatisticSelector statistic) const;

  // Method to allow us to reset filter context for a response that should have
  // been SDCH encoded when there is an update due to an explicit HTTP header.
  void ResetSdchResponseToFalse();

 private:
  URLRequestHttpJob* job_;

  DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
};

URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
    : job_(job) {
  DCHECK(job_);
}

URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
}

bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
    std::string* mime_type) const {
  return job_->GetMimeType(mime_type);
}

bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
  if (!job_->request())
    return false;
  *gurl = job_->request()->url();
  return true;
}

base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
  return job_->request() ? job_->request()->request_time() : base::Time();
}

bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
  return job_->is_cached_content_;
}

bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
  return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
}

void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() {
  DCHECK(job_->sdch_dictionary_advertised_);
  job_->sdch_dictionary_advertised_ = false;
}

bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const {
  return job_->sdch_dictionary_advertised_;
}

int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
  return job_->filter_input_byte_count();
}

int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
  return job_->GetResponseCode();
}

void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
    StatisticSelector statistic) const {
  job_->RecordPacketStats(statistic);
}

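// Creates the job for an http:// or https:// request. If the host has opted
// into Strict Transport Security, an http:// request is answered with a
// redirect job that upgrades the URL to https:// rather than going to the
// network over plain HTTP.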
// TODO(darin): make sure the port blocking code is not lost
// static
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https");

  if (!request->context() ||
      !request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(request, ERR_INVALID_ARGUMENT);
  }

  TransportSecurityState::DomainState domain_state;
  if (scheme == "http" &&
      request->context()->transport_security_state() &&
      request->context()->transport_security_state()->GetDomainState(
          &domain_state,
          request->url().host(),
          SSLConfigService::IsSNIAvailable(
              request->context()->ssl_config_service())) &&
      domain_state.ShouldRedirectHTTPToHTTPS()) {
    DCHECK_EQ(request->url().scheme(), "http");
    url_canon::Replacements<char> replacements;
    static const char kNewScheme[] = "https";
    replacements.SetScheme(kNewScheme,
                           url_parse::Component(0, strlen(kNewScheme)));
    GURL new_location = request->url().ReplaceComponents(replacements);
    return new URLRequestRedirectJob(request, new_location);
  }

  return new URLRequestHttpJob(request);
}

URLRequestHttpJob::URLRequestHttpJob(URLRequest* request)
    : URLRequestJob(request),
      response_info_(NULL),
      response_cookies_save_index_(0),
      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      ALLOW_THIS_IN_INITIALIZER_LIST(start_callback_(
          base::Bind(&URLRequestHttpJob::OnStartCompleted,
                     base::Unretained(this)))),
      ALLOW_THIS_IN_INITIALIZER_LIST(notify_before_headers_sent_callback_(
          base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
                     base::Unretained(this)))),
      read_in_progress_(false),
      transaction_(NULL),
      throttling_entry_(URLRequestThrottlerManager::GetInstance()->
          RegisterRequestUrl(request->url())),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false),
      request_creation_time_(),
      packet_timing_enabled_(false),
      done_(false),
      bytes_observed_in_packets_(0),
      request_time_snapshot_(),
      final_packet_time_(),
      ALLOW_THIS_IN_INITIALIZER_LIST(
          filter_context_(new HttpFilterContext(this))),
      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)),
      ALLOW_THIS_IN_INITIALIZER_LIST(on_headers_received_callback_(
          base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback,
                     base::Unretained(this)))),
      awaiting_callback_(false) {
  ResetTimer();
}

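// Called each time the transaction has response headers available, including
// after auth restarts. Records cache state for the throttler and filters,
// processes Strict-Transport-Security and Get-Dictionary headers, and either
// restarts the transaction for auth or notifies the URLRequestJob base class.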
void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  if (!is_cached_content_) {
    URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
    throttling_entry_->UpdateWithResponse(request_info_.url.host(),
                                          &response_adapter);
  }

  ProcessStrictTransportSecurityHeader();

  if (SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url())) {
    const std::string name = "Get-Dictionary";
    std::string url_text;
    void* iter = NULL;
    // TODO(jar): We need to not fetch dictionaries the first time they are
    // seen, but rather wait until we can justify their usefulness.
    // For now, we will only fetch the first dictionary, which will at least
    // require multiple suggestions before we get additional ones for this site.
    // Eventually we should wait until a dictionary is requested several times
    // before we even download it (so that we don't waste memory or bandwidth).
    if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
      // request_->url() won't be valid in the destructor, so we use an
      // alternate copy.
      DCHECK_EQ(request_->url(), request_info_.url);
      // Resolve suggested URL relative to request url.
      sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information. Each time it restarts, we get
  // notified of the headers completion so that we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    // TODO(battre): This breaks the webrequest API for
    // URLRequestTestHTTP.BasicAuthWithCookies
    // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
    // occurs.
    RestartTransactionWithAuth(AuthCredentials());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}

void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
  DoneWithRequest(FINISHED);
  URLRequestJob::NotifyDone(status);
}

void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  DoneWithRequest(ABORTED);
  transaction_.reset();
  response_info_ = NULL;
  context_ = NULL;
}

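// Gives the embedder's NetworkDelegate a chance to observe or modify the
// extra request headers before the transaction is started. If the delegate
// returns ERR_IO_PENDING, the job blocks until
// NotifyBeforeSendHeadersCallback() resumes it.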
void URLRequestHttpJob::StartTransaction() {
  if (request_->context() && request_->context()->network_delegate()) {
    int rv = request_->context()->network_delegate()->NotifyBeforeSendHeaders(
        request_, notify_before_headers_sent_callback_,
        &request_info_.extra_headers);
    // If an extension blocks the request, we rely on the callback to
    // StartTransactionInternal().
    if (rv == ERR_IO_PENDING) {
      SetBlockedOnDelegate();
      return;
    }
  }
  StartTransactionInternal();
}

void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
  SetUnblockedOnDelegate();

  if (result == OK) {
    StartTransactionInternal();
  } else {
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
        make_scoped_refptr(new NetLogStringParameter("source", "delegate")));
    NotifyCanceled();
  }
}

void URLRequestHttpJob::StartTransactionInternal() {
  // NOTE: This method assumes that request_info_ is already setup properly.

  // If we already have a transaction, then we should restart the transaction
  // with auth provided by auth_credentials_.

  int rv;

  if (request_->context() && request_->context()->network_delegate()) {
    request_->context()->network_delegate()->NotifySendHeaders(
        request_, request_info_.extra_headers);
  }

  if (transaction_.get()) {
    rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
    auth_credentials_ = AuthCredentials();
  } else {
    DCHECK(request_->context());
    DCHECK(request_->context()->http_transaction_factory());

    rv = request_->context()->http_transaction_factory()->CreateTransaction(
        &transaction_);
    if (rv == OK) {
      if (!URLRequestThrottlerManager::GetInstance()->enforce_throttling() ||
          !throttling_entry_->ShouldRejectRequest(request_info_.load_flags)) {
        rv = transaction_->Start(
            &request_info_, start_callback_, request_->net_log());
        start_time_ = base::TimeTicks::Now();
      } else {
        // Special error code for the exponential back-off module.
        rv = ERR_TEMPORARILY_THROTTLED;
      }
      // Make sure the context is alive for the duration of the
      // transaction.
      context_ = request_->context();
    }
  }

  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

void URLRequestHttpJob::AddExtraHeaders() {
  // Supply Accept-Encoding field only if it is not already provided.
  // It should be provided IF the content is known to have restrictions on
  // potential encoding, such as streaming multi-media.
  // For details see bug 47381.
  // TODO(jar, enal): jpeg files etc. should set up a request header if
  // possible. Right now it is done only by buffered_resource_loader and
  // simple_data_source.
  if (!request_info_.extra_headers.HasHeader(
          HttpRequestHeaders::kAcceptEncoding)) {
    bool advertise_sdch = SdchManager::Global() &&
        SdchManager::Global()->IsInSupportedDomain(request_->url());
    std::string avail_dictionaries;
    if (advertise_sdch) {
      SdchManager::Global()->GetAvailDictionaryList(request_->url(),
                                                    &avail_dictionaries);

      // The AllowLatencyExperiment() is only true if we've successfully done a
      // full SDCH compression recently in this browser session for this host.
      // Note that for this path, there might be no applicable dictionaries,
      // and hence we can't participate in the experiment.
      if (!avail_dictionaries.empty() &&
          SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
        // We are participating in the test (or control), and hence we'll
        // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
        // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
        packet_timing_enabled_ = true;
        if (base::RandDouble() < .01) {
          sdch_test_control_ = true;  // 1% probability.
          advertise_sdch = false;
        } else {
          sdch_test_activated_ = true;
        }
      }
    }

    // Supply Accept-Encoding headers first so that it is more likely that they
    // will be in the first transmitted packet. This can sometimes make it
    // easier to filter and analyze the streams to assure that a proxy has not
    // damaged these headers. Some proxies deliberately corrupt Accept-Encoding
    // headers.
    if (!advertise_sdch) {
      // Tell the server what compression formats we support (other than SDCH).
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
    } else {
      // Include SDCH in acceptable list.
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
      if (!avail_dictionaries.empty()) {
        request_info_.extra_headers.SetHeader(
            kAvailDictionaryHeader,
            avail_dictionaries);
        sdch_dictionary_advertised_ = true;
        // Since we're tagging this transaction as advertising a dictionary,
        // we'll definitely employ an SDCH filter (or tentative sdch filter)
        // when we get a response. When done, we'll record histograms via
        // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet
        // arrival times.
        packet_timing_enabled_ = true;
      }
    }
  }

  const URLRequestContext* context = request_->context();
  if (context) {
    // Only add default Accept-Language and Accept-Charset if the request
    // didn't have them specified.
    if (!context->accept_language().empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptLanguage,
          context->accept_language());
    }
    if (!context->accept_charset().empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptCharset,
          context->accept_charset());
    }
  }
}

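// Cookie handling is asynchronous: AddCookieHeaderAndStart() fetches the
// cookies for the URL (via CheckCookiePolicyAndLoad()/DoLoadCookies() when a
// CookieMonster is available), OnCookiesLoaded() copies them into the Cookie
// request header (and, if MAC cookies are enabled, the Authorization header),
// and DoStartTransaction() finally starts the transaction unless the request
// was canceled in the meantime.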
void URLRequestHttpJob::AddCookieHeaderAndStart() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  CookieStore* cookie_store =
      request_->context()->cookie_store();
  if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
    net::CookieMonster* cookie_monster = cookie_store->GetCookieMonster();
    if (cookie_monster) {
      cookie_monster->GetAllCookiesForURLAsync(
          request_->url(),
          base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
                     weak_factory_.GetWeakPtr()));
    } else {
      DoLoadCookies();
    }
  } else {
    DoStartTransaction();
  }
}

void URLRequestHttpJob::DoLoadCookies() {
  CookieOptions options;
  options.set_include_httponly();
  request_->context()->cookie_store()->GetCookiesWithInfoAsync(
      request_->url(), options,
      base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
                 weak_factory_.GetWeakPtr()));
}

void URLRequestHttpJob::CheckCookiePolicyAndLoad(
    const CookieList& cookie_list) {
  if (CanGetCookies(cookie_list))
    DoLoadCookies();
  else
    DoStartTransaction();
}

void URLRequestHttpJob::OnCookiesLoaded(
    const std::string& cookie_line,
    const std::vector<net::CookieStore::CookieInfo>& cookie_infos) {
  if (!cookie_line.empty()) {
    request_info_.extra_headers.SetHeader(
        HttpRequestHeaders::kCookie, cookie_line);
  }
  if (URLRequest::AreMacCookiesEnabled())
    AddAuthorizationHeader(cookie_infos, &request_info_);
  DoStartTransaction();
}

void URLRequestHttpJob::DoStartTransaction() {
  // We may have been canceled while retrieving cookies.
  if (GetStatus().is_success()) {
    StartTransaction();
  } else {
    NotifyCanceled();
  }
}

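// Response cookies are persisted one at a time: SaveNextCookie() asks
// CanSetCookie() for permission and writes the cookie asynchronously, then
// OnCookieSaved()/CookieHandled() advance to the next one. Once every cookie
// has been handled, the headers-complete notification is delivered.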
void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
  if (result != net::OK) {
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
        make_scoped_refptr(new NetLogStringParameter("source", "delegate")));
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    return;
  }

  DCHECK(transaction_.get());

  const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
  DCHECK(response_info);

  response_cookies_.clear();
  response_cookies_save_index_ = 0;

  FetchResponseCookies(&response_cookies_);

  // Now, loop over the response cookies, and attempt to persist each.
  SaveNextCookie();
}

void URLRequestHttpJob::SaveNextCookie() {
  if (response_cookies_save_index_ == response_cookies_.size()) {
    response_cookies_.clear();
    response_cookies_save_index_ = 0;
    SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
    NotifyHeadersComplete();
    return;
  }

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
      request_->context()->cookie_store()) {
    CookieOptions options;
    options.set_include_httponly();
    if (CanSetCookie(
        response_cookies_[response_cookies_save_index_], &options)) {
      request_->context()->cookie_store()->SetCookieWithOptionsAsync(
          request_->url(), response_cookies_[response_cookies_save_index_],
          options, base::Bind(&URLRequestHttpJob::OnCookieSaved,
                              weak_factory_.GetWeakPtr()));
      return;
    }
  }
  CookieHandled();
}

void URLRequestHttpJob::OnCookieSaved(bool cookie_status) {
  CookieHandled();
}

void URLRequestHttpJob::CookieHandled() {
  response_cookies_save_index_++;
  // We may have been canceled within OnSetCookie.
  if (GetStatus().is_success()) {
    SaveNextCookie();
  } else {
    NotifyCanceled();
  }
}

void URLRequestHttpJob::FetchResponseCookies(
    std::vector<std::string>* cookies) {
  const std::string name = "Set-Cookie";
  std::string value;

  void* iter = NULL;
  HttpResponseHeaders* headers = GetResponseHeaders();
  while (headers->EnumerateHeader(&iter, name, &value)) {
    if (!value.empty())
      cookies->push_back(value);
  }
}

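// Parses any Strict-Transport-Security response headers and records them in
// the TransportSecurityState. The header is only honored when it arrived over
// an HTTPS connection with no certificate errors.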
void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
  DCHECK(response_info_);

  const URLRequestContext* ctx = request_->context();
  if (!ctx || !ctx->transport_security_state())
    return;

  const bool https = response_info_->ssl_info.is_valid();
  const bool valid_https =
      https && !IsCertStatusError(response_info_->ssl_info.cert_status);

  const std::string name = "Strict-Transport-Security";
  std::string value;

  int max_age;
  bool include_subdomains;

  HttpResponseHeaders* headers = GetResponseHeaders();

  void* iter = NULL;
  while (headers->EnumerateHeader(&iter, name, &value)) {
    const bool ok = TransportSecurityState::ParseHeader(
        value, &max_age, &include_subdomains);
    if (!ok)
      continue;
    // We will only accept strict mode if we saw the header from an HTTPS
    // connection with no certificate problems.
    if (!valid_https)
      continue;
    base::Time current_time(base::Time::Now());
    base::TimeDelta max_age_delta = base::TimeDelta::FromSeconds(max_age);

    TransportSecurityState::DomainState domain_state;
    domain_state.expiry = current_time + max_age_delta;
    domain_state.mode = TransportSecurityState::DomainState::MODE_STRICT;
    domain_state.include_subdomains = include_subdomains;

    ctx->transport_security_state()->EnableHost(request_info_.url.host(),
                                                domain_state);
  }
}

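// Completion callback for starting (or restarting) the transaction. On
// success the NetworkDelegate may inspect or override the response headers;
// certificate errors, client-certificate requests, and pinned-key failures
// are routed to their dedicated notification or reporting paths.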
void URLRequestHttpJob::OnStartCompleted(int result) {
  RecordTimer();

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  // If the transaction was destroyed, then the job was cancelled, and
  // we can just ignore this notification.
  if (!transaction_.get())
    return;

  // Clear the IO_PENDING status
  SetStatus(URLRequestStatus());

  if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN &&
      transaction_->GetResponseInfo() != NULL) {
    FraudulentCertificateReporter* reporter =
        context_->fraudulent_certificate_reporter();
    if (reporter != NULL) {
      const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info;
      bool sni_available = SSLConfigService::IsSNIAvailable(
          context_->ssl_config_service());
      const std::string& host = request_->url().host();

      reporter->SendReport(host, ssl_info, sni_available);
    }
  }

  if (result == OK) {
    scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders();
    if (request_->context() && request_->context()->network_delegate()) {
      // Note that |this| may not be deleted until
      // |on_headers_received_callback_| or
      // |NetworkDelegate::URLRequestDestroyed()| has been called.
      int error = request_->context()->network_delegate()->
          NotifyHeadersReceived(request_, on_headers_received_callback_,
                                headers, &override_response_headers_);
      if (error != net::OK) {
        if (error == net::ERR_IO_PENDING) {
          awaiting_callback_ = true;
          request_->net_log().BeginEvent(
              NetLog::TYPE_URL_REQUEST_BLOCKED_ON_DELEGATE, NULL);
        } else {
          request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
              make_scoped_refptr(
                  new NetLogStringParameter("source", "delegate")));
          NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error));
        }
        return;
      }
    }

    SaveCookiesAndNotifyHeadersComplete(net::OK);
  } else if (IsCertificateError(result)) {
    // We encountered an SSL certificate error. Ask our delegate to decide
    // what we should do.

    TransportSecurityState::DomainState domain_state;
    const bool is_hsts_host =
        context_->transport_security_state() &&
        context_->transport_security_state()->GetDomainState(
            &domain_state, request_info_.url.host(),
            SSLConfigService::IsSNIAvailable(context_->ssl_config_service())) &&
        domain_state.ShouldCertificateErrorsBeFatal();
    NotifySSLCertificateError(transaction_->GetResponseInfo()->ssl_info,
                              is_hsts_host);
  } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
    NotifyCertificateRequested(
        transaction_->GetResponseInfo()->cert_request_info);
  } else {
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
  request_->net_log().EndEvent(
      NetLog::TYPE_URL_REQUEST_BLOCKED_ON_DELEGATE, NULL);
  awaiting_callback_ = false;
  SaveCookiesAndNotifyHeadersComplete(result);
}

void URLRequestHttpJob::OnReadCompleted(int result) {
  read_in_progress_ = false;

  if (ShouldFixMismatchedContentLength(result))
    result = 0;

  if (result == 0) {
    NotifyDone(URLRequestStatus());
  } else if (result < 0) {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
  } else {
    // Clear the IO_PENDING status
    SetStatus(URLRequestStatus());
  }

  NotifyReadComplete(result);
}

void URLRequestHttpJob::RestartTransactionWithAuth(
    const AuthCredentials& credentials) {
  auth_credentials_ = credentials;

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  ResetTimer();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers, we need to strip them out before adding them again.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);

  AddCookieHeaderAndStart();
}

void URLRequestHttpJob::SetUpload(UploadData* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data = upload;
}

void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}

void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // Ensure that we do not send username and password fields in the referrer.
  GURL referrer(request_->GetSanitizedReferrer());

  request_info_.url = request_->url();
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  request_info_.priority = request_->priority();
  request_info_.request_id = request_->identifier();

  // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
  // from overriding headers that are controlled using other means. Otherwise a
  // plugin could set a referrer although sending the referrer is inhibited.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);

  // Our consumer should have made sure that this is a safe referrer. See for
  // instance WebCore::FrameLoader::HideReferrer.
  if (referrer.is_valid()) {
    request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
                                          referrer.spec());
  }

  if (request_->context()) {
    request_info_.extra_headers.SetHeaderIfMissing(
        HttpRequestHeaders::kUserAgent,
        request_->context()->GetUserAgent(request_->url()));
  }

  AddExtraHeaders();
  AddCookieHeaderAndStart();
}

void URLRequestHttpJob::Kill() {
  if (!transaction_.get())
    return;

  weak_factory_.InvalidateWeakPtrs();
  DestroyTransaction();
  URLRequestJob::Kill();
}

LoadState URLRequestHttpJob::GetLoadState() const {
  return transaction_.get() ?
      transaction_->GetLoadState() : LOAD_STATE_IDLE;
}

uint64 URLRequestHttpJob::GetUploadProgress() const {
  return transaction_.get() ? transaction_->GetUploadProgress() : 0;
}

bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetMimeType(mime_type);
}

bool URLRequestHttpJob::GetCharset(std::string* charset) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetCharset(charset);
}

void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
  DCHECK(request_);
  DCHECK(transaction_.get());

  if (response_info_) {
    *info = *response_info_;
    if (override_response_headers_)
      info->headers = override_response_headers_;
  }
}

bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  // TODO(darin): Why are we extracting response cookies again? Perhaps we
  // should just leverage response_cookies_.

  cookies->clear();
  FetchResponseCookies(cookies);
  return true;
}

int URLRequestHttpJob::GetResponseCode() const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return -1;

  return GetResponseHeaders()->response_code();
}

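// Builds the content-decoding filter chain from the Content-Encoding response
// headers. If SDCH was advertised but the server marked the response with
// "X-Sdch-Encode: 0", the SDCH expectation is dropped;
// Filter::FixupEncodingTypes() then compensates for proxies that strip
// encoding headers entirely.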
Filter* URLRequestHttpJob::SetupFilter() const {
  DCHECK(transaction_.get());
  if (!response_info_)
    return NULL;

  std::vector<Filter::FilterType> encoding_types;
  std::string encoding_type;
  HttpResponseHeaders* headers = GetResponseHeaders();
  void* iter = NULL;
  while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) {
    encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type));
  }

  if (filter_context_->IsSdchResponse()) {
    // We are wary of proxies that discard or damage SDCH encoding. If a server
    // explicitly states that this is not SDCH content, then we can correct our
    // assumption that this is an SDCH response, and avoid the need to recover
    // as though the content is corrupted (when we discover it is not SDCH
    // encoded).
    std::string sdch_response_status;
    iter = NULL;
    while (headers->EnumerateHeader(&iter, "X-Sdch-Encode",
                                    &sdch_response_status)) {
      if (sdch_response_status == "0") {
        filter_context_->ResetSdchResponseToFalse();
        break;
      }
    }
  }

  // Even if encoding types are empty, there is a chance that we need to add
  // some decoding, as some proxies strip encoding completely. In such cases,
  // we may need to add (for example) SDCH filtering (when the context suggests
  // it is appropriate).
  Filter::FixupEncodingTypes(*filter_context_, &encoding_types);

  return !encoding_types.empty()
      ? Filter::Factory(encoding_types, *filter_context_) : NULL;
}

bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
  // We only allow redirects to certain "safe" protocols. This does not
  // restrict redirects to externally handled protocols. Our consumer would
  // need to take care of those.

  if (!URLRequest::IsHandledURL(location))
    return true;

  static const char* kSafeSchemes[] = {
    "http",
    "https",
    "ftp"
  };

  for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) {
    if (location.SchemeIs(kSafeSchemes[i]))
      return true;
  }

  return false;
}

bool URLRequestHttpJob::NeedsAuth() {
  int code = GetResponseCode();
  if (code == -1)
    return false;

  // Check if we need either Proxy or WWW Authentication. This could happen
  // because we either provided no auth info, or provided incorrect info.
  switch (code) {
    case 407:
      if (proxy_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
    case 401:
      if (server_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      server_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
  }
  return false;
}

void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // sanity checks:
  DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == AUTH_STATE_NEED_AUTH);
  DCHECK(GetResponseHeaders()->response_code() == 401 ||
         GetResponseHeaders()->response_code() == 407);

  *result = response_info_->auth_challenge;
}

void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) {
  DCHECK(transaction_.get());

  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_HAVE_AUTH;
  }

  RestartTransactionWithAuth(credentials);
}

void URLRequestHttpJob::CancelAuth() {
  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_CANCELED;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_CANCELED;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  ResetTimer();

  // OK, let the consumer read the error page...
  //
  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
  // which will cause the consumer to receive OnResponseStarted instead of
  // OnAuthRequired.
  //
  // We have to do this via InvokeLater to avoid "recursing" the consumer.
  //
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), OK));
}

void URLRequestHttpJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  DCHECK(transaction_.get());

  DCHECK(!response_info_) << "should not have a response yet";

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithCertificate(client_cert, start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

void URLRequestHttpJob::ContinueDespiteLastError() {
  // If the transaction was destroyed, then the job was cancelled.
  if (!transaction_.get())
    return;

  DCHECK(!response_info_) << "should not have a response yet";

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartIgnoringLastError(start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const {
  // Some servers send the body compressed, but specify the content length as
  // the uncompressed size. Although this violates the HTTP spec, we want to
  // support it (as IE and Firefox do), but *only* for an exact match.
  // See http://crbug.com/79694.
  if (rv == net::ERR_CONNECTION_CLOSED) {
    if (request_ && request_->response_headers()) {
      int64 expected_length = request_->response_headers()->GetContentLength();
      VLOG(1) << __FUNCTION__ << "() "
              << "\"" << request_->url().spec() << "\""
              << " content-length = " << expected_length
              << " pre total = " << prefilter_bytes_read()
              << " post total = " << postfilter_bytes_read();
      if (postfilter_bytes_read() == expected_length) {
        // Clear the error.
        return true;
      }
    }
  }
  return false;
}

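// Reads response body bytes from the transaction. A synchronous result fills
// *bytes_read immediately; ERR_IO_PENDING defers completion to
// OnReadCompleted(). A connection close that exactly matches the declared
// Content-Length is treated as a normal end of stream.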
[email protected]5394e422011-01-20 22:07:431086bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
[email protected]b7996452011-10-31 19:30:561087 int* bytes_read) {
[email protected]5394e422011-01-20 22:07:431088 DCHECK_NE(buf_size, 0);
1089 DCHECK(bytes_read);
1090 DCHECK(!read_in_progress_);
1091
[email protected]49639fa2011-12-20 23:22:411092 int rv = transaction_->Read(
1093 buf, buf_size,
1094 base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));
[email protected]85c1dce2011-07-06 12:01:291095
[email protected]f001bd6a2011-12-08 04:31:371096 if (ShouldFixMismatchedContentLength(rv))
1097 rv = 0;
1098
[email protected]5394e422011-01-20 22:07:431099 if (rv >= 0) {
1100 *bytes_read = rv;
[email protected]bbaea8f2011-06-24 00:11:011101 if (!rv)
1102 DoneWithRequest(FINISHED);
[email protected]5394e422011-01-20 22:07:431103 return true;
1104 }
1105
1106 if (rv == ERR_IO_PENDING) {
1107 read_in_progress_ = true;
1108 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
1109 } else {
1110 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
1111 }
1112
1113 return false;
1114}
1115
1116void URLRequestHttpJob::StopCaching() {
1117 if (transaction_.get())
1118 transaction_->StopCaching();
1119}
1120
[email protected]5c04f722011-08-12 17:52:471121void URLRequestHttpJob::DoneReading() {
1122 if (transaction_.get())
1123 transaction_->DoneReading();
1124 DoneWithRequest(FINISHED);
1125}
1126
[email protected]6d81b482011-02-22 19:47:191127HostPortPair URLRequestHttpJob::GetSocketAddress() const {
1128 return response_info_ ? response_info_->socket_address : HostPortPair();
1129}
1130
URLRequestHttpJob::~URLRequestHttpJob() {
  CHECK(!awaiting_callback_);

  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!is_cached_content_) {
    if (sdch_test_control_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while
  // filter_context_ is still alive.
  DestroyFilters();

  if (sdch_dictionary_url_.is_valid()) {
    // Prior to reaching the destructor, request_ has been set to a NULL
    // pointer, so request_->url() is no longer valid in the destructor, and we
    // use an alternate copy |request_info_.url|.
    SdchManager* manager = SdchManager::Global();
    // To be extra safe, since this is a "different time" from when we decided
    // to get the dictionary, we'll validate that an SdchManager is available.
    // At shutdown time, care is taken to be sure that we don't delete this
    // globally useful instance "too soon," so this check is just defensive
    // coding to assure that IF the system is shutting down, we don't have any
    // problem if the manager was deleted ahead of time.
    if (manager)  // Defensive programming.
      manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
  }
  DoneWithRequest(ABORTED);
}

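// Records the time elapsed since |request_creation_time_| under
// Net.HttpTimeToFirstByte (plus the WarmSocketImpact and Prerender field-trial
// variants), then clears the timestamp so a transaction is only sampled once.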
void URLRequestHttpJob::RecordTimer() {
  if (request_creation_time_.is_null()) {
    NOTREACHED()
        << "The same transaction shouldn't start twice without new timing.";
    return;
  }

  base::TimeDelta to_start = base::Time::Now() - request_creation_time_;
  request_creation_time_ = base::Time();

  UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start);

  static const bool use_warm_socket_impact_histogram =
      base::FieldTrialList::TrialExists("WarmSocketImpact");
  if (use_warm_socket_impact_histogram) {
    UMA_HISTOGRAM_MEDIUM_TIMES(
        base::FieldTrial::MakeName("Net.HttpTimeToFirstByte",
                                   "WarmSocketImpact"),
        to_start);
  }

  static const bool use_prerender_histogram =
      base::FieldTrialList::TrialExists("Prerender");
  if (use_prerender_histogram) {
    UMA_HISTOGRAM_MEDIUM_TIMES(
        base::FieldTrial::MakeName("Net.HttpTimeToFirstByte",
                                   "Prerender"),
        to_start);
  }
}

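// Stamps |request_creation_time_| for a newly (re)started transaction; any
// previous timing is expected to have been recorded already.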
void URLRequestHttpJob::ResetTimer() {
  if (!request_creation_time_.is_null()) {
    NOTREACHED()
        << "The timer was reset before it was recorded.";
    return;
  }
  request_creation_time_ = base::Time::Now();
}

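// Keeps packet-timing bookkeeping up to date as raw (pre-filter) bytes
// arrive: notes the time of the most recent data, snapshots the request time
// when the first bytes are seen, and updates the observed byte count.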
void URLRequestHttpJob::UpdatePacketReadTimes() {
  if (!packet_timing_enabled_)
    return;

  if (filter_input_byte_count() <= bytes_observed_in_packets_) {
    DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_);
    return;  // No new bytes have arrived.
  }

  final_packet_time_ = base::Time::Now();
  if (!bytes_observed_in_packets_)
    request_time_snapshot_ = request_ ? request_->request_time() : base::Time();

  bytes_observed_in_packets_ = filter_input_byte_count();
}

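// Emits the SDCH decode/holdback UMA histograms selected by |statistic|,
// using the byte counts and timing gathered in UpdatePacketReadTimes().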
void URLRequestHttpJob::RecordPacketStats(
    FilterContext::StatisticSelector statistic) const {
  if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
    return;

  base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
  switch (statistic) {
    case FilterContext::SDCH_DECODE: {
      UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
          static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
      return;
    }
    case FilterContext::SDCH_PASSTHROUGH: {
      // Despite advertising a dictionary, we handled non-sdch compressed
      // content.
      return;
    }

    case FilterContext::SDCH_EXPERIMENT_DECODE: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode",
                                 duration,
                                 base::TimeDelta::FromMilliseconds(20),
                                 base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback",
                                 duration,
                                 base::TimeDelta::FromMilliseconds(20),
                                 base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    default:
      NOTREACHED();
      return;
  }
}

// The common type of histogram we use for all compression-tracking histograms.
#define COMPRESSION_HISTOGRAM(name, sample) \
    do { \
      UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
                                  500, 1000000, 100); \
    } while (0)

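// Records whether (and how well) this job's response was compressed, using
// the COMPRESSION_HISTOGRAM macro above; for example,
// COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B) logs the
// sample under "Net.Compress.SSL.BytesAfterCompression". Only successful,
// non-cached, compressible http/https responses of at least 16 bytes are
// counted, split into SSL, proxied, and unproxied groups.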
void URLRequestHttpJob::RecordCompressionHistograms() {
  DCHECK(request_);
  if (!request_)
    return;

  if (is_cached_content_ ||          // Don't record cached content
      !GetStatus().is_success() ||   // Don't record failed content
      !IsCompressibleContent() ||    // Only record compressible content
      !prefilter_bytes_read())       // Zero-byte responses aren't useful.
    return;

  // Miniature requests aren't really compressible. Don't count them.
  const int kMinSize = 16;
  if (prefilter_bytes_read() < kMinSize)
    return;

  // Only record for http or https urls.
  bool is_http = request_->url().SchemeIs("http");
  bool is_https = request_->url().SchemeIs("https");
  if (!is_http && !is_https)
    return;

  int compressed_B = prefilter_bytes_read();
  int decompressed_B = postfilter_bytes_read();
  bool was_filtered = HasFilter();

  // We want to record how often downloaded resources are compressed.
  // But, we recognize that different protocols may have different
  // properties. So, for each request, we'll put it into one of 3
  // groups:
  //   a) SSL resources
  //      Proxies cannot tamper with compression headers with SSL.
  //   b) Non-SSL, loaded-via-proxy resources
  //      In this case, we know a proxy might have interfered.
  //   c) Non-SSL, loaded-without-proxy resources
  //      In this case, we know there was no explicit proxy. However,
  //      it is possible that a transparent proxy was still interfering.
  //
  // For each group, we record the same 3 histograms.

  if (is_https) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (request_->was_fetched_via_proxy()) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (was_filtered) {
    COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
    COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
  } else {
    COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
  }
}

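// A response counts as compressible if its MIME type is a supported
// JavaScript or non-image type, i.e. content we would expect could be served
// compressed.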
bool URLRequestHttpJob::IsCompressibleContent() const {
  std::string mime_type;
  return GetMimeType(&mime_type) &&
      (IsSupportedJavascriptMimeType(mime_type.c_str()) ||
       IsSupportedNonImageMimeType(mime_type.c_str()));
}

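// Records the total time the job spent (overall, split by success vs. cancel,
// and with "CacheListSize" field-trial variants), then resets |start_time_|
// so the histograms are emitted only once per job.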
void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {
  if (start_time_.is_null())
    return;

  base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
  UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);

  if (reason == FINISHED) {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
  } else {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
  }

  static bool cache_experiment = false;
  if (!cache_experiment)
    cache_experiment = base::FieldTrialList::TrialExists("CacheListSize");
  if (cache_experiment) {
    UMA_HISTOGRAM_TIMES(
        base::FieldTrial::MakeName("Net.HttpJob.TotalTime", "CacheListSize"),
        total_time);
    if (reason == FINISHED) {
      UMA_HISTOGRAM_TIMES(
          base::FieldTrial::MakeName("Net.HttpJob.TotalTimeSuccess",
                                     "CacheListSize"),
          total_time);
    } else {
      UMA_HISTOGRAM_TIMES(
          base::FieldTrial::MakeName("Net.HttpJob.TotalTimeCancel",
                                     "CacheListSize"),
          total_time);
    }
    if (response_info_) {
      if (response_info_->was_cached) {
        UMA_HISTOGRAM_TIMES(
            base::FieldTrial::MakeName("Net.HttpJob.TotalTimeCached",
                                       "CacheListSize"),
            total_time);
      } else {
        UMA_HISTOGRAM_TIMES(
            base::FieldTrial::MakeName("Net.HttpJob.TotalTimeNotCached",
                                       "CacheListSize"),
            total_time);
      }
    }
  }

  start_time_ = base::TimeTicks();
}

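// Marks the job as done exactly once, recording timing histograms and, for
// requests that finished successfully, compression histograms.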
void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) {
  if (done_)
    return;
  done_ = true;

  RecordPerfHistograms(reason);
  if (reason == FINISHED)
    RecordCompressionHistograms();
}

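// Returns the headers callers should see: |override_response_headers_| when
// one has been installed, otherwise the transaction's response headers.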
HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const {
  DCHECK(transaction_.get());
  DCHECK(transaction_->GetResponseInfo());
  return override_response_headers_.get() ?
      override_response_headers_ :
      transaction_->GetResponseInfo()->headers;
}

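// Clears |awaiting_callback_| when the owning URLRequest goes away, so the
// CHECK in the destructor can verify no callback is still pending.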
void URLRequestHttpJob::NotifyURLRequestDestroyed() {
  awaiting_callback_ = false;
}

}  // namespace net