// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_http_job.h"

#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/file_util.h"
#include "base/file_version_info.h"
#include "base/message_loop.h"
#include "base/rand_util.h"
#include "base/string_util.h"
#include "net/base/cert_status_flags.h"
#include "net/base/cookie_monster.h"
#include "net/base/filter.h"
#include "net/base/force_tls_state.h"
#include "net/base/load_flags.h"
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/base/sdch_manager.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
#include "net/http/http_transaction.h"
#include "net/http/http_transaction_factory.h"
#include "net/http/http_util.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_error_job.h"

// TODO(darin): make sure the port blocking code is not lost

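// Creates the job that fetches "http" and "https" URLs. Requests to ports
// that are not allowed by default, or requests without a usable context, are
// turned into error jobs instead. When --force-https is given, plain http
// requests to hosts marked in the ForceTLSState are also rejected.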
// static
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https");

  if (!net::IsPortAllowedByDefault(request->url().IntPort()))
    return new URLRequestErrorJob(request, net::ERR_UNSAFE_PORT);

  if (!request->context() ||
      !request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(request, net::ERR_INVALID_ARGUMENT);
  }

  // We cache the value of the switch because this code path is hit on every
  // network request.
  static const bool kForceHTTPS =
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kForceHTTPS);
  if (kForceHTTPS && scheme == "http" &&
      request->context()->force_tls_state() &&
      request->context()->force_tls_state()->IsEnabledForHost(
          request->url().host()))
    return new URLRequestErrorJob(request, net::ERR_DISALLOWED_URL_SCHEME);

  return new URLRequestHttpJob(request);
}

URLRequestHttpJob::URLRequestHttpJob(URLRequest* request)
    : URLRequestJob(request),
      transaction_(NULL),
      response_info_(NULL),
      proxy_auth_state_(net::AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(net::AUTH_STATE_DONT_NEED_AUTH),
      ALLOW_THIS_IN_INITIALIZER_LIST(
          start_callback_(this, &URLRequestHttpJob::OnStartCompleted)),
      ALLOW_THIS_IN_INITIALIZER_LIST(
          read_callback_(this, &URLRequestHttpJob::OnReadCompleted)),
      read_in_progress_(false),
      context_(request->context()),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false) {
}

URLRequestHttpJob::~URLRequestHttpJob() {
  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!IsCachedContent()) {
    if (sdch_test_control_)
      RecordPacketStats(SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while this class
  // can still service the IsCachedContent() call.
  DestroyFilters();

  if (sdch_dictionary_url_.is_valid()) {
    // Prior to reaching the destructor, request_ has been set to a NULL
    // pointer, so request_->url() is no longer valid in the destructor, and we
    // use an alternate copy |request_info_.url|.
    SdchManager* manager = SdchManager::Global();
    // To be extra safe, since this is a "different time" from when we decided
    // to get the dictionary, we'll validate that an SdchManager is available.
    // At shutdown time, care is taken to be sure that we don't delete this
    // globally useful instance "too soon," so this check is just defensive
    // coding to assure that IF the system is shutting down, we don't have any
    // problem if the manager was deleted ahead of time.
    if (manager)  // Defensive programming.
      manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
  }
}

void URLRequestHttpJob::SetUpload(net::UploadData* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data = upload;
}

void URLRequestHttpJob::SetExtraRequestHeaders(
    const std::string& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers = headers;
}

void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // TODO(darin): URLRequest::referrer() should return a GURL
  GURL referrer(request_->referrer());

  // Ensure that we do not send username and password fields in the referrer.
  if (referrer.has_username() || referrer.has_password()) {
    GURL::Replacements referrer_mods;
    referrer_mods.ClearUsername();
    referrer_mods.ClearPassword();
    referrer = referrer.ReplaceComponents(referrer_mods);
  }

  request_info_.url = request_->url();
  request_info_.referrer = referrer;
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  request_info_.priority = request_->priority();

  if (request_->context()) {
    request_info_.user_agent =
        request_->context()->GetUserAgent(request_->url());
  }

  AddExtraHeaders();

  StartTransaction();
}

void URLRequestHttpJob::Kill() {
  if (!transaction_.get())
    return;

  DestroyTransaction();
  URLRequestJob::Kill();
}

net::LoadState URLRequestHttpJob::GetLoadState() const {
  return transaction_.get() ?
      transaction_->GetLoadState() : net::LOAD_STATE_IDLE;
}

uint64 URLRequestHttpJob::GetUploadProgress() const {
  return transaction_.get() ? transaction_->GetUploadProgress() : 0;
}

bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return response_info_->headers->GetMimeType(mime_type);
}

bool URLRequestHttpJob::GetCharset(std::string* charset) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return response_info_->headers->GetCharset(charset);
}

void URLRequestHttpJob::GetResponseInfo(net::HttpResponseInfo* info) {
  DCHECK(request_);
  DCHECK(transaction_.get());

  if (response_info_)
    *info = *response_info_;
}

bool URLRequestHttpJob::GetResponseCookies(
    std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  if (response_cookies_.empty())
    FetchResponseCookies();

  cookies->clear();
  cookies->swap(response_cookies_);
  return true;
}

int URLRequestHttpJob::GetResponseCode() const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return -1;

  return response_info_->headers->response_code();
}

bool URLRequestHttpJob::GetContentEncodings(
    std::vector<Filter::FilterType>* encoding_types) {
  DCHECK(transaction_.get());
  if (!response_info_)
    return false;
  DCHECK(encoding_types->empty());

  std::string encoding_type;
  void* iter = NULL;
  while (response_info_->headers->EnumerateHeader(&iter, "Content-Encoding",
                                                  &encoding_type)) {
    encoding_types->push_back(Filter::ConvertEncodingToType(encoding_type));
  }

  // Even if encoding types are empty, there is a chance that we need to add
  // some decoding, as some proxies strip encoding completely. In such cases,
  // we may need to add (for example) SDCH filtering (when the context suggests
  // it is appropriate).
  Filter::FixupEncodingTypes(*this, encoding_types);

  return !encoding_types->empty();
}

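// A response is only treated as SDCH-encoded if this job advertised an SDCH
// dictionary (via the Avail-Dictionary header) when the request was sent; see
// AddExtraHeaders().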
bool URLRequestHttpJob::IsSdchResponse() const {
  return sdch_dictionary_advertised_;
}

bool URLRequestHttpJob::IsRedirectResponse(GURL* location,
                                           int* http_status_code) {
  if (!response_info_)
    return false;

  std::string value;
  if (!response_info_->headers->IsRedirect(&value))
    return false;

  *location = request_->url().Resolve(value);
  *http_status_code = response_info_->headers->response_code();
  return true;
}

bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
  // We only allow redirects to certain "safe" protocols. This does not
  // restrict redirects to externally handled protocols. Our consumer would
  // need to take care of those.

  if (!URLRequest::IsHandledURL(location))
    return true;

  static const char* kSafeSchemes[] = {
    "http",
    "https",
    "ftp"
  };

  for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) {
    if (location.SchemeIs(kSafeSchemes[i]))
      return true;
  }

  return false;
}

bool URLRequestHttpJob::NeedsAuth() {
  int code = GetResponseCode();
  if (code == -1)
    return false;

  // Check if we need either Proxy or WWW Authentication. This could happen
  // because we either provided no auth info, or provided incorrect info.
  switch (code) {
    case 407:
      if (proxy_auth_state_ == net::AUTH_STATE_CANCELED)
        return false;
      proxy_auth_state_ = net::AUTH_STATE_NEED_AUTH;
      return true;
    case 401:
      if (server_auth_state_ == net::AUTH_STATE_CANCELED)
        return false;
      server_auth_state_ = net::AUTH_STATE_NEED_AUTH;
      return true;
  }
  return false;
}

void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<net::AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // sanity checks:
  DCHECK(proxy_auth_state_ == net::AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == net::AUTH_STATE_NEED_AUTH);
  DCHECK(response_info_->headers->response_code() == 401 ||
         response_info_->headers->response_code() == 407);

  *result = response_info_->auth_challenge;
}

void URLRequestHttpJob::SetAuth(const std::wstring& username,
                                const std::wstring& password) {
  DCHECK(transaction_.get());

  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == net::AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = net::AUTH_STATE_HAVE_AUTH;
  } else {
    DCHECK(server_auth_state_ == net::AUTH_STATE_NEED_AUTH);
    server_auth_state_ = net::AUTH_STATE_HAVE_AUTH;
  }

  RestartTransactionWithAuth(username, password);
}

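// Restarts the transaction with the supplied credentials. This is also called
// with empty credentials when the transaction can retry using identity it
// already holds (see NotifyHeadersComplete). The Cookie header is rebuilt
// because the 401/407 response may have updated the cookie store.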
void URLRequestHttpJob::RestartTransactionWithAuth(
    const std::wstring& username,
    const std::wstring& password) {
  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers by AddExtraHeaders(), we need to strip them out.
  static const char* const cookie_name[] = { "cookie" };
  request_info_.extra_headers = net::HttpUtil::StripHeaders(
      request_info_.extra_headers, cookie_name, arraysize(cookie_name));
  // TODO(eroman): This ordering is inconsistent with a non-restarted request,
  // where the Cookie header appears second from the bottom.
  request_info_.extra_headers += AssembleRequestCookies();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithAuth(username, password,
                                         &start_callback_);
  if (rv == net::ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, rv));
}

void URLRequestHttpJob::CancelAuth() {
  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == net::AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = net::AUTH_STATE_CANCELED;
  } else {
    DCHECK(server_auth_state_ == net::AUTH_STATE_NEED_AUTH);
    server_auth_state_ = net::AUTH_STATE_CANCELED;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  // OK, let the consumer read the error page...
  //
  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
  // which will cause the consumer to receive OnResponseStarted instead of
  // OnAuthRequired.
  //
  // We have to do this via InvokeLater to avoid "recursing" the consumer.
  //
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, net::OK));
}

void URLRequestHttpJob::ContinueDespiteLastError() {
  // If the transaction was destroyed, then the job was cancelled.
  if (!transaction_.get())
    return;

  DCHECK(!response_info_) << "should not have a response yet";

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartIgnoringLastError(&start_callback_);
  if (rv == net::ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, rv));
}

bool URLRequestHttpJob::GetMoreData() {
  return transaction_.get() && !read_in_progress_;
}

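// Reads response body data from the transaction. Returns true and fills in
// |bytes_read| when data (or EOF, as zero bytes) is available synchronously.
// Otherwise returns false, leaving the request IO_PENDING until
// OnReadCompleted fires, or failing the request outright on error.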
bool URLRequestHttpJob::ReadRawData(net::IOBuffer* buf, int buf_size,
                                    int* bytes_read) {
  DCHECK_NE(buf_size, 0);
  DCHECK(bytes_read);
  DCHECK(!read_in_progress_);

  int rv = transaction_->Read(buf, buf_size, &read_callback_);
  if (rv >= 0) {
    *bytes_read = rv;
    return true;
  }

  if (rv == net::ERR_IO_PENDING) {
    read_in_progress_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  } else {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
  }

  return false;
}

void URLRequestHttpJob::OnStartCompleted(int result) {
  // If the request was destroyed, then there is no more work to do.
  if (!request_ || !request_->delegate())
    return;

  // If the transaction was destroyed, then the job was cancelled, and
  // we can just ignore this notification.
  if (!transaction_.get())
    return;

  // Clear the IO_PENDING status.
  SetStatus(URLRequestStatus());

  if (result == net::OK) {
    NotifyHeadersComplete();
  } else if (ShouldTreatAsCertificateError(result)) {
    // We encountered an SSL certificate error. Ask our delegate to decide
    // what we should do.
    // TODO(wtc): also pass ssl_info.cert_status, or just pass the whole
    // ssl_info.
    request_->delegate()->OnSSLCertificateError(
        request_, result, transaction_->GetResponseInfo()->ssl_info.cert);
  } else {
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

void URLRequestHttpJob::OnReadCompleted(int result) {
  read_in_progress_ = false;

  if (result == 0) {
    NotifyDone(URLRequestStatus());
  } else if (result < 0) {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
  } else {
    // Clear the IO_PENDING status.
    SetStatus(URLRequestStatus());
  }

  NotifyReadComplete(result);
}

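// Returns true if |result| is a certificate error that should be forwarded
// to the delegate as an overridable SSL error. With --force-https set, hosts
// that have ForceTLS enabled get no such override and the request fails.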
bool URLRequestHttpJob::ShouldTreatAsCertificateError(int result) {
  if (!net::IsCertificateError(result))
    return false;

  // Hide the fancy processing behind a command line switch.
  if (!CommandLine::ForCurrentProcess()->HasSwitch(switches::kForceHTTPS))
    return true;

  // Check whether our context is using ForceTLS.
  if (!context_->force_tls_state())
    return true;

  return !context_->force_tls_state()->IsEnabledForHost(
      request_info_.url.host());
}

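// Called when response headers are available. Saves Set-Cookie values to the
// cookie store (subject to load flags and the cookie policy), processes any
// X-Force-TLS header, records an SDCH Get-Dictionary URL for later fetching,
// and restarts the transaction if it can retry authentication with cached
// credentials before notifying the URLRequest that headers are complete.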
void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  // Get the Set-Cookie values, and send them to our cookie database.
  if (!(request_info_.load_flags & net::LOAD_DO_NOT_SAVE_COOKIES)) {
    URLRequestContext* ctx = request_->context();
    if (ctx && ctx->cookie_store() &&
        ctx->cookie_policy()->CanSetCookie(
            request_->url(), request_->first_party_for_cookies())) {
      FetchResponseCookies();
      net::CookieMonster::CookieOptions options;
      options.set_include_httponly();
      ctx->cookie_store()->SetCookiesWithOptions(request_->url(),
                                                 response_cookies_,
                                                 options);
    }
  }

  ProcessForceTLSHeader();

  if (SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url())) {
    static const std::string name = "Get-Dictionary";
    std::string url_text;
    void* iter = NULL;
    // TODO(jar): We need to not fetch dictionaries the first time they are
    // seen, but rather wait until we can justify their usefulness.
    // For now, we will only fetch the first dictionary, which will at least
    // require multiple suggestions before we get additional ones for this
    // site. Eventually we should wait until a dictionary is requested several
    // times before we even download it (so that we don't waste memory or
    // bandwidth).
    if (response_info_->headers->EnumerateHeader(&iter, name, &url_text)) {
      // request_->url() won't be valid in the destructor, so we use an
      // alternate copy.
      DCHECK(request_->url() == request_info_.url);
      // Resolve suggested URL relative to request url.
      sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information. Each time it restarts, we get
  // notified of the headers completion so that we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    RestartTransactionWithAuth(std::wstring(), std::wstring());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}

void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  transaction_.reset();
  response_info_ = NULL;
}

void URLRequestHttpJob::StartTransaction() {
  // NOTE: This method assumes that request_info_ is already set up properly.

  // Create a transaction.
  DCHECK(!transaction_.get());

  DCHECK(request_->context());
  DCHECK(request_->context()->http_transaction_factory());

  transaction_.reset(
      request_->context()->http_transaction_factory()->CreateTransaction());

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv;
  if (transaction_.get()) {
    rv = transaction_->Start(&request_info_, &start_callback_);
    if (rv == net::ERR_IO_PENDING)
      return;
  } else {
    rv = net::ERR_FAILED;
  }

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, rv));
}

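// Appends Accept-Encoding and, when a context is available, Cookie,
// Accept-Language, and Accept-Charset headers to the request. SDCH is
// advertised (with an Avail-Dictionary header) only when the SdchManager
// supports this domain and has dictionaries for the URL; requests eligible
// for the SDCH latency experiment have a 1% chance of being held back as the
// non-advertising control group.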
void URLRequestHttpJob::AddExtraHeaders() {
  // TODO(jar): Consider optimizing away SDCH advertising bytes when the URL is
  // probably an img or such (and SDCH encoding is not likely).
  bool advertise_sdch = SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url());
  std::string avail_dictionaries;
  if (advertise_sdch) {
    SdchManager::Global()->GetAvailDictionaryList(request_->url(),
                                                  &avail_dictionaries);

    // The AllowLatencyExperiment() is only true if we've successfully done a
    // full SDCH compression recently in this browser session for this host.
    // Note that for this path, there might be no applicable dictionaries, and
    // hence we can't participate in the experiment.
    if (!avail_dictionaries.empty() &&
        SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
      // We are participating in the test (or control), and hence we'll
      // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
      // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
      EnablePacketCounting(kSdchPacketHistogramCount);
      if (base::RandDouble() < .01) {
        sdch_test_control_ = true;  // 1% probability.
        advertise_sdch = false;
      } else {
        sdch_test_activated_ = true;
      }
    }
  }

  // Supply Accept-Encoding headers first so that it is more likely that they
  // will be in the first transmitted packet. This can sometimes make it easier
  // to filter and analyze the streams to assure that a proxy has not damaged
  // these headers. Some proxies deliberately corrupt Accept-Encoding headers.
  if (!advertise_sdch) {
    // Tell the server what compression formats we support (other than SDCH).
    request_info_.extra_headers += "Accept-Encoding: gzip,deflate,bzip2\r\n";
  } else {
    // Include SDCH in the acceptable list.
    request_info_.extra_headers += "Accept-Encoding: "
        "gzip,deflate,bzip2,sdch\r\n";
    if (!avail_dictionaries.empty()) {
      request_info_.extra_headers += "Avail-Dictionary: "
          + avail_dictionaries + "\r\n";
      sdch_dictionary_advertised_ = true;
      // Since we're tagging this transaction as advertising a dictionary,
      // we'll definitely employ an SDCH filter (or tentative SDCH filter) when
      // we get a response. When done, we'll record histograms via SDCH_DECODE
      // or SDCH_PASSTHROUGH. Hence we need to record packet arrival times.
      EnablePacketCounting(kSdchPacketHistogramCount);
    }
  }

  URLRequestContext* context = request_->context();
  if (context) {
    request_info_.extra_headers += AssembleRequestCookies();
    if (!context->accept_language().empty())
      request_info_.extra_headers += "Accept-Language: " +
          context->accept_language() + "\r\n";
    if (!context->accept_charset().empty())
      request_info_.extra_headers += "Accept-Charset: " +
          context->accept_charset() + "\r\n";
  }
}

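// Builds the "Cookie:" request header from the cookie store, honoring the
// context's cookie policy. Returns an empty string when there is no context,
// cookies may not be read for this URL, or no cookies match.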
std::string URLRequestHttpJob::AssembleRequestCookies() {
  URLRequestContext* context = request_->context();
  if (context) {
    // Add in the cookie header.  TODO might we need more than one header?
    if (context->cookie_store() &&
        context->cookie_policy()->CanGetCookies(
            request_->url(), request_->first_party_for_cookies())) {
      net::CookieMonster::CookieOptions options;
      options.set_include_httponly();
      std::string cookies = request_->context()->cookie_store()->
          GetCookiesWithOptions(request_->url(), options);
      if (!cookies.empty())
        return "Cookie: " + cookies + "\r\n";
    }
  }
  return std::string();
}

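// Copies every Set-Cookie value from the response headers into
// |response_cookies_|.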
void URLRequestHttpJob::FetchResponseCookies() {
  DCHECK(response_info_);
  DCHECK(response_cookies_.empty());

  std::string name = "Set-Cookie";
  std::string value;

  void* iter = NULL;
  while (response_info_->headers->EnumerateHeader(&iter, name, &value))
    response_cookies_.push_back(value);
}

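// Records X-Force-TLS response headers with the context's ForceTLSState so
// that later plain-http loads for the host can be refused (see Factory). Only
// headers from HTTPS responses with clean certificates are honored, and only
// when the --force-https switch is present.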
void URLRequestHttpJob::ProcessForceTLSHeader() {
  DCHECK(response_info_);

  // Hide processing behind a command line flag.
  if (!CommandLine::ForCurrentProcess()->HasSwitch(switches::kForceHTTPS))
    return;

  // Only process X-Force-TLS from HTTPS responses.
  if (request_info_.url.scheme() != "https")
    return;

  // Only process X-Force-TLS from responses with valid certificates.
  if (response_info_->ssl_info.cert_status & net::CERT_STATUS_ALL_ERRORS)
    return;

  URLRequestContext* ctx = request_->context();
  if (!ctx || !ctx->force_tls_state())
    return;

  std::string name = "X-Force-TLS";
  std::string value;

  void* iter = NULL;
  while (response_info_->headers->EnumerateHeader(&iter, name, &value))
    ctx->force_tls_state()->DidReceiveHeader(request_info_.url, value);
}