license.bot | bf09a50 | 2008-08-24 00:55:55 | [diff] [blame] | 1 | // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. |
| 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 4 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 5 | #include "net/url_request/url_request_http_job.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 6 | |
[email protected] | 4ed2755f | 2008-12-15 09:01:33 | [diff] [blame] | 7 | #include "base/base_switches.h" |
| 8 | #include "base/command_line.h" |
[email protected] | 39ce5c0 | 2008-08-22 04:03:44 | [diff] [blame] | 9 | #include "base/compiler_specific.h" |
[email protected] | 6088942 | 2008-09-23 01:18:16 | [diff] [blame] | 10 | #include "base/file_util.h" |
| 11 | #include "base/file_version_info.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 12 | #include "base/message_loop.h" |
[email protected] | 5b90b5d | 2009-04-30 23:06:01 | [diff] [blame] | 13 | #include "base/rand_util.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 14 | #include "base/string_util.h" |
[email protected] | a9cea754 | 2009-05-20 04:30:23 | [diff] [blame] | 15 | #include "net/base/cert_status_flags.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 16 | #include "net/base/cookie_monster.h" |
[email protected] | 423041b | 2008-10-27 17:39:28 | [diff] [blame] | 17 | #include "net/base/filter.h" |
[email protected] | a9cea754 | 2009-05-20 04:30:23 | [diff] [blame] | 18 | #include "net/base/force_tls_state.h" |
[email protected] | b843072 | 2008-09-17 20:05:44 | [diff] [blame] | 19 | #include "net/base/load_flags.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 20 | #include "net/base/net_errors.h" |
| 21 | #include "net/base/net_util.h" |
[email protected] | 6088942 | 2008-09-23 01:18:16 | [diff] [blame] | 22 | #include "net/base/sdch_manager.h" |
[email protected] | 319d9e6f | 2009-02-18 19:47:21 | [diff] [blame] | 23 | #include "net/http/http_response_headers.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 24 | #include "net/http/http_response_info.h" |
| 25 | #include "net/http/http_transaction.h" |
| 26 | #include "net/http/http_transaction_factory.h" |
[email protected] | 0757e770 | 2009-03-27 04:00:22 | [diff] [blame] | 27 | #include "net/http/http_util.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 28 | #include "net/url_request/url_request.h" |
[email protected] | 319d9e6f | 2009-02-18 19:47:21 | [diff] [blame] | 29 | #include "net/url_request/url_request_context.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 30 | #include "net/url_request/url_request_error_job.h" |
| 31 | |
| 32 | // TODO(darin): make sure the port blocking code is not lost |
| 33 | |
// static
// Creates the job for an "http" or "https" request.  Returns a
// URLRequestErrorJob instead when the request must not proceed: unsafe
// port, missing/incomplete context, or ForceTLS forbidding plain http for
// this host.
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https");

  // Reject connections to well-known non-HTTP service ports up front.
  if (!net::IsPortAllowedByDefault(request->url().IntPort()))
    return new URLRequestErrorJob(request, net::ERR_UNSAFE_PORT);

  // An HTTP transaction factory is required to service the request at all.
  if (!request->context() ||
      !request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(request, net::ERR_INVALID_ARGUMENT);
  }

  // We cache the value of the switch because this code path is hit on every
  // network request.
  static const bool kForceHTTPS =
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kForceHTTPS);
  // Under --force-https, hosts enrolled in ForceTLS may not be fetched over
  // plain http; surface this as a disallowed scheme.
  if (kForceHTTPS && scheme == "http" &&
      request->context()->force_tls_state() &&
      request->context()->force_tls_state()->IsEnabledForHost(
          request->url().host()))
    return new URLRequestErrorJob(request, net::ERR_DISALLOWED_URL_SCHEME);

  return new URLRequestHttpJob(request);
}
| 60 | |
// Construction performs no network activity; Start() kicks off the request.
// context_ caches request->context() so it stays reachable from this job
// (e.g. in ShouldTreatAsCertificateError) independently of request_.
URLRequestHttpJob::URLRequestHttpJob(URLRequest* request)
    : URLRequestJob(request),
      transaction_(NULL),
      response_info_(NULL),
      proxy_auth_state_(net::AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(net::AUTH_STATE_DONT_NEED_AUTH),
      // The callbacks capture |this|; safe because they are member objects
      // that cannot outlive the job.
      ALLOW_THIS_IN_INITIALIZER_LIST(
          start_callback_(this, &URLRequestHttpJob::OnStartCompleted)),
      ALLOW_THIS_IN_INITIALIZER_LIST(
          read_callback_(this, &URLRequestHttpJob::OnReadCompleted)),
      read_in_progress_(false),
      context_(request->context()),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false) {
}
| 78 | |
URLRequestHttpJob::~URLRequestHttpJob() {
  // A request can be in at most one arm of the SDCH experiment.
  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  // Only record experiment stats for responses that actually hit the
  // network; cached content would skew the packet timing data.
  if (!IsCachedContent()) {
    if (sdch_test_control_)
      RecordPacketStats(SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while this class
  // can still service the IsCachedContent() call.
  DestroyFilters();

  // If the response advertised an SDCH dictionary, fetch it now that the
  // request is complete.
  if (sdch_dictionary_url_.is_valid()) {
    // Prior to reaching the destructor, request_ has been set to a NULL
    // pointer, so request_->url() is no longer valid in the destructor, and we
    // use an alternate copy |request_info_.url|.
    SdchManager* manager = SdchManager::Global();
    // To be extra safe, since this is a "different time" from when we decided
    // to get the dictionary, we'll validate that an SdchManager is available.
    // At shutdown time, care is taken to be sure that we don't delete this
    // globally useful instance "too soon," so this check is just defensive
    // coding to assure that IF the system is shutting down, we don't have any
    // problem if the manager was deleted ahead of time.
    if (manager)  // Defensive programming.
      manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
  }
}
| 106 | |
// Attaches the request body.  Must be called before Start(); the DCHECK
// guards against changing the upload after the transaction exists.
void URLRequestHttpJob::SetUpload(net::UploadData* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data = upload;
}
| 111 | |
// Replaces the extra request headers (a raw CRLF-joined header string).
// Must be called before Start(), like SetUpload().
void URLRequestHttpJob::SetExtraRequestHeaders(
    const std::string& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers = headers;
}
| 117 | |
// Snapshots the URLRequest's state into request_info_, assembles the extra
// headers (cookies etc. via AddExtraHeaders), then starts the transaction.
// Ordering matters: headers must be complete before StartTransaction().
void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // TODO(darin): URLRequest::referrer() should return a GURL
  GURL referrer(request_->referrer());

  // Ensure that we do not send username and password fields in the referrer.
  if (referrer.has_username() || referrer.has_password()) {
    GURL::Replacements referrer_mods;
    referrer_mods.ClearUsername();
    referrer_mods.ClearPassword();
    referrer = referrer.ReplaceComponents(referrer_mods);
  }

  request_info_.url = request_->url();
  request_info_.referrer = referrer;
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  request_info_.priority = request_->priority();

  // The user-agent can vary per-URL (the context decides).
  if (request_->context()) {
    request_info_.user_agent =
        request_->context()->GetUserAgent(request_->url());
  }

  AddExtraHeaders();

  StartTransaction();
}
| 147 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 148 | void URLRequestHttpJob::Kill() { |
[email protected] | af4876d | 2008-10-21 23:10:57 | [diff] [blame] | 149 | if (!transaction_.get()) |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 150 | return; |
| 151 | |
| 152 | DestroyTransaction(); |
| 153 | URLRequestJob::Kill(); |
| 154 | } |
| 155 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 156 | net::LoadState URLRequestHttpJob::GetLoadState() const { |
[email protected] | af4876d | 2008-10-21 23:10:57 | [diff] [blame] | 157 | return transaction_.get() ? |
| 158 | transaction_->GetLoadState() : net::LOAD_STATE_IDLE; |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 159 | } |
| 160 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 161 | uint64 URLRequestHttpJob::GetUploadProgress() const { |
[email protected] | af4876d | 2008-10-21 23:10:57 | [diff] [blame] | 162 | return transaction_.get() ? transaction_->GetUploadProgress() : 0; |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 163 | } |
| 164 | |
[email protected] | 60c413c9 | 2009-03-09 16:53:31 | [diff] [blame] | 165 | bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const { |
[email protected] | af4876d | 2008-10-21 23:10:57 | [diff] [blame] | 166 | DCHECK(transaction_.get()); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 167 | |
| 168 | if (!response_info_) |
| 169 | return false; |
| 170 | |
| 171 | return response_info_->headers->GetMimeType(mime_type); |
| 172 | } |
| 173 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 174 | bool URLRequestHttpJob::GetCharset(std::string* charset) { |
[email protected] | af4876d | 2008-10-21 23:10:57 | [diff] [blame] | 175 | DCHECK(transaction_.get()); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 176 | |
| 177 | if (!response_info_) |
| 178 | return false; |
| 179 | |
| 180 | return response_info_->headers->GetCharset(charset); |
| 181 | } |
| 182 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 183 | void URLRequestHttpJob::GetResponseInfo(net::HttpResponseInfo* info) { |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 184 | DCHECK(request_); |
[email protected] | af4876d | 2008-10-21 23:10:57 | [diff] [blame] | 185 | DCHECK(transaction_.get()); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 186 | |
| 187 | if (response_info_) |
| 188 | *info = *response_info_; |
| 189 | } |
| 190 | |
// Hands the Set-Cookie values from the response to the caller.  The cached
// copy is parsed lazily on first use and handed over via swap, so a second
// call re-parses (or returns an empty vector if headers are gone).
bool URLRequestHttpJob::GetResponseCookies(
    std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  // Parse the Set-Cookie headers only when someone actually asks.
  if (response_cookies_.empty())
    FetchResponseCookies();

  // Transfer ownership of the strings to the caller; this clears our copy.
  cookies->clear();
  cookies->swap(response_cookies_);
  return true;
}
| 205 | |
[email protected] | 84973ad | 2009-03-30 18:05:43 | [diff] [blame] | 206 | int URLRequestHttpJob::GetResponseCode() const { |
[email protected] | af4876d | 2008-10-21 23:10:57 | [diff] [blame] | 207 | DCHECK(transaction_.get()); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 208 | |
| 209 | if (!response_info_) |
| 210 | return -1; |
| 211 | |
| 212 | return response_info_->headers->response_code(); |
| 213 | } |
| 214 | |
[email protected] | 6088942 | 2008-09-23 01:18:16 | [diff] [blame] | 215 | bool URLRequestHttpJob::GetContentEncodings( |
[email protected] | 423041b | 2008-10-27 17:39:28 | [diff] [blame] | 216 | std::vector<Filter::FilterType>* encoding_types) { |
[email protected] | af4876d | 2008-10-21 23:10:57 | [diff] [blame] | 217 | DCHECK(transaction_.get()); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 218 | if (!response_info_) |
| 219 | return false; |
[email protected] | 423041b | 2008-10-27 17:39:28 | [diff] [blame] | 220 | DCHECK(encoding_types->empty()); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 221 | |
[email protected] | 6088942 | 2008-09-23 01:18:16 | [diff] [blame] | 222 | std::string encoding_type; |
| 223 | void* iter = NULL; |
| 224 | while (response_info_->headers->EnumerateHeader(&iter, "Content-Encoding", |
| 225 | &encoding_type)) { |
[email protected] | 423041b | 2008-10-27 17:39:28 | [diff] [blame] | 226 | encoding_types->push_back(Filter::ConvertEncodingToType(encoding_type)); |
[email protected] | 6088942 | 2008-09-23 01:18:16 | [diff] [blame] | 227 | } |
[email protected] | c631b6aa | 2008-10-15 21:21:37 | [diff] [blame] | 228 | |
[email protected] | 77e9fcf | 2009-03-28 01:45:58 | [diff] [blame] | 229 | // Even if encoding types are empty, there is a chance that we need to add |
| 230 | // some decoding, as some proxies strip encoding completely. In such cases, |
| 231 | // we may need to add (for example) SDCH filtering (when the context suggests |
| 232 | // it is appropriate). |
| 233 | Filter::FixupEncodingTypes(*this, encoding_types); |
| 234 | |
[email protected] | 6088942 | 2008-09-23 01:18:16 | [diff] [blame] | 235 | return !encoding_types->empty(); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 236 | } |
| 237 | |
// True when this request advertised an SDCH dictionary (set when building
// the request headers), meaning the response may be SDCH-encoded.
bool URLRequestHttpJob::IsSdchResponse() const {
  return sdch_dictionary_advertised_;
}
| 241 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 242 | bool URLRequestHttpJob::IsRedirectResponse(GURL* location, |
| 243 | int* http_status_code) { |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 244 | if (!response_info_) |
| 245 | return false; |
| 246 | |
| 247 | std::string value; |
| 248 | if (!response_info_->headers->IsRedirect(&value)) |
| 249 | return false; |
| 250 | |
| 251 | *location = request_->url().Resolve(value); |
| 252 | *http_status_code = response_info_->headers->response_code(); |
| 253 | return true; |
| 254 | } |
| 255 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 256 | bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) { |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 257 | // We only allow redirects to certain "safe" protocols. This does not |
| 258 | // restrict redirects to externally handled protocols. Our consumer would |
| 259 | // need to take care of those. |
| 260 | |
| 261 | if (!URLRequest::IsHandledURL(location)) |
| 262 | return true; |
| 263 | |
| 264 | static const char* kSafeSchemes[] = { |
| 265 | "http", |
| 266 | "https", |
| 267 | "ftp" |
| 268 | }; |
| 269 | |
| 270 | for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) { |
| 271 | if (location.SchemeIs(kSafeSchemes[i])) |
| 272 | return true; |
| 273 | } |
| 274 | |
| 275 | return false; |
| 276 | } |
| 277 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 278 | bool URLRequestHttpJob::NeedsAuth() { |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 279 | int code = GetResponseCode(); |
| 280 | if (code == -1) |
| 281 | return false; |
| 282 | |
| 283 | // Check if we need either Proxy or WWW Authentication. This could happen |
| 284 | // because we either provided no auth info, or provided incorrect info. |
| 285 | switch (code) { |
| 286 | case 407: |
[email protected] | a9bb6f69 | 2008-07-30 16:40:10 | [diff] [blame] | 287 | if (proxy_auth_state_ == net::AUTH_STATE_CANCELED) |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 288 | return false; |
[email protected] | a9bb6f69 | 2008-07-30 16:40:10 | [diff] [blame] | 289 | proxy_auth_state_ = net::AUTH_STATE_NEED_AUTH; |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 290 | return true; |
| 291 | case 401: |
[email protected] | a9bb6f69 | 2008-07-30 16:40:10 | [diff] [blame] | 292 | if (server_auth_state_ == net::AUTH_STATE_CANCELED) |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 293 | return false; |
[email protected] | a9bb6f69 | 2008-07-30 16:40:10 | [diff] [blame] | 294 | server_auth_state_ = net::AUTH_STATE_NEED_AUTH; |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 295 | return true; |
| 296 | } |
| 297 | return false; |
| 298 | } |
| 299 | |
// Hands out the challenge parsed from the 401/407 response.  Only valid
// after NeedsAuth() returned true.
void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<net::AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // sanity checks:
  DCHECK(proxy_auth_state_ == net::AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == net::AUTH_STATE_NEED_AUTH);
  DCHECK(response_info_->headers->response_code() == 401 ||
         response_info_->headers->response_code() == 407);

  *result = response_info_->auth_challenge;
}
| 313 | |
// Supplies credentials for the pending challenge and restarts the
// transaction with them.
void URLRequestHttpJob::SetAuth(const std::wstring& username,
                                const std::wstring& password) {
  DCHECK(transaction_.get());

  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == net::AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = net::AUTH_STATE_HAVE_AUTH;
  } else {
    DCHECK(server_auth_state_ == net::AUTH_STATE_NEED_AUTH);
    server_auth_state_ = net::AUTH_STATE_HAVE_AUTH;
  }

  RestartTransactionWithAuth(username, password);
}
| 328 | |
// Restarts the transaction with the supplied credentials.  Rebuilds the
// Cookie header (the 401/407 may have updated the cookie store), then
// reports IO_PENDING and arranges for OnStartCompleted either way.
void URLRequestHttpJob::RestartTransactionWithAuth(
    const std::wstring& username,
    const std::wstring& password) {

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers by AddExtraHeaders(), we need to strip them out.
  static const char* const cookie_name[] = { "cookie" };
  request_info_.extra_headers = net::HttpUtil::StripHeaders(
      request_info_.extra_headers, cookie_name, arraysize(cookie_name));
  // TODO(eroman): this ordering is inconsistent with non-restarted request,
  // where cookies header appears second from the bottom.
  request_info_.extra_headers += AssembleRequestCookies();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithAuth(username, password,
                                         &start_callback_);
  if (rv == net::ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, rv));
}
| 361 | |
// Declines the pending auth challenge; the 401/407 body is then delivered
// to the consumer as an ordinary response.
void URLRequestHttpJob::CancelAuth() {
  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == net::AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = net::AUTH_STATE_CANCELED;
  } else {
    DCHECK(server_auth_state_ == net::AUTH_STATE_NEED_AUTH);
    server_auth_state_ = net::AUTH_STATE_CANCELED;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  // OK, let the consumer read the error page...
  //
  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
  // which will cause the consumer to receive OnResponseStarted instead of
  // OnAuthRequired.
  //
  // We have to do this via InvokeLater to avoid "recursing" the consumer.
  //
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, net::OK));
}
| 386 | |
// Resumes a request that was paused by a recoverable error (e.g. an SSL
// certificate error the user chose to override).
void URLRequestHttpJob::ContinueDespiteLastError() {
  // If the transaction was destroyed, then the job was cancelled.
  if (!transaction_.get())
    return;

  DCHECK(!response_info_) << "should not have a response yet";

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartIgnoringLastError(&start_callback_);
  if (rv == net::ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, rv));
}
| 407 | |
// True when the job can produce more data: a live transaction exists and no
// read is currently outstanding.
bool URLRequestHttpJob::GetMoreData() {
  return transaction_.get() && !read_in_progress_;
}
| 411 | |
[email protected] | 9dea9e1f | 2009-01-29 00:30:47 | [diff] [blame] | 412 | bool URLRequestHttpJob::ReadRawData(net::IOBuffer* buf, int buf_size, |
| 413 | int *bytes_read) { |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 414 | DCHECK_NE(buf_size, 0); |
| 415 | DCHECK(bytes_read); |
| 416 | DCHECK(!read_in_progress_); |
| 417 | |
| 418 | int rv = transaction_->Read(buf, buf_size, &read_callback_); |
| 419 | if (rv >= 0) { |
| 420 | *bytes_read = rv; |
| 421 | return true; |
| 422 | } |
| 423 | |
| 424 | if (rv == net::ERR_IO_PENDING) { |
| 425 | read_in_progress_ = true; |
| 426 | SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); |
| 427 | } else { |
| 428 | NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); |
| 429 | } |
| 430 | |
| 431 | return false; |
| 432 | } |
| 433 | |
// Completion callback for starting (or restarting) the transaction.
// |result| is a net error code, net::OK on success.
void URLRequestHttpJob::OnStartCompleted(int result) {
  // If the request was destroyed, then there is no more work to do.
  if (!request_ || !request_->delegate())
    return;

  // If the transaction was destroyed, then the job was cancelled, and
  // we can just ignore this notification.
  if (!transaction_.get())
    return;

  // Clear the IO_PENDING status
  SetStatus(URLRequestStatus());

  if (result == net::OK) {
    NotifyHeadersComplete();
  } else if (ShouldTreatAsCertificateError(result)) {
    // We encountered an SSL certificate error. Ask our delegate to decide
    // what we should do.
    // TODO(wtc): also pass ssl_info.cert_status, or just pass the whole
    // ssl_info.
    request_->delegate()->OnSSLCertificateError(
        request_, result, transaction_->GetResponseInfo()->ssl_info.cert);
  } else {
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}
| 460 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 461 | void URLRequestHttpJob::OnReadCompleted(int result) { |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 462 | read_in_progress_ = false; |
| 463 | |
| 464 | if (result == 0) { |
| 465 | NotifyDone(URLRequestStatus()); |
| 466 | } else if (result < 0) { |
| 467 | NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); |
| 468 | } else { |
| 469 | // Clear the IO_PENDING status |
| 470 | SetStatus(URLRequestStatus()); |
| 471 | } |
| 472 | |
| 473 | NotifyReadComplete(result); |
| 474 | } |
| 475 | |
[email protected] | a9cea754 | 2009-05-20 04:30:23 | [diff] [blame] | 476 | bool URLRequestHttpJob::ShouldTreatAsCertificateError(int result) { |
| 477 | if (!net::IsCertificateError(result)) |
| 478 | return false; |
| 479 | |
| 480 | // Hide the fancy processing behind a command line switch. |
| 481 | if (!CommandLine::ForCurrentProcess()->HasSwitch(switches::kForceHTTPS)) |
| 482 | return true; |
| 483 | |
| 484 | // Check whether our context is using ForceTLS. |
| 485 | if (!context_->force_tls_state()) |
| 486 | return true; |
| 487 | |
| 488 | return !context_->force_tls_state()->IsEnabledForHost( |
| 489 | request_info_.url.host()); |
| 490 | } |
| 491 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 492 | void URLRequestHttpJob::NotifyHeadersComplete() { |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 493 | DCHECK(!response_info_); |
| 494 | |
| 495 | response_info_ = transaction_->GetResponseInfo(); |
| 496 | |
[email protected] | d8fd513 | 2009-05-15 01:06:53 | [diff] [blame] | 497 | // Save boolean, as we'll need this info at destruction time, and filters may |
| 498 | // also need this info. |
| 499 | is_cached_content_ = response_info_->was_cached; |
| 500 | |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 501 | // Get the Set-Cookie values, and send them to our cookie database. |
[email protected] | b843072 | 2008-09-17 20:05:44 | [diff] [blame] | 502 | if (!(request_info_.load_flags & net::LOAD_DO_NOT_SAVE_COOKIES)) { |
| 503 | URLRequestContext* ctx = request_->context(); |
| 504 | if (ctx && ctx->cookie_store() && |
[email protected] | cfd73321 | 2009-05-23 18:11:10 | [diff] [blame] | 505 | ctx->cookie_policy()->CanSetCookie( |
| 506 | request_->url(), request_->first_party_for_cookies())) { |
[email protected] | b843072 | 2008-09-17 20:05:44 | [diff] [blame] | 507 | FetchResponseCookies(); |
[email protected] | 3a96c74 | 2008-11-19 19:46:27 | [diff] [blame] | 508 | net::CookieMonster::CookieOptions options; |
| 509 | options.set_include_httponly(); |
| 510 | ctx->cookie_store()->SetCookiesWithOptions(request_->url(), |
| 511 | response_cookies_, |
| 512 | options); |
[email protected] | b843072 | 2008-09-17 20:05:44 | [diff] [blame] | 513 | } |
| 514 | } |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 515 | |
[email protected] | a9cea754 | 2009-05-20 04:30:23 | [diff] [blame] | 516 | ProcessForceTLSHeader(); |
| 517 | |
[email protected] | fe21987 | 2008-09-23 02:17:00 | [diff] [blame] | 518 | if (SdchManager::Global() && |
| 519 | SdchManager::Global()->IsInSupportedDomain(request_->url())) { |
[email protected] | 6088942 | 2008-09-23 01:18:16 | [diff] [blame] | 520 | static const std::string name = "Get-Dictionary"; |
| 521 | std::string url_text; |
| 522 | void* iter = NULL; |
| 523 | // TODO(jar): We need to not fetch dictionaries the first time they are |
| 524 | // seen, but rather wait until we can justify their usefulness. |
| 525 | // For now, we will only fetch the first dictionary, which will at least |
| 526 | // require multiple suggestions before we get additional ones for this site. |
| 527 | // Eventually we should wait until a dictionary is requested several times |
| 528 | // before we even download it (so that we don't waste memory or bandwidth). |
| 529 | if (response_info_->headers->EnumerateHeader(&iter, name, &url_text)) { |
[email protected] | d55ad15d | 2009-02-17 19:40:50 | [diff] [blame] | 530 | // request_->url() won't be valid in the destructor, so we use an |
| 531 | // alternate copy. |
| 532 | DCHECK(request_->url() == request_info_.url); |
| 533 | // Resolve suggested URL relative to request url. |
| 534 | sdch_dictionary_url_ = request_info_.url.Resolve(url_text); |
[email protected] | 6088942 | 2008-09-23 01:18:16 | [diff] [blame] | 535 | } |
| 536 | } |
| 537 | |
[email protected] | 0757e770 | 2009-03-27 04:00:22 | [diff] [blame] | 538 | // The HTTP transaction may be restarted several times for the purposes |
| 539 | // of sending authorization information. Each time it restarts, we get |
| 540 | // notified of the headers completion so that we can update the cookie store. |
| 541 | if (transaction_->IsReadyToRestartForAuth()) { |
| 542 | DCHECK(!response_info_->auth_challenge.get()); |
| 543 | RestartTransactionWithAuth(std::wstring(), std::wstring()); |
| 544 | return; |
| 545 | } |
| 546 | |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 547 | URLRequestJob::NotifyHeadersComplete(); |
| 548 | } |
| 549 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 550 | void URLRequestHttpJob::DestroyTransaction() { |
[email protected] | af4876d | 2008-10-21 23:10:57 | [diff] [blame] | 551 | DCHECK(transaction_.get()); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 552 | |
[email protected] | af4876d | 2008-10-21 23:10:57 | [diff] [blame] | 553 | transaction_.reset(); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 554 | response_info_ = NULL; |
| 555 | } |
| 556 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 557 | void URLRequestHttpJob::StartTransaction() { |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 558 | // NOTE: This method assumes that request_info_ is already setup properly. |
| 559 | |
| 560 | // Create a transaction. |
[email protected] | af4876d | 2008-10-21 23:10:57 | [diff] [blame] | 561 | DCHECK(!transaction_.get()); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 562 | |
| 563 | DCHECK(request_->context()); |
| 564 | DCHECK(request_->context()->http_transaction_factory()); |
| 565 | |
[email protected] | af4876d | 2008-10-21 23:10:57 | [diff] [blame] | 566 | transaction_.reset( |
| 567 | request_->context()->http_transaction_factory()->CreateTransaction()); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 568 | |
| 569 | // No matter what, we want to report our status as IO pending since we will |
| 570 | // be notifying our consumer asynchronously via OnStartCompleted. |
| 571 | SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); |
| 572 | |
| 573 | int rv; |
[email protected] | af4876d | 2008-10-21 23:10:57 | [diff] [blame] | 574 | if (transaction_.get()) { |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 575 | rv = transaction_->Start(&request_info_, &start_callback_); |
| 576 | if (rv == net::ERR_IO_PENDING) |
| 577 | return; |
| 578 | } else { |
| 579 | rv = net::ERR_FAILED; |
| 580 | } |
| 581 | |
| 582 | // The transaction started synchronously, but we need to notify the |
| 583 | // URLRequest delegate via the message loop. |
| 584 | MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod( |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 585 | this, &URLRequestHttpJob::OnStartCompleted, rv)); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 586 | } |
| 587 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 588 | void URLRequestHttpJob::AddExtraHeaders() { |
[email protected] | 5b90b5d | 2009-04-30 23:06:01 | [diff] [blame] | 589 | // TODO(jar): Consider optimizing away SDCH advertising bytes when the URL is |
| 590 | // probably an img or such (and SDCH encoding is not likely). |
| 591 | bool advertise_sdch = SdchManager::Global() && |
| 592 | SdchManager::Global()->IsInSupportedDomain(request_->url()); |
| 593 | std::string avail_dictionaries; |
| 594 | if (advertise_sdch) { |
| 595 | SdchManager::Global()->GetAvailDictionaryList(request_->url(), |
| 596 | &avail_dictionaries); |
| 597 | |
| 598 | // The AllowLatencyExperiment() is only true if we've successfully done a |
| 599 | // full SDCH compression recently in this browser session for this host. |
| 600 | // Note that for this path, there might be no applicable dictionaries, and |
| 601 | // hence we can't participate in the experiment. |
| 602 | if (!avail_dictionaries.empty() && |
| 603 | SdchManager::Global()->AllowLatencyExperiment(request_->url())) { |
| 604 | // We are participating in the test (or control), and hence we'll |
| 605 | // eventually record statistics via either SDCH_EXPERIMENT_DECODE or |
| 606 | // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data. |
| 607 | EnablePacketCounting(kSdchPacketHistogramCount); |
[email protected] | a88af523 | 2009-06-05 01:34:53 | [diff] [blame^] | 608 | if (base::RandDouble() < .01) { |
| 609 | sdch_test_control_ = true; // 1% probability. |
[email protected] | 5b90b5d | 2009-04-30 23:06:01 | [diff] [blame] | 610 | advertise_sdch = false; |
| 611 | } else { |
| 612 | sdch_test_activated_ = true; |
| 613 | } |
| 614 | } |
| 615 | } |
| 616 | |
[email protected] | 423041b | 2008-10-27 17:39:28 | [diff] [blame] | 617 | // Supply Accept-Encoding headers first so that it is more likely that they |
| 618 | // will be in the first transmitted packet. This can sometimes make it easier |
| 619 | // to filter and analyze the streams to assure that a proxy has not damaged |
| 620 | // these headers. Some proxies deliberately corrupt Accept-Encoding headers. |
[email protected] | 5b90b5d | 2009-04-30 23:06:01 | [diff] [blame] | 621 | if (!advertise_sdch) { |
[email protected] | 423041b | 2008-10-27 17:39:28 | [diff] [blame] | 622 | // Tell the server what compression formats we support (other than SDCH). |
| 623 | request_info_.extra_headers += "Accept-Encoding: gzip,deflate,bzip2\r\n"; |
| 624 | } else { |
[email protected] | 5b90b5d | 2009-04-30 23:06:01 | [diff] [blame] | 625 | // Include SDCH in acceptable list. |
[email protected] | 423041b | 2008-10-27 17:39:28 | [diff] [blame] | 626 | request_info_.extra_headers += "Accept-Encoding: " |
| 627 | "gzip,deflate,bzip2,sdch\r\n"; |
[email protected] | 423041b | 2008-10-27 17:39:28 | [diff] [blame] | 628 | if (!avail_dictionaries.empty()) { |
| 629 | request_info_.extra_headers += "Avail-Dictionary: " |
| 630 | + avail_dictionaries + "\r\n"; |
[email protected] | 5b90b5d | 2009-04-30 23:06:01 | [diff] [blame] | 631 | sdch_dictionary_advertised_ = true; |
| 632 | // Since we're tagging this transaction as advertising a dictionary, we'll |
| 633 | // definately employ an SDCH filter (or tentative sdch filter) when we get |
| 634 | // a response. When done, we'll record histograms via SDCH_DECODE or |
| 635 | // SDCH_PASSTHROUGH. Hence we need to record packet arrival times. |
| 636 | EnablePacketCounting(kSdchPacketHistogramCount); |
[email protected] | 423041b | 2008-10-27 17:39:28 | [diff] [blame] | 637 | } |
[email protected] | 423041b | 2008-10-27 17:39:28 | [diff] [blame] | 638 | } |
| 639 | |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 640 | URLRequestContext* context = request_->context(); |
| 641 | if (context) { |
[email protected] | 0757e770 | 2009-03-27 04:00:22 | [diff] [blame] | 642 | request_info_.extra_headers += AssembleRequestCookies(); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 643 | if (!context->accept_language().empty()) |
| 644 | request_info_.extra_headers += "Accept-Language: " + |
| 645 | context->accept_language() + "\r\n"; |
| 646 | if (!context->accept_charset().empty()) |
| 647 | request_info_.extra_headers += "Accept-Charset: " + |
| 648 | context->accept_charset() + "\r\n"; |
| 649 | } |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 650 | } |
| 651 | |
[email protected] | 0757e770 | 2009-03-27 04:00:22 | [diff] [blame] | 652 | std::string URLRequestHttpJob::AssembleRequestCookies() { |
| 653 | URLRequestContext* context = request_->context(); |
| 654 | if (context) { |
| 655 | // Add in the cookie header. TODO might we need more than one header? |
| 656 | if (context->cookie_store() && |
[email protected] | cfd73321 | 2009-05-23 18:11:10 | [diff] [blame] | 657 | context->cookie_policy()->CanGetCookies( |
| 658 | request_->url(), request_->first_party_for_cookies())) { |
[email protected] | 0757e770 | 2009-03-27 04:00:22 | [diff] [blame] | 659 | net::CookieMonster::CookieOptions options; |
| 660 | options.set_include_httponly(); |
| 661 | std::string cookies = request_->context()->cookie_store()-> |
| 662 | GetCookiesWithOptions(request_->url(), options); |
| 663 | if (!cookies.empty()) |
| 664 | return "Cookie: " + cookies + "\r\n"; |
| 665 | } |
| 666 | } |
| 667 | return std::string(); |
| 668 | } |
| 669 | |
[email protected] | 175adac | 2008-07-30 17:28:04 | [diff] [blame] | 670 | void URLRequestHttpJob::FetchResponseCookies() { |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 671 | DCHECK(response_info_); |
| 672 | DCHECK(response_cookies_.empty()); |
| 673 | |
| 674 | std::string name = "Set-Cookie"; |
| 675 | std::string value; |
| 676 | |
| 677 | void* iter = NULL; |
| 678 | while (response_info_->headers->EnumerateHeader(&iter, name, &value)) |
| 679 | response_cookies_.push_back(value); |
| 680 | } |
[email protected] | a9cea754 | 2009-05-20 04:30:23 | [diff] [blame] | 681 | |
| 682 | |
| 683 | void URLRequestHttpJob::ProcessForceTLSHeader() { |
| 684 | DCHECK(response_info_); |
| 685 | |
| 686 | // Hide processing behind a command line flag. |
| 687 | if (!CommandLine::ForCurrentProcess()->HasSwitch(switches::kForceHTTPS)) |
| 688 | return; |
| 689 | |
| 690 | // Only process X-Force-TLS from HTTPS responses. |
| 691 | if (request_info_.url.scheme() != "https") |
| 692 | return; |
| 693 | |
| 694 | // Only process X-Force-TLS from responses with valid certificates. |
| 695 | if (response_info_->ssl_info.cert_status & net::CERT_STATUS_ALL_ERRORS) |
| 696 | return; |
| 697 | |
| 698 | URLRequestContext* ctx = request_->context(); |
| 699 | if (!ctx || !ctx->force_tls_state()) |
| 700 | return; |
| 701 | |
| 702 | std::string name = "X-Force-TLS"; |
| 703 | std::string value; |
| 704 | |
| 705 | void* iter = NULL; |
| 706 | while (response_info_->headers->EnumerateHeader(&iter, name, &value)) |
| 707 | ctx->force_tls_state()->DidReceiveHeader(request_info_.url, value); |
| 708 | } |