// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_http_job.h"

#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/file_util.h"
#include "base/file_version_info.h"
#include "base/message_loop.h"
#include "base/rand_util.h"
#include "base/string_util.h"
#include "net/base/cert_status_flags.h"
#include "net/base/filter.h"
#include "net/base/load_flags.h"
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/base/sdch_manager.h"
#include "net/base/ssl_cert_request_info.h"
#include "net/base/strict_transport_security_state.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
#include "net/http/http_transaction.h"
#include "net/http/http_transaction_factory.h"
#include "net/http/http_util.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_error_job.h"
#include "net/url_request/url_request_redirect_job.h"

// static
std::set<int> URLRequestHttpJob::explicitly_allowed_ports_;

// TODO(darin): make sure the port blocking code is not lost

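// Creates the job for an http/https request. Requests to unsafe ports are
// rejected, a valid context is required, and, when the kForceHTTPS switch is
// set, http requests to hosts with Strict-Transport-Security enabled are
// redirected to https.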
// static
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https");

  int port = request->url().IntPort();
  if (!net::IsPortAllowedByDefault(port) && !IsPortAllowedByOverride(port))
    return new URLRequestErrorJob(request, net::ERR_UNSAFE_PORT);

  if (!request->context() ||
      !request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(request, net::ERR_INVALID_ARGUMENT);
  }

  // We cache the value of the switch because this code path is hit on every
  // network request.
  static const bool kForceHTTPS =
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kForceHTTPS);
  if (kForceHTTPS && scheme == "http" &&
      request->context()->strict_transport_security_state() &&
      request->context()->strict_transport_security_state()->IsEnabledForHost(
          request->url().host())) {
    DCHECK_EQ(request->url().scheme(), "http");
    url_canon::Replacements<char> replacements;
    static const char kNewScheme[] = "https";
    replacements.SetScheme(kNewScheme,
                           url_parse::Component(0, strlen(kNewScheme)));
    GURL new_location = request->url().ReplaceComponents(replacements);
    return new URLRequestRedirectJob(request, new_location);
  }

  return new URLRequestHttpJob(request);
}

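// Parses the comma-separated list of port numbers supplied on the command
// line and records them so that Factory() will not reject them as unsafe.
// Malformed input (anything other than digits and commas) is ignored.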
// static
void URLRequestHttpJob::SetExplicitlyAllowedPorts(
    const std::wstring& allowed_ports) {
  if (allowed_ports.empty())
    return;

  std::set<int> ports;
  size_t last = 0;
  size_t size = allowed_ports.size();
  // The comma delimiter.
  const std::wstring::value_type kComma = L',';

  // Overflow is still possible for evil user inputs.
  for (size_t i = 0; i <= size; ++i) {
    // The string should be composed of only digits and commas.
    if (i != size && !IsAsciiDigit(allowed_ports[i]) &&
        (allowed_ports[i] != kComma))
      return;
    if (i == size || allowed_ports[i] == kComma) {
      size_t length = i - last;
      if (length > 0)
        ports.insert(StringToInt(WideToASCII(
            allowed_ports.substr(last, length))));
      last = i + 1;
    }
  }
  explicitly_allowed_ports_ = ports;
}

URLRequestHttpJob::URLRequestHttpJob(URLRequest* request)
    : URLRequestJob(request),
      context_(request->context()),
      response_info_(NULL),
      proxy_auth_state_(net::AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(net::AUTH_STATE_DONT_NEED_AUTH),
      ALLOW_THIS_IN_INITIALIZER_LIST(
          start_callback_(this, &URLRequestHttpJob::OnStartCompleted)),
      ALLOW_THIS_IN_INITIALIZER_LIST(
          read_callback_(this, &URLRequestHttpJob::OnReadCompleted)),
      read_in_progress_(false),
      transaction_(NULL),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false) {
}

URLRequestHttpJob::~URLRequestHttpJob() {
  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!IsCachedContent()) {
    if (sdch_test_control_)
      RecordPacketStats(SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while this class
  // can still service the IsCachedContent() call.
  DestroyFilters();

  if (sdch_dictionary_url_.is_valid()) {
    // Prior to reaching the destructor, request_ has been set to a NULL
    // pointer, so request_->url() is no longer valid in the destructor, and we
    // use an alternate copy |request_info_.url|.
    SdchManager* manager = SdchManager::Global();
    // To be extra safe, since this is a "different time" from when we decided
    // to get the dictionary, we'll validate that an SdchManager is available.
    // At shutdown time, care is taken to be sure that we don't delete this
    // globally useful instance "too soon," so this check is just defensive
    // coding to assure that IF the system is shutting down, we don't have any
    // problem if the manager was deleted ahead of time.
    if (manager)  // Defensive programming.
      manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
  }
}

void URLRequestHttpJob::SetUpload(net::UploadData* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data = upload;
}

void URLRequestHttpJob::SetExtraRequestHeaders(
    const std::string& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers = headers;
}

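// Builds request_info_ from the owning URLRequest (using a sanitized
// referrer), appends the extra request headers, and starts the transaction.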
void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // Ensure that we do not send username and password fields in the referrer.
  GURL referrer(request_->GetSanitizedReferrer());

  request_info_.url = request_->url();
  request_info_.referrer = referrer;
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  request_info_.priority = request_->priority();

  if (request_->context()) {
    request_info_.user_agent =
        request_->context()->GetUserAgent(request_->url());
  }

  AddExtraHeaders();

  StartTransaction();
}

void URLRequestHttpJob::Kill() {
  if (!transaction_.get())
    return;

  DestroyTransaction();
  URLRequestJob::Kill();
}

net::LoadState URLRequestHttpJob::GetLoadState() const {
  return transaction_.get() ?
      transaction_->GetLoadState() : net::LOAD_STATE_IDLE;
}

uint64 URLRequestHttpJob::GetUploadProgress() const {
  return transaction_.get() ? transaction_->GetUploadProgress() : 0;
}

bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return response_info_->headers->GetMimeType(mime_type);
}

bool URLRequestHttpJob::GetCharset(std::string* charset) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return response_info_->headers->GetCharset(charset);
}

void URLRequestHttpJob::GetResponseInfo(net::HttpResponseInfo* info) {
  DCHECK(request_);
  DCHECK(transaction_.get());

  if (response_info_)
    *info = *response_info_;
}

bool URLRequestHttpJob::GetResponseCookies(
    std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  if (response_cookies_.empty())
    FetchResponseCookies();

  cookies->clear();
  cookies->swap(response_cookies_);
  return true;
}

int URLRequestHttpJob::GetResponseCode() const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return -1;

  return response_info_->headers->response_code();
}

bool URLRequestHttpJob::GetContentEncodings(
    std::vector<Filter::FilterType>* encoding_types) {
  DCHECK(transaction_.get());
  if (!response_info_)
    return false;
  DCHECK(encoding_types->empty());

  std::string encoding_type;
  void* iter = NULL;
  while (response_info_->headers->EnumerateHeader(&iter, "Content-Encoding",
                                                  &encoding_type)) {
    encoding_types->push_back(Filter::ConvertEncodingToType(encoding_type));
  }

  // Even if encoding types are empty, there is a chance that we need to add
  // some decoding, as some proxies strip encoding completely. In such cases,
  // we may need to add (for example) SDCH filtering (when the context suggests
  // it is appropriate).
  Filter::FixupEncodingTypes(*this, encoding_types);

  return !encoding_types->empty();
}

bool URLRequestHttpJob::IsSdchResponse() const {
  return sdch_dictionary_advertised_;
}

bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
  // We only allow redirects to certain "safe" protocols. This does not
  // restrict redirects to externally handled protocols. Our consumer would
  // need to take care of those.

  if (!URLRequest::IsHandledURL(location))
    return true;

  static const char* kSafeSchemes[] = {
    "http",
    "https",
    "ftp"
  };

  for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) {
    if (location.SchemeIs(kSafeSchemes[i]))
      return true;
  }

  return false;
}

bool URLRequestHttpJob::NeedsAuth() {
  int code = GetResponseCode();
  if (code == -1)
    return false;

  // Check if we need either Proxy or WWW Authentication. This could happen
  // because we either provided no auth info, or provided incorrect info.
  switch (code) {
    case 407:
      if (proxy_auth_state_ == net::AUTH_STATE_CANCELED)
        return false;
      proxy_auth_state_ = net::AUTH_STATE_NEED_AUTH;
      return true;
    case 401:
      if (server_auth_state_ == net::AUTH_STATE_CANCELED)
        return false;
      server_auth_state_ = net::AUTH_STATE_NEED_AUTH;
      return true;
  }
  return false;
}

void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<net::AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // Sanity checks:
  DCHECK(proxy_auth_state_ == net::AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == net::AUTH_STATE_NEED_AUTH);
  DCHECK(response_info_->headers->response_code() == 401 ||
         response_info_->headers->response_code() == 407);

  *result = response_info_->auth_challenge;
}

void URLRequestHttpJob::SetAuth(const std::wstring& username,
                                const std::wstring& password) {
  DCHECK(transaction_.get());

  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == net::AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = net::AUTH_STATE_HAVE_AUTH;
  } else {
    DCHECK(server_auth_state_ == net::AUTH_STATE_NEED_AUTH);
    server_auth_state_ = net::AUTH_STATE_HAVE_AUTH;
  }

  RestartTransactionWithAuth(username, password);
}

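// Restarts the transaction with the supplied credentials. The Cookie header
// is rebuilt first, since the 401/407 response may have updated the cookie
// store.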
void URLRequestHttpJob::RestartTransactionWithAuth(
    const std::wstring& username,
    const std::wstring& password) {
  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers by AddExtraHeaders(), we need to strip them out.
  static const char* const cookie_name[] = { "cookie" };
  request_info_.extra_headers = net::HttpUtil::StripHeaders(
      request_info_.extra_headers, cookie_name, arraysize(cookie_name));
  // TODO(eroman): this ordering is inconsistent with a non-restarted request,
  // where the cookies header appears second from the bottom.
  request_info_.extra_headers += AssembleRequestCookies();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithAuth(username, password,
                                         &start_callback_);
  if (rv == net::ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, rv));
}

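// Returns true if |port| was explicitly allowed via
// SetExplicitlyAllowedPorts().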
// static
bool URLRequestHttpJob::IsPortAllowedByOverride(int port) {
  if (explicitly_allowed_ports().empty())
    return false;

  std::set<int>::const_iterator it =
      std::find(explicitly_allowed_ports().begin(),
                explicitly_allowed_ports().end(),
                port);

  return it != explicitly_allowed_ports().end();
}

void URLRequestHttpJob::CancelAuth() {
  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == net::AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = net::AUTH_STATE_CANCELED;
  } else {
    DCHECK(server_auth_state_ == net::AUTH_STATE_NEED_AUTH);
    server_auth_state_ = net::AUTH_STATE_CANCELED;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  // OK, let the consumer read the error page...
  //
  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
  // which will cause the consumer to receive OnResponseStarted instead of
  // OnAuthRequired.
  //
  // We have to do this via InvokeLater to avoid "recursing" the consumer.
  //
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, net::OK));
}

void URLRequestHttpJob::ContinueWithCertificate(
    net::X509Certificate* client_cert) {
  DCHECK(transaction_.get());

  DCHECK(!response_info_) << "should not have a response yet";

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithCertificate(client_cert, &start_callback_);
  if (rv == net::ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, rv));
}

void URLRequestHttpJob::ContinueDespiteLastError() {
  // If the transaction was destroyed, then the job was cancelled.
  if (!transaction_.get())
    return;

  DCHECK(!response_info_) << "should not have a response yet";

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartIgnoringLastError(&start_callback_);
  if (rv == net::ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, rv));
}

bool URLRequestHttpJob::GetMoreData() {
  return transaction_.get() && !read_in_progress_;
}

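// Reads response body data from the transaction. Returns true and sets
// |*bytes_read| when the read completes synchronously; otherwise returns
// false, with the status left as IO_PENDING if the read is still in flight,
// or FAILED on error.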
bool URLRequestHttpJob::ReadRawData(net::IOBuffer* buf, int buf_size,
                                    int* bytes_read) {
  DCHECK_NE(buf_size, 0);
  DCHECK(bytes_read);
  DCHECK(!read_in_progress_);

  int rv = transaction_->Read(buf, buf_size, &read_callback_);
  if (rv >= 0) {
    *bytes_read = rv;
    return true;
  }

  if (rv == net::ERR_IO_PENDING) {
    read_in_progress_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  } else {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
  }

  return false;
}

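// Callback for transaction start/restart. Certificate errors and client
// certificate requests are forwarded to the URLRequest delegate; otherwise
// either the response headers or the start error is reported.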
void URLRequestHttpJob::OnStartCompleted(int result) {
  // If the request was destroyed, then there is no more work to do.
  if (!request_ || !request_->delegate())
    return;

  // If the transaction was destroyed, then the job was cancelled, and
  // we can just ignore this notification.
  if (!transaction_.get())
    return;

  // Clear the IO_PENDING status.
  SetStatus(URLRequestStatus());

  if (result == net::OK) {
    NotifyHeadersComplete();
  } else if (ShouldTreatAsCertificateError(result)) {
    // We encountered an SSL certificate error. Ask our delegate to decide
    // what we should do.
    // TODO(wtc): also pass ssl_info.cert_status, or just pass the whole
    // ssl_info.
    request_->delegate()->OnSSLCertificateError(
        request_, result, transaction_->GetResponseInfo()->ssl_info.cert);
  } else if (result == net::ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
    request_->delegate()->OnCertificateRequested(
        request_, transaction_->GetResponseInfo()->cert_request_info);
  } else {
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

void URLRequestHttpJob::OnReadCompleted(int result) {
  read_in_progress_ = false;

  if (result == 0) {
    NotifyDone(URLRequestStatus());
  } else if (result < 0) {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
  } else {
    // Clear the IO_PENDING status.
    SetStatus(URLRequestStatus());
  }

  NotifyReadComplete(result);
}

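// Returns true if |result| is a certificate error that the delegate should be
// allowed to override. When the kForceHTTPS switch is set, certificate errors
// for hosts with Strict-Transport-Security enabled are treated as fatal
// instead.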
bool URLRequestHttpJob::ShouldTreatAsCertificateError(int result) {
  if (!net::IsCertificateError(result))
    return false;

  // Hide the fancy processing behind a command line switch.
  if (!CommandLine::ForCurrentProcess()->HasSwitch(switches::kForceHTTPS))
    return true;

  // Check whether our context is using Strict-Transport-Security.
  if (!context_->strict_transport_security_state())
    return true;

  return !context_->strict_transport_security_state()->IsEnabledForHost(
      request_info_.url.host());
}

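// Invoked once response headers are available: records whether the response
// came from the cache, stores response cookies, processes the
// Strict-Transport-Security and Get-Dictionary headers, and restarts the
// transaction if it is ready to retry with cached auth credentials.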
void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  // Get the Set-Cookie values, and send them to our cookie database.
  if (!(request_info_.load_flags & net::LOAD_DO_NOT_SAVE_COOKIES)) {
    URLRequestContext* ctx = request_->context();
    if (ctx && ctx->cookie_store() &&
        ctx->cookie_policy()->CanSetCookie(
            request_->url(), request_->first_party_for_cookies())) {
      FetchResponseCookies();
      net::CookieOptions options;
      options.set_include_httponly();
      ctx->cookie_store()->SetCookiesWithOptions(request_->url(),
                                                 response_cookies_,
                                                 options);
    }
  }

  ProcessStrictTransportSecurityHeader();

  if (SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url())) {
    static const std::string name = "Get-Dictionary";
    std::string url_text;
    void* iter = NULL;
    // TODO(jar): We need to not fetch dictionaries the first time they are
    // seen, but rather wait until we can justify their usefulness.
    // For now, we will only fetch the first dictionary, which will at least
    // require multiple suggestions before we get additional ones for this
    // site. Eventually we should wait until a dictionary is requested several
    // times before we even download it (so that we don't waste memory or
    // bandwidth).
    if (response_info_->headers->EnumerateHeader(&iter, name, &url_text)) {
      // request_->url() won't be valid in the destructor, so we use an
      // alternate copy.
      DCHECK(request_->url() == request_info_.url);
      // Resolve suggested URL relative to request url.
      sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information. Each time it restarts, we get
  // notified of the headers completion so that we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    RestartTransactionWithAuth(std::wstring(), std::wstring());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}

void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  transaction_.reset();
  response_info_ = NULL;
}

void URLRequestHttpJob::StartTransaction() {
  // NOTE: This method assumes that request_info_ is already setup properly.

  // Create a transaction.
  DCHECK(!transaction_.get());

  DCHECK(request_->context());
  DCHECK(request_->context()->http_transaction_factory());

  transaction_.reset(
      request_->context()->http_transaction_factory()->CreateTransaction());

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv;
  if (transaction_.get()) {
    rv = transaction_->Start(
        &request_info_, &start_callback_, request_->load_log());
    if (rv == net::ERR_IO_PENDING)
      return;
  } else {
    rv = net::ERR_FAILED;
  }

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, rv));
}

void URLRequestHttpJob::AddExtraHeaders() {
  // TODO(jar): Consider optimizing away SDCH advertising bytes when the URL is
  // probably an img or such (and SDCH encoding is not likely).
  bool advertise_sdch = SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url());
  std::string avail_dictionaries;
  if (advertise_sdch) {
    SdchManager::Global()->GetAvailDictionaryList(request_->url(),
                                                  &avail_dictionaries);

    // The AllowLatencyExperiment() is only true if we've successfully done a
    // full SDCH compression recently in this browser session for this host.
    // Note that for this path, there might be no applicable dictionaries, and
    // hence we can't participate in the experiment.
    if (!avail_dictionaries.empty() &&
        SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
      // We are participating in the test (or control), and hence we'll
      // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
      // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
      EnablePacketCounting(kSdchPacketHistogramCount);
      if (base::RandDouble() < .01) {
        sdch_test_control_ = true;  // 1% probability.
        advertise_sdch = false;
      } else {
        sdch_test_activated_ = true;
      }
    }
  }

  // Supply Accept-Encoding headers first so that it is more likely that they
  // will be in the first transmitted packet. This can sometimes make it easier
  // to filter and analyze the streams to assure that a proxy has not damaged
  // these headers. Some proxies deliberately corrupt Accept-Encoding headers.
  if (!advertise_sdch) {
    // Tell the server what compression formats we support (other than SDCH).
    request_info_.extra_headers += "Accept-Encoding: gzip,deflate\r\n";
  } else {
    // Include SDCH in acceptable list.
    request_info_.extra_headers += "Accept-Encoding: "
        "gzip,deflate,sdch\r\n";
    if (!avail_dictionaries.empty()) {
      request_info_.extra_headers += "Avail-Dictionary: "
          + avail_dictionaries + "\r\n";
      sdch_dictionary_advertised_ = true;
      // Since we're tagging this transaction as advertising a dictionary,
      // we'll definitely employ an SDCH filter (or tentative sdch filter)
      // when we get a response. When done, we'll record histograms via
      // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet
      // arrival times.
      EnablePacketCounting(kSdchPacketHistogramCount);
    }
  }

  URLRequestContext* context = request_->context();
  if (context) {
    if (context->AllowSendingCookies(request_))
      request_info_.extra_headers += AssembleRequestCookies();
    if (!context->accept_language().empty())
      request_info_.extra_headers += "Accept-Language: " +
          context->accept_language() + "\r\n";
    if (!context->accept_charset().empty())
      request_info_.extra_headers += "Accept-Charset: " +
          context->accept_charset() + "\r\n";
  }
}

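// Builds the "Cookie:" request header from the cookie store, subject to
// LOAD_DO_NOT_SEND_COOKIES and the context's cookie policy. Returns an empty
// string when no cookies should be sent.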
std::string URLRequestHttpJob::AssembleRequestCookies() {
  if (request_info_.load_flags & net::LOAD_DO_NOT_SEND_COOKIES)
    return std::string();

  URLRequestContext* context = request_->context();
  if (context) {
    // Add in the cookie header. TODO might we need more than one header?
    if (context->cookie_store() &&
        context->cookie_policy()->CanGetCookies(
            request_->url(), request_->first_party_for_cookies())) {
      net::CookieOptions options;
      options.set_include_httponly();
      std::string cookies = request_->context()->cookie_store()->
          GetCookiesWithOptions(request_->url(), options);
      if (!cookies.empty())
        return "Cookie: " + cookies + "\r\n";
    }
  }
  return std::string();
}

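// Copies the Set-Cookie response headers into |response_cookies_|, giving the
// context a chance to intercept each value first.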
void URLRequestHttpJob::FetchResponseCookies() {
  DCHECK(response_info_);
  DCHECK(response_cookies_.empty());

  std::string name = "Set-Cookie";
  std::string value;

  void* iter = NULL;
  while (response_info_->headers->EnumerateHeader(&iter, name, &value))
    if (request_->context()->InterceptCookie(request_, &value))
      response_cookies_.push_back(value);
}

void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
  DCHECK(response_info_);

  // Hide processing behind a command line flag.
  if (!CommandLine::ForCurrentProcess()->HasSwitch(switches::kForceHTTPS))
    return;

  // Only process Strict-Transport-Security from HTTPS responses.
  if (request_info_.url.scheme() != "https")
    return;

  // Only process Strict-Transport-Security from responses with valid
  // certificates.
  if (response_info_->ssl_info.cert_status & net::CERT_STATUS_ALL_ERRORS)
    return;

  URLRequestContext* ctx = request_->context();
  if (!ctx || !ctx->strict_transport_security_state())
    return;

  std::string name = "Strict-Transport-Security";
  std::string value;

  void* iter = NULL;
  while (response_info_->headers->EnumerateHeader(&iter, name, &value)) {
    ctx->strict_transport_security_state()->DidReceiveHeader(
        request_info_.url, value);
  }
}