// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_http_job.h"

#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/file_util.h"
#include "base/file_version_info.h"
#include "base/message_loop.h"
#include "base/rand_util.h"
#include "base/string_util.h"
#include "net/base/cert_status_flags.h"
#include "net/base/filter.h"
#include "net/base/force_tls_state.h"
#include "net/base/load_flags.h"
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/base/sdch_manager.h"
#include "net/base/ssl_cert_request_info.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
#include "net/http/http_transaction.h"
#include "net/http/http_transaction_factory.h"
#include "net/http/http_util.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_error_job.h"

// static
std::set<int> URLRequestHttpJob::explicitly_allowed_ports_;

// TODO(darin): make sure the port blocking code is not lost

// static
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https");

  int port = request->url().IntPort();
  if (!net::IsPortAllowedByDefault(port) && !IsPortAllowedByOverride(port))
    return new URLRequestErrorJob(request, net::ERR_UNSAFE_PORT);

  if (!request->context() ||
      !request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(request, net::ERR_INVALID_ARGUMENT);
  }

  // We cache the value of the switch because this code path is hit on every
  // network request.
  static const bool kForceHTTPS =
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kForceHTTPS);
  if (kForceHTTPS && scheme == "http" &&
      request->context()->force_tls_state() &&
      request->context()->force_tls_state()->IsEnabledForHost(
          request->url().host()))
    return new URLRequestErrorJob(request, net::ERR_DISALLOWED_URL_SCHEME);

  return new URLRequestHttpJob(request);
}

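// Parses a comma-delimited list of decimal port numbers (e.g. "81,444") into
// |explicitly_allowed_ports_|. The loop below intentionally runs one index
// past the end of the string so that the final port, which has no trailing
// comma, is also flushed into |ports|.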
// static
void URLRequestHttpJob::SetExplicitlyAllowedPorts(
    const std::wstring& allowed_ports) {
  if (allowed_ports.empty())
    return;

  std::set<int> ports;
  size_t last = 0;
  size_t size = allowed_ports.size();
  // The comma delimiter.
  const std::wstring::value_type kComma = L',';

  // Overflow is still possible for evil user inputs.
  for (size_t i = 0; i <= size; ++i) {
    // The string should be composed of only digits and commas.
    if (i != size && !IsAsciiDigit(allowed_ports[i]) &&
        (allowed_ports[i] != kComma))
      return;
    if (i == size || allowed_ports[i] == kComma) {
      size_t length = i - last;
      if (length > 0)
        ports.insert(StringToInt(WideToASCII(
            allowed_ports.substr(last, length))));
      last = i + 1;
    }
  }
  explicitly_allowed_ports_ = ports;
}

URLRequestHttpJob::URLRequestHttpJob(URLRequest* request)
    : URLRequestJob(request),
      context_(request->context()),
      response_info_(NULL),
      proxy_auth_state_(net::AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(net::AUTH_STATE_DONT_NEED_AUTH),
      ALLOW_THIS_IN_INITIALIZER_LIST(
          start_callback_(this, &URLRequestHttpJob::OnStartCompleted)),
      ALLOW_THIS_IN_INITIALIZER_LIST(
          read_callback_(this, &URLRequestHttpJob::OnReadCompleted)),
      read_in_progress_(false),
      transaction_(NULL),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false) {
}

URLRequestHttpJob::~URLRequestHttpJob() {
  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!IsCachedContent()) {
    if (sdch_test_control_)
      RecordPacketStats(SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while this class
  // can still service the IsCachedContent() call.
  DestroyFilters();

  if (sdch_dictionary_url_.is_valid()) {
    // Prior to reaching the destructor, request_ has been set to a NULL
    // pointer, so request_->url() is no longer valid in the destructor, and we
    // use an alternate copy |request_info_.url|.
    SdchManager* manager = SdchManager::Global();
    // To be extra safe, since this is a "different time" from when we decided
    // to get the dictionary, we'll validate that an SdchManager is available.
    // At shutdown time, care is taken to be sure that we don't delete this
    // globally useful instance "too soon," so this check is just defensive
    // coding to assure that IF the system is shutting down, we don't have any
    // problem if the manager was deleted ahead of time.
    if (manager)  // Defensive programming.
      manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
  }
}

void URLRequestHttpJob::SetUpload(net::UploadData* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data = upload;
}

void URLRequestHttpJob::SetExtraRequestHeaders(
    const std::string& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers = headers;
}

void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // Ensure that we do not send username and password fields in the referrer.
  GURL referrer(request_->GetSanitizedReferrer());

  request_info_.url = request_->url();
  request_info_.referrer = referrer;
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  request_info_.priority = request_->priority();

  if (request_->context()) {
    request_info_.user_agent =
        request_->context()->GetUserAgent(request_->url());
  }

  AddExtraHeaders();

  StartTransaction();
}

void URLRequestHttpJob::Kill() {
  if (!transaction_.get())
    return;

  DestroyTransaction();
  URLRequestJob::Kill();
}

net::LoadState URLRequestHttpJob::GetLoadState() const {
  return transaction_.get() ?
      transaction_->GetLoadState() : net::LOAD_STATE_IDLE;
}

uint64 URLRequestHttpJob::GetUploadProgress() const {
  return transaction_.get() ? transaction_->GetUploadProgress() : 0;
}

bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return response_info_->headers->GetMimeType(mime_type);
}

bool URLRequestHttpJob::GetCharset(std::string* charset) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return response_info_->headers->GetCharset(charset);
}

void URLRequestHttpJob::GetResponseInfo(net::HttpResponseInfo* info) {
  DCHECK(request_);
  DCHECK(transaction_.get());

  if (response_info_)
    *info = *response_info_;
}

bool URLRequestHttpJob::GetResponseCookies(
    std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  if (response_cookies_.empty())
    FetchResponseCookies();

  cookies->clear();
  cookies->swap(response_cookies_);
  return true;
}

int URLRequestHttpJob::GetResponseCode() const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return -1;

  return response_info_->headers->response_code();
}

bool URLRequestHttpJob::GetContentEncodings(
    std::vector<Filter::FilterType>* encoding_types) {
  DCHECK(transaction_.get());
  if (!response_info_)
    return false;
  DCHECK(encoding_types->empty());

  std::string encoding_type;
  void* iter = NULL;
  while (response_info_->headers->EnumerateHeader(&iter, "Content-Encoding",
                                                  &encoding_type)) {
    encoding_types->push_back(Filter::ConvertEncodingToType(encoding_type));
  }

  // Even if encoding types are empty, there is a chance that we need to add
  // some decoding, as some proxies strip encoding completely. In such cases,
  // we may need to add (for example) SDCH filtering (when the context suggests
  // it is appropriate).
  Filter::FixupEncodingTypes(*this, encoding_types);

  return !encoding_types->empty();
}

bool URLRequestHttpJob::IsSdchResponse() const {
  return sdch_dictionary_advertised_;
}

bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
  // We only allow redirects to certain "safe" protocols. This does not
  // restrict redirects to externally handled protocols. Our consumer would
  // need to take care of those.

  if (!URLRequest::IsHandledURL(location))
    return true;

  static const char* kSafeSchemes[] = {
    "http",
    "https",
    "ftp"
  };

  for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) {
    if (location.SchemeIs(kSafeSchemes[i]))
      return true;
  }

  return false;
}

bool URLRequestHttpJob::NeedsAuth() {
  int code = GetResponseCode();
  if (code == -1)
    return false;

  // Check if we need either Proxy or WWW Authentication. This could happen
  // because we either provided no auth info, or provided incorrect info.
  switch (code) {
    case 407:
      if (proxy_auth_state_ == net::AUTH_STATE_CANCELED)
        return false;
      proxy_auth_state_ = net::AUTH_STATE_NEED_AUTH;
      return true;
    case 401:
      if (server_auth_state_ == net::AUTH_STATE_CANCELED)
        return false;
      server_auth_state_ = net::AUTH_STATE_NEED_AUTH;
      return true;
  }
  return false;
}

void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<net::AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // sanity checks:
  DCHECK(proxy_auth_state_ == net::AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == net::AUTH_STATE_NEED_AUTH);
  DCHECK(response_info_->headers->response_code() == 401 ||
         response_info_->headers->response_code() == 407);

  *result = response_info_->auth_challenge;
}

void URLRequestHttpJob::SetAuth(const std::wstring& username,
                                const std::wstring& password) {
  DCHECK(transaction_.get());

  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == net::AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = net::AUTH_STATE_HAVE_AUTH;
  } else {
    DCHECK(server_auth_state_ == net::AUTH_STATE_NEED_AUTH);
    server_auth_state_ = net::AUTH_STATE_HAVE_AUTH;
  }

  RestartTransactionWithAuth(username, password);
}

void URLRequestHttpJob::RestartTransactionWithAuth(
    const std::wstring& username,
    const std::wstring& password) {

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers by AddExtraHeaders(), we need to strip them out.
  static const char* const cookie_name[] = { "cookie" };
  request_info_.extra_headers = net::HttpUtil::StripHeaders(
      request_info_.extra_headers, cookie_name, arraysize(cookie_name));
  // TODO(eroman): this ordering is inconsistent with non-restarted request,
  // where cookies header appears second from the bottom.
  request_info_.extra_headers += AssembleRequestCookies();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithAuth(username, password,
                                         &start_callback_);
  if (rv == net::ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, rv));
}

// static
bool URLRequestHttpJob::IsPortAllowedByOverride(int port) {
  if (explicitly_allowed_ports().empty())
    return false;

  std::set<int>::const_iterator it =
      std::find(explicitly_allowed_ports().begin(),
                explicitly_allowed_ports().end(),
                port);

  return it != explicitly_allowed_ports().end();
}

void URLRequestHttpJob::CancelAuth() {
  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == net::AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = net::AUTH_STATE_CANCELED;
  } else {
    DCHECK(server_auth_state_ == net::AUTH_STATE_NEED_AUTH);
    server_auth_state_ = net::AUTH_STATE_CANCELED;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  // OK, let the consumer read the error page...
  //
  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
  // which will cause the consumer to receive OnResponseStarted instead of
  // OnAuthRequired.
  //
  // We have to do this via InvokeLater to avoid "recursing" the consumer.
  //
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, net::OK));
}

void URLRequestHttpJob::ContinueWithCertificate(
    net::X509Certificate* client_cert) {
  DCHECK(transaction_.get());

  DCHECK(!response_info_) << "should not have a response yet";

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithCertificate(client_cert, &start_callback_);
  if (rv == net::ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, rv));
}

void URLRequestHttpJob::ContinueDespiteLastError() {
  // If the transaction was destroyed, then the job was cancelled.
  if (!transaction_.get())
    return;

  DCHECK(!response_info_) << "should not have a response yet";

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartIgnoringLastError(&start_callback_);
  if (rv == net::ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, rv));
}

bool URLRequestHttpJob::GetMoreData() {
  return transaction_.get() && !read_in_progress_;
}

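// Reads response body bytes from the transaction. Returns true and fills in
// |*bytes_read| when the read completes synchronously (0 bytes means EOF);
// returns false with an IO_PENDING status while a read is outstanding, or
// false after NotifyDone() when the read fails outright.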
bool URLRequestHttpJob::ReadRawData(net::IOBuffer* buf, int buf_size,
                                    int* bytes_read) {
  DCHECK_NE(buf_size, 0);
  DCHECK(bytes_read);
  DCHECK(!read_in_progress_);

  int rv = transaction_->Read(buf, buf_size, &read_callback_);
  if (rv >= 0) {
    *bytes_read = rv;
    return true;
  }

  if (rv == net::ERR_IO_PENDING) {
    read_in_progress_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  } else {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
  }

  return false;
}

void URLRequestHttpJob::OnStartCompleted(int result) {
  // If the request was destroyed, then there is no more work to do.
  if (!request_ || !request_->delegate())
    return;

  // If the transaction was destroyed, then the job was cancelled, and
  // we can just ignore this notification.
  if (!transaction_.get())
    return;

  // Clear the IO_PENDING status
  SetStatus(URLRequestStatus());

  if (result == net::OK) {
    NotifyHeadersComplete();
  } else if (ShouldTreatAsCertificateError(result)) {
    // We encountered an SSL certificate error. Ask our delegate to decide
    // what we should do.
    // TODO(wtc): also pass ssl_info.cert_status, or just pass the whole
    // ssl_info.
    request_->delegate()->OnSSLCertificateError(
        request_, result, transaction_->GetResponseInfo()->ssl_info.cert);
  } else if (result == net::ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
    request_->delegate()->OnCertificateRequested(
        request_, transaction_->GetResponseInfo()->cert_request_info);
  } else {
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

void URLRequestHttpJob::OnReadCompleted(int result) {
  read_in_progress_ = false;

  if (result == 0) {
    NotifyDone(URLRequestStatus());
  } else if (result < 0) {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
  } else {
    // Clear the IO_PENDING status
    SetStatus(URLRequestStatus());
  }

  NotifyReadComplete(result);
}

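// Returns true if |result| is a certificate error that the delegate may
// override via OnSSLCertificateError(). Non-certificate errors, and
// certificate errors on ForceTLS-enabled hosts (when --force-https is set),
// return false and are treated as fatal by OnStartCompleted().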
bool URLRequestHttpJob::ShouldTreatAsCertificateError(int result) {
  if (!net::IsCertificateError(result))
    return false;

  // Hide the fancy processing behind a command line switch.
  if (!CommandLine::ForCurrentProcess()->HasSwitch(switches::kForceHTTPS))
    return true;

  // Check whether our context is using ForceTLS.
  if (!context_->force_tls_state())
    return true;

  return !context_->force_tls_state()->IsEnabledForHost(
      request_info_.url.host());
}

void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  // Get the Set-Cookie values, and send them to our cookie database.
  if (!(request_info_.load_flags & net::LOAD_DO_NOT_SAVE_COOKIES)) {
    URLRequestContext* ctx = request_->context();
    if (ctx && ctx->cookie_store() &&
        ctx->cookie_policy()->CanSetCookie(
            request_->url(), request_->first_party_for_cookies())) {
      FetchResponseCookies();
      net::CookieOptions options;
      options.set_include_httponly();
      ctx->cookie_store()->SetCookiesWithOptions(request_->url(),
                                                 response_cookies_,
                                                 options);
    }
  }

  ProcessForceTLSHeader();

  if (SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url())) {
    static const std::string name = "Get-Dictionary";
    std::string url_text;
    void* iter = NULL;
    // TODO(jar): We need to not fetch dictionaries the first time they are
    // seen, but rather wait until we can justify their usefulness.
    // For now, we will only fetch the first dictionary, which will at least
    // require multiple suggestions before we get additional ones for this site.
    // Eventually we should wait until a dictionary is requested several times
    // before we even download it (so that we don't waste memory or bandwidth).
    if (response_info_->headers->EnumerateHeader(&iter, name, &url_text)) {
      // request_->url() won't be valid in the destructor, so we use an
      // alternate copy.
      DCHECK(request_->url() == request_info_.url);
      // Resolve suggested URL relative to request url.
      sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information. Each time it restarts, we get
  // notified of the headers completion so that we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    RestartTransactionWithAuth(std::wstring(), std::wstring());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}

void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  transaction_.reset();
  response_info_ = NULL;
}

void URLRequestHttpJob::StartTransaction() {
  // NOTE: This method assumes that request_info_ is already setup properly.

  // Create a transaction.
  DCHECK(!transaction_.get());

  DCHECK(request_->context());
  DCHECK(request_->context()->http_transaction_factory());

  transaction_.reset(
      request_->context()->http_transaction_factory()->CreateTransaction());

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv;
  if (transaction_.get()) {
    rv = transaction_->Start(
        &request_info_, &start_callback_, request_->load_log());
    if (rv == net::ERR_IO_PENDING)
      return;
  } else {
    rv = net::ERR_FAILED;
  }

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpJob::OnStartCompleted, rv));
}

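// Assembles the extra request headers: Accept-Encoding (optionally
// advertising SDCH and any available dictionaries), Cookie, Accept-Language,
// and Accept-Charset.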
void URLRequestHttpJob::AddExtraHeaders() {
  // TODO(jar): Consider optimizing away SDCH advertising bytes when the URL is
  // probably an img or such (and SDCH encoding is not likely).
  bool advertise_sdch = SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url());
  std::string avail_dictionaries;
  if (advertise_sdch) {
    SdchManager::Global()->GetAvailDictionaryList(request_->url(),
                                                  &avail_dictionaries);

    // The AllowLatencyExperiment() is only true if we've successfully done a
    // full SDCH compression recently in this browser session for this host.
    // Note that for this path, there might be no applicable dictionaries, and
    // hence we can't participate in the experiment.
    if (!avail_dictionaries.empty() &&
        SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
      // We are participating in the test (or control), and hence we'll
      // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
      // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
      EnablePacketCounting(kSdchPacketHistogramCount);
      if (base::RandDouble() < .01) {
        sdch_test_control_ = true;  // 1% probability.
        advertise_sdch = false;
      } else {
        sdch_test_activated_ = true;
      }
    }
  }

  // Supply Accept-Encoding headers first so that it is more likely that they
  // will be in the first transmitted packet. This can sometimes make it easier
  // to filter and analyze the streams to assure that a proxy has not damaged
  // these headers. Some proxies deliberately corrupt Accept-Encoding headers.
  if (!advertise_sdch) {
    // Tell the server what compression formats we support (other than SDCH).
    request_info_.extra_headers += "Accept-Encoding: gzip,deflate\r\n";
  } else {
    // Include SDCH in acceptable list.
    request_info_.extra_headers += "Accept-Encoding: "
        "gzip,deflate,sdch\r\n";
    if (!avail_dictionaries.empty()) {
      request_info_.extra_headers += "Avail-Dictionary: "
          + avail_dictionaries + "\r\n";
      sdch_dictionary_advertised_ = true;
      // Since we're tagging this transaction as advertising a dictionary,
      // we'll definitely employ an SDCH filter (or tentative SDCH filter) when
      // we get a response. When done, we'll record histograms via SDCH_DECODE
      // or SDCH_PASSTHROUGH. Hence we need to record packet arrival times.
      EnablePacketCounting(kSdchPacketHistogramCount);
    }
  }

  URLRequestContext* context = request_->context();
  if (context) {
    if (context->AllowSendingCookies(request_))
      request_info_.extra_headers += AssembleRequestCookies();
    if (!context->accept_language().empty())
      request_info_.extra_headers += "Accept-Language: " +
          context->accept_language() + "\r\n";
    if (!context->accept_charset().empty())
      request_info_.extra_headers += "Accept-Charset: " +
          context->accept_charset() + "\r\n";
  }
}

std::string URLRequestHttpJob::AssembleRequestCookies() {
  if (request_info_.load_flags & net::LOAD_DO_NOT_SEND_COOKIES)
    return std::string();

  URLRequestContext* context = request_->context();
  if (context) {
    // Add in the cookie header. TODO might we need more than one header?
    if (context->cookie_store() &&
        context->cookie_policy()->CanGetCookies(
            request_->url(), request_->first_party_for_cookies())) {
      net::CookieOptions options;
      options.set_include_httponly();
      std::string cookies = request_->context()->cookie_store()->
          GetCookiesWithOptions(request_->url(), options);
      if (!cookies.empty())
        return "Cookie: " + cookies + "\r\n";
    }
  }
  return std::string();
}

void URLRequestHttpJob::FetchResponseCookies() {
  DCHECK(response_info_);
  DCHECK(response_cookies_.empty());

  std::string name = "Set-Cookie";
  std::string value;

  void* iter = NULL;
  while (response_info_->headers->EnumerateHeader(&iter, name, &value))
    if (request_->context()->InterceptCookie(request_, &value))
      response_cookies_.push_back(value);
}

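// Feeds any X-Force-TLS response headers to the context's ForceTLSState, but
// only when the --force-https switch is set and the response arrived over
// HTTPS with a certificate free of errors.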
void URLRequestHttpJob::ProcessForceTLSHeader() {
  DCHECK(response_info_);

  // Hide processing behind a command line flag.
  if (!CommandLine::ForCurrentProcess()->HasSwitch(switches::kForceHTTPS))
    return;

  // Only process X-Force-TLS from HTTPS responses.
  if (request_info_.url.scheme() != "https")
    return;

  // Only process X-Force-TLS from responses with valid certificates.
  if (response_info_->ssl_info.cert_status & net::CERT_STATUS_ALL_ERRORS)
    return;

  URLRequestContext* ctx = request_->context();
  if (!ctx || !ctx->force_tls_state())
    return;

  std::string name = "X-Force-TLS";
  std::string value;

  void* iter = NULL;
  while (response_info_->headers->EnumerateHeader(&iter, name, &value))
    ctx->force_tls_state()->DidReceiveHeader(request_info_.url, value);
}