// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/quic/reliable_quic_stream.h"

#include "net/quic/quic_session.h"
#include "net/quic/quic_spdy_decompressor.h"
#include "net/spdy/write_blocked_list.h"

using base::StringPiece;
using std::min;
using std::string;

namespace net {

namespace {

// This is somewhat arbitrary. It's possible, but unlikely, we will either fail
// to set a priority client-side, or cancel a stream before stripping the
// priority from the wire server-side. In either case, start out with a
// priority in the middle.
QuicPriority kDefaultPriority = 3;

// Appends bytes from data into partial_data_buffer. Once partial_data_buffer
// reaches 4 bytes, copies the data into 'result' and clears
// partial_data_buffer.
// Returns the number of bytes consumed.
uint32 StripUint32(const char* data, uint32 data_len,
                   string* partial_data_buffer,
                   uint32* result) {
  DCHECK_GT(4u, partial_data_buffer->length());
  size_t missing_size = 4 - partial_data_buffer->length();
  if (data_len < missing_size) {
    StringPiece(data, data_len).AppendToString(partial_data_buffer);
    return data_len;
  }
  StringPiece(data, missing_size).AppendToString(partial_data_buffer);
  DCHECK_EQ(4u, partial_data_buffer->length());
  memcpy(result, partial_data_buffer->data(), 4);
  partial_data_buffer->clear();
  return missing_size;
}

}  // namespace

ReliableQuicStream::ReliableQuicStream(QuicStreamId id,
                                       QuicSession* session)
    : sequencer_(this),
      id_(id),
      session_(session),
      visitor_(NULL),
      stream_bytes_read_(0),
      stream_bytes_written_(0),
      headers_decompressed_(false),
      priority_(kDefaultPriority),
      headers_id_(0),
      decompression_failed_(false),
      stream_error_(QUIC_STREAM_NO_ERROR),
      connection_error_(QUIC_NO_ERROR),
      read_side_closed_(false),
      write_side_closed_(false),
      priority_parsed_(false),
      fin_buffered_(false),
      fin_sent_(false) {
}

ReliableQuicStream::~ReliableQuicStream() {
}

bool ReliableQuicStream::WillAcceptStreamFrame(
    const QuicStreamFrame& frame) const {
  if (read_side_closed_) {
    return true;
  }
  if (frame.stream_id != id_) {
    LOG(ERROR) << "Error! Received a frame for stream " << frame.stream_id
               << " on stream " << id_;
    return false;
  }
  return sequencer_.WillAcceptStreamFrame(frame);
}

bool ReliableQuicStream::OnStreamFrame(const QuicStreamFrame& frame) {
  DCHECK_EQ(frame.stream_id, id_);
  if (read_side_closed_) {
    DLOG(INFO) << "Ignoring frame " << frame.stream_id;
    // We don't want to be reading: blackhole the data.
    return true;
  }
  // Note: This count includes duplicate data received.
  stream_bytes_read_ += frame.data.length();

  bool accepted = sequencer_.OnStreamFrame(frame);

  return accepted;
}

void ReliableQuicStream::OnStreamReset(QuicRstStreamErrorCode error) {
  stream_error_ = error;
  TerminateFromPeer(false);  // Full close.
}

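// Called when the connection that carries this stream is closing. Records any
// connection-level error and then closes the stream's read and write sides.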
void ReliableQuicStream::ConnectionClose(QuicErrorCode error, bool from_peer) {
  if (read_side_closed_ && write_side_closed_) {
    return;
  }
  if (error != QUIC_NO_ERROR) {
    stream_error_ = QUIC_STREAM_CONNECTION_ERROR;
    connection_error_ = error;
  }

  if (from_peer) {
    TerminateFromPeer(false);
  } else {
    CloseWriteSide();
    CloseReadSide();
  }
}

void ReliableQuicStream::TerminateFromPeer(bool half_close) {
  if (!half_close) {
    CloseWriteSide();
  }
  CloseReadSide();
}

void ReliableQuicStream::Close(QuicRstStreamErrorCode error) {
  stream_error_ = error;
  if (error != QUIC_STREAM_NO_ERROR) {
    // Sending a RstStream results in calling CloseStream.
    session()->SendRstStream(id(), error);
  } else {
    session_->CloseStream(id());
  }
}

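// Reads data into the caller's iovec array. Buffered decompressed header
// bytes are returned first; once the headers are complete and the buffer is
// drained, reads are served directly by the sequencer.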
size_t ReliableQuicStream::Readv(const struct iovec* iov, size_t iov_len) {
  if (headers_decompressed_ && decompressed_headers_.empty()) {
    return sequencer_.Readv(iov, iov_len);
  }
  size_t bytes_consumed = 0;
  size_t iov_index = 0;
  while (iov_index < iov_len &&
         decompressed_headers_.length() > bytes_consumed) {
    size_t bytes_to_read = min(iov[iov_index].iov_len,
        decompressed_headers_.length() - bytes_consumed);
    char* iov_ptr = static_cast<char*>(iov[iov_index].iov_base);
    memcpy(iov_ptr,
           decompressed_headers_.data() + bytes_consumed, bytes_to_read);
    bytes_consumed += bytes_to_read;
    ++iov_index;
  }
  decompressed_headers_.erase(0, bytes_consumed);
  return bytes_consumed;
}

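// Exposes any buffered decompressed header data as a single readable region;
// once the headers are complete and drained, delegates to the sequencer.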
int ReliableQuicStream::GetReadableRegions(iovec* iov, size_t iov_len) {
  if (headers_decompressed_ && decompressed_headers_.empty()) {
    return sequencer_.GetReadableRegions(iov, iov_len);
  }
  if (iov_len == 0) {
    return 0;
  }
  iov[0].iov_base = static_cast<void*>(
      const_cast<char*>(decompressed_headers_.data()));
  iov[0].iov_len = decompressed_headers_.length();
  return 1;
}

bool ReliableQuicStream::IsHalfClosed() const {
  if (!headers_decompressed_ || !decompressed_headers_.empty()) {
    return false;
  }
  return sequencer_.IsHalfClosed();
}

bool ReliableQuicStream::HasBytesToRead() const {
  return !decompressed_headers_.empty() || sequencer_.HasBytesToRead();
}

const IPEndPoint& ReliableQuicStream::GetPeerAddress() const {
  return session_->peer_address();
}

QuicSpdyCompressor* ReliableQuicStream::compressor() {
  return session_->compressor();
}

bool ReliableQuicStream::GetSSLInfo(SSLInfo* ssl_info) {
  return session_->GetSSLInfo(ssl_info);
}

QuicConsumedData ReliableQuicStream::WriteData(StringPiece data, bool fin) {
  DCHECK(data.size() > 0 || fin);
  return WriteOrBuffer(data, fin);
}

void ReliableQuicStream::set_priority(QuicPriority priority) {
  DCHECK_EQ(0u, stream_bytes_written_);
  priority_ = priority;
}

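// Writes |data| immediately if nothing is already queued; anything the
// connection does not consume (including an unconsumed fin) is buffered for
// OnCanWrite(). Always reports the full |data| as consumed to the caller.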
QuicConsumedData ReliableQuicStream::WriteOrBuffer(StringPiece data, bool fin) {
  DCHECK(!fin_buffered_);

  QuicConsumedData consumed_data(0, false);
  fin_buffered_ = fin;

  if (queued_data_.empty()) {
    consumed_data = WriteDataInternal(string(data.data(), data.length()), fin);
    DCHECK_LE(consumed_data.bytes_consumed, data.length());
  }

  // If there's unconsumed data or an unconsumed fin, queue it.
  if (consumed_data.bytes_consumed < data.length() ||
      (fin && !consumed_data.fin_consumed)) {
    queued_data_.push_back(
        string(data.data() + consumed_data.bytes_consumed,
               data.length() - consumed_data.bytes_consumed));
  }

  return QuicConsumedData(data.size(), true);
}

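// Called when the connection can accept more data. Flushes as much queued
// data as possible, sending the buffered fin with the final chunk.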
void ReliableQuicStream::OnCanWrite() {
  bool fin = false;
  while (!queued_data_.empty()) {
    const string& data = queued_data_.front();
    if (queued_data_.size() == 1 && fin_buffered_) {
      fin = true;
    }
    QuicConsumedData consumed_data = WriteDataInternal(data, fin);
    if (consumed_data.bytes_consumed == data.size() &&
        fin == consumed_data.fin_consumed) {
      queued_data_.pop_front();
    } else {
      queued_data_.front().erase(0, consumed_data.bytes_consumed);
      break;
    }
  }
}

QuicConsumedData ReliableQuicStream::WriteDataInternal(
    StringPiece data, bool fin) {
  struct iovec iov = {const_cast<char*>(data.data()),
                      static_cast<size_t>(data.size())};
  return WritevDataInternal(&iov, 1, fin);
}

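// Hands the iovec to the session for transmission. If the session cannot
// consume all of the data (or the fin), the stream registers itself as
// write blocked so that OnCanWrite() will be invoked later.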
QuicConsumedData ReliableQuicStream::WritevDataInternal(const struct iovec* iov,
                                                        int count,
                                                        bool fin) {
  if (write_side_closed_) {
    DLOG(ERROR) << "Attempt to write when the write side is closed";
    return QuicConsumedData(0, false);
  }

  size_t write_length = 0u;
  for (int i = 0; i < count; ++i) {
    write_length += iov[i].iov_len;
  }
  QuicConsumedData consumed_data =
      session()->WritevData(id(), iov, count, stream_bytes_written_, fin);
  stream_bytes_written_ += consumed_data.bytes_consumed;
  if (consumed_data.bytes_consumed == write_length) {
    if (fin && consumed_data.fin_consumed) {
      fin_sent_ = true;
      CloseWriteSide();
    } else if (fin && !consumed_data.fin_consumed) {
      session_->MarkWriteBlocked(id(), EffectivePriority());
    }
  } else {
    session_->MarkWriteBlocked(id(), EffectivePriority());
  }
  return consumed_data;
}

QuicPriority ReliableQuicStream::EffectivePriority() const {
  return priority();
}

void ReliableQuicStream::CloseReadSide() {
  if (read_side_closed_) {
    return;
  }
  DLOG(INFO) << "Done reading from stream " << id();

  read_side_closed_ = true;
  if (write_side_closed_) {
    DLOG(INFO) << "Closing stream: " << id();
    session_->CloseStream(id());
  }
}

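// Entry point for data delivered by the sequencer. For the crypto stream the
// data is passed straight to ProcessData(). For other streams this strips the
// priority and headers id, routes header bytes through the SPDY decompressor
// (backing off if decompression is head-of-line blocked), and then forwards
// any remaining body data to ProcessData().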
uint32 ReliableQuicStream::ProcessRawData(const char* data, uint32 data_len) {
  if (id() == kCryptoStreamId) {
    if (data_len == 0) {
      return 0;
    }
    // The crypto stream does not use compression.
    return ProcessData(data, data_len);
  }

  uint32 total_bytes_consumed = 0;
  if (headers_id_ == 0u) {
    total_bytes_consumed += StripPriorityAndHeaderId(data, data_len);
    data += total_bytes_consumed;
    data_len -= total_bytes_consumed;
    if (data_len == 0 || !session_->connection()->connected()) {
      return total_bytes_consumed;
    }
  }
  DCHECK_NE(0u, headers_id_);

  // Once the headers are finished, we simply pass the data through.
  if (headers_decompressed_) {
    // Some buffered header data remains.
    if (!decompressed_headers_.empty()) {
      ProcessHeaderData();
    }
    if (decompressed_headers_.empty()) {
      DVLOG(1) << "Delegating processing to ProcessData";
      total_bytes_consumed += ProcessData(data, data_len);
    }
    return total_bytes_consumed;
  }

  QuicHeaderId current_header_id =
      session_->decompressor()->current_header_id();
  // Ensure that this header id looks sane.
  if (headers_id_ < current_header_id ||
      headers_id_ > kMaxHeaderIdDelta + current_header_id) {
    DVLOG(1) << "Invalid headers for stream: " << id()
             << " header_id: " << headers_id_
             << " current_header_id: " << current_header_id;
    session_->connection()->SendConnectionClose(QUIC_INVALID_HEADER_ID);
    return total_bytes_consumed;
  }

  // If we are head-of-line blocked on decompression, then back up.
  if (current_header_id != headers_id_) {
    session_->MarkDecompressionBlocked(headers_id_, id());
    DVLOG(1) << "Unable to decompress header data for stream: " << id()
             << " header_id: " << headers_id_;
    return total_bytes_consumed;
  }

  // Decompressed data will be delivered to decompressed_headers_.
  size_t bytes_consumed = session_->decompressor()->DecompressData(
      StringPiece(data, data_len), this);
  DCHECK_NE(0u, bytes_consumed);
  if (bytes_consumed > data_len) {
    DCHECK(false) << "DecompressData returned illegal value";
    OnDecompressionError();
    return total_bytes_consumed;
  }
  total_bytes_consumed += bytes_consumed;
  data += bytes_consumed;
  data_len -= bytes_consumed;

  if (decompression_failed_) {
    // The session will have been closed in OnDecompressionError.
    return total_bytes_consumed;
  }

  // Headers are complete if the decompressor has moved on to the
  // next stream.
  headers_decompressed_ =
      session_->decompressor()->current_header_id() != headers_id_;
  if (!headers_decompressed_) {
    DCHECK_EQ(0u, data_len);
  }

  ProcessHeaderData();

  if (!headers_decompressed_ || !decompressed_headers_.empty()) {
    return total_bytes_consumed;
  }

  // We have processed all of the decompressed data but we might
  // have some more raw data to process.
  if (data_len > 0) {
    total_bytes_consumed += ProcessData(data, data_len);
  }

  // The sequencer will push any additional buffered frames if this data
  // has been completely consumed.
  return total_bytes_consumed;
}

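// Feeds buffered decompressed header bytes to ProcessData() and trims
// whatever was consumed from the front of the buffer. Returns the number of
// bytes processed.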
uint32 ReliableQuicStream::ProcessHeaderData() {
  if (decompressed_headers_.empty()) {
    return 0;
  }

  size_t bytes_processed = ProcessData(decompressed_headers_.data(),
                                       decompressed_headers_.length());
  if (bytes_processed == decompressed_headers_.length()) {
    decompressed_headers_.clear();
  } else {
    decompressed_headers_ = decompressed_headers_.erase(0, bytes_processed);
  }
  return bytes_processed;
}

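// Called once the decompressor reaches this stream's header id. Pulls raw
// header bytes from the sequencer, decompresses them until the headers are
// complete or no readable data remains, then delivers the result through
// ProcessHeaderData().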
void ReliableQuicStream::OnDecompressorAvailable() {
  DCHECK_EQ(headers_id_,
            session_->decompressor()->current_header_id());
  DCHECK(!headers_decompressed_);
  DCHECK(!decompression_failed_);
  DCHECK_EQ(0u, decompressed_headers_.length());

  while (!headers_decompressed_) {
    struct iovec iovec;
    if (sequencer_.GetReadableRegions(&iovec, 1) == 0) {
      return;
    }

    size_t bytes_consumed = session_->decompressor()->DecompressData(
        StringPiece(static_cast<char*>(iovec.iov_base),
                    iovec.iov_len),
        this);
    DCHECK_LE(bytes_consumed, iovec.iov_len);
    if (decompression_failed_) {
      return;
    }
    sequencer_.MarkConsumed(bytes_consumed);

    headers_decompressed_ =
        session_->decompressor()->current_header_id() != headers_id_;
  }

  // Either the headers are complete, or all data has been consumed.
  ProcessHeaderData();  // Unprocessed headers remain in decompressed_headers_.
  if (IsHalfClosed()) {
    TerminateFromPeer(true);
  } else if (headers_decompressed_ && decompressed_headers_.empty()) {
    sequencer_.FlushBufferedFrames();
  }
}

bool ReliableQuicStream::OnDecompressedData(StringPiece data) {
  data.AppendToString(&decompressed_headers_);
  return true;
}

void ReliableQuicStream::OnDecompressionError() {
  DCHECK(!decompression_failed_);
  decompression_failed_ = true;
  session_->connection()->SendConnectionClose(QUIC_DECOMPRESSION_FAILURE);
}

void ReliableQuicStream::CloseWriteSide() {
  if (write_side_closed_) {
    return;
  }
  DLOG(INFO) << "Done writing to stream " << id();

  write_side_closed_ = true;
  if (read_side_closed_) {
    DLOG(INFO) << "Closing stream: " << id();
    session_->CloseStream(id());
  }
}

bool ReliableQuicStream::HasBufferedData() {
  return !queued_data_.empty();
}

void ReliableQuicStream::OnClose() {
  CloseReadSide();
  CloseWriteSide();

  if (visitor_) {
    Visitor* visitor = visitor_;
    // Calling Visitor::OnClose() may result in the destruction of the visitor,
    // so we need to ensure we don't call it again.
    visitor_ = NULL;
    visitor->OnClose(this);
  }
}

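// Strips the priority (on servers, for QUIC_VERSION_9 and later) and the SPDY
// headers id from the start of the stream, buffering partial values that span
// frames. Returns the number of bytes parsed.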
uint32 ReliableQuicStream::StripPriorityAndHeaderId(
    const char* data, uint32 data_len) {
  uint32 total_bytes_parsed = 0;

  if (!priority_parsed_ &&
      session_->connection()->version() >= QUIC_VERSION_9 &&
      session_->connection()->is_server()) {
    QuicPriority temporary_priority = priority_;
    total_bytes_parsed = StripUint32(
        data, data_len, &headers_id_and_priority_buffer_, &temporary_priority);
    if (total_bytes_parsed > 0 && headers_id_and_priority_buffer_.size() == 0) {
      priority_parsed_ = true;
      // Spdy priorities are inverted, so the highest numerical value is the
      // lowest legal priority.
      if (temporary_priority > static_cast<QuicPriority>(kLowestPriority)) {
        session_->connection()->SendConnectionClose(QUIC_INVALID_PRIORITY);
        return 0;
      }
      priority_ = temporary_priority;
    }
    data += total_bytes_parsed;
    data_len -= total_bytes_parsed;
  }
  if (data_len > 0 && headers_id_ == 0u) {
    // The headers ID has not yet been read. Strip it from the beginning of
    // the data stream.
    total_bytes_parsed += StripUint32(
        data, data_len, &headers_id_and_priority_buffer_, &headers_id_);
  }
  return total_bytes_parsed;
}

}  // namespace net