// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/quic/reliable_quic_stream.h"

#include "net/quic/quic_session.h"
#include "net/quic/quic_spdy_decompressor.h"

using base::StringPiece;
using std::min;
using std::string;

namespace net {

ReliableQuicStream::ReliableQuicStream(QuicStreamId id,
                                       QuicSession* session)
    : sequencer_(this),
      id_(id),
      session_(session),
      visitor_(NULL),
      stream_bytes_read_(0),
      stream_bytes_written_(0),
      headers_decompressed_(false),
      headers_id_(0),
      stream_error_(QUIC_STREAM_NO_ERROR),
      connection_error_(QUIC_NO_ERROR),
      read_side_closed_(false),
      write_side_closed_(false),
      fin_buffered_(false),
      fin_sent_(false) {
}

ReliableQuicStream::~ReliableQuicStream() {
}

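// Returns true if the sequencer can accept this frame.  Frames that arrive
// after the read side has been closed are still accepted so that their data
// can be discarded in OnStreamFrame().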
bool ReliableQuicStream::WillAcceptStreamFrame(
    const QuicStreamFrame& frame) const {
  if (read_side_closed_) {
    return true;
  }
  if (frame.stream_id != id_) {
    LOG(ERROR) << "Received frame for stream " << frame.stream_id
               << " on stream " << id_;
    return false;
  }
  return sequencer_.WillAcceptStreamFrame(frame);
}

bool ReliableQuicStream::OnStreamFrame(const QuicStreamFrame& frame) {
  DCHECK_EQ(frame.stream_id, id_);
  if (read_side_closed_) {
    DLOG(INFO) << "Ignoring frame " << frame.stream_id;
    // We don't want to be reading: blackhole the data.
    return true;
  }
  // Note: This count includes duplicate data received.
  stream_bytes_read_ += frame.data.length();

  bool accepted = sequencer_.OnStreamFrame(frame);

  if (frame.fin) {
    sequencer_.CloseStreamAtOffset(frame.offset + frame.data.size());
  }

  return accepted;
}

void ReliableQuicStream::OnStreamReset(QuicRstStreamErrorCode error) {
  stream_error_ = error;
  TerminateFromPeer(false);  // Full close.
}

void ReliableQuicStream::ConnectionClose(QuicErrorCode error, bool from_peer) {
  if (read_side_closed_ && write_side_closed_) {
    return;
  }
  if (error != QUIC_NO_ERROR) {
    stream_error_ = QUIC_STREAM_CONNECTION_ERROR;
    connection_error_ = error;
  }

  if (from_peer) {
    TerminateFromPeer(false);
  } else {
    CloseWriteSide();
    CloseReadSide();
  }
}

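// Closes the read side; a full close (half_close == false) closes the write
// side as well.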
void ReliableQuicStream::TerminateFromPeer(bool half_close) {
  if (!half_close) {
    CloseWriteSide();
  }
  CloseReadSide();
}

void ReliableQuicStream::Close(QuicRstStreamErrorCode error) {
  stream_error_ = error;
  if (error != QUIC_STREAM_NO_ERROR) {
    // Sending a RstStream results in calling CloseStream.
    session()->SendRstStream(id(), error);
  } else {
    session_->CloseStream(id());
  }
}

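// Reads from the buffered decompressed headers first; once those have been
// drained, subsequent reads come directly from the sequencer.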
size_t ReliableQuicStream::Readv(const struct iovec* iov, size_t iov_len) {
  if (headers_decompressed_ && decompressed_headers_.empty()) {
    return sequencer_.Readv(iov, iov_len);
  }
  size_t bytes_consumed = 0;
  size_t iov_index = 0;
  while (iov_index < iov_len &&
         decompressed_headers_.length() > bytes_consumed) {
    size_t bytes_to_read = min(iov[iov_index].iov_len,
                               decompressed_headers_.length() - bytes_consumed);
    char* iov_ptr = static_cast<char*>(iov[iov_index].iov_base);
    memcpy(iov_ptr,
           decompressed_headers_.data() + bytes_consumed, bytes_to_read);
    bytes_consumed += bytes_to_read;
    ++iov_index;
  }
  decompressed_headers_.erase(0, bytes_consumed);
  return bytes_consumed;
}

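// While decompressed header bytes are pending they are exposed as a single
// readable region; otherwise readable regions come from the sequencer.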
int ReliableQuicStream::GetReadableRegions(iovec* iov, size_t iov_len) {
  if (headers_decompressed_ && decompressed_headers_.empty()) {
    return sequencer_.GetReadableRegions(iov, iov_len);
  }
  if (iov_len == 0) {
    return 0;
  }
  iov[0].iov_base = static_cast<void*>(
      const_cast<char*>(decompressed_headers_.data()));
  iov[0].iov_len = decompressed_headers_.length();
  return 1;
}

bool ReliableQuicStream::IsHalfClosed() const {
  if (!headers_decompressed_ || !decompressed_headers_.empty()) {
    return false;
  }
  return sequencer_.IsHalfClosed();
}

bool ReliableQuicStream::HasBytesToRead() const {
  return !decompressed_headers_.empty() || sequencer_.HasBytesToRead();
}

const IPEndPoint& ReliableQuicStream::GetPeerAddress() const {
  return session_->peer_address();
}

QuicSpdyCompressor* ReliableQuicStream::compressor() {
  return session_->compressor();
}

QuicConsumedData ReliableQuicStream::WriteData(StringPiece data, bool fin) {
  DCHECK(data.size() > 0 || fin);
  return WriteOrBuffer(data, fin);
}

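// Writes as much of |data| as the session will currently accept and buffers
// whatever is left over, so the caller always sees the full length reported
// as consumed.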
QuicConsumedData ReliableQuicStream::WriteOrBuffer(StringPiece data, bool fin) {
  DCHECK(!fin_buffered_);

  QuicConsumedData consumed_data(0, false);
  fin_buffered_ = fin;

  if (queued_data_.empty()) {
    consumed_data = WriteDataInternal(string(data.data(), data.length()), fin);
    DCHECK_LE(consumed_data.bytes_consumed, data.length());
  }

  // If there's unconsumed data or an unconsumed fin, queue it.
  if (consumed_data.bytes_consumed < data.length() ||
      (fin && !consumed_data.fin_consumed)) {
    queued_data_.push_back(
        string(data.data() + consumed_data.bytes_consumed,
               data.length() - consumed_data.bytes_consumed));
  }

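  // Everything is either written or buffered, so report it all as consumed.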
  return QuicConsumedData(data.size(), true);
}

void ReliableQuicStream::OnCanWrite() {
  bool fin = false;
  while (!queued_data_.empty()) {
    const string& data = queued_data_.front();
    if (queued_data_.size() == 1 && fin_buffered_) {
      fin = true;
    }
    QuicConsumedData consumed_data = WriteDataInternal(data, fin);
    if (consumed_data.bytes_consumed == data.size() &&
        fin == consumed_data.fin_consumed) {
      queued_data_.pop_front();
    } else {
      queued_data_.front().erase(0, consumed_data.bytes_consumed);
      break;
    }
  }
}

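// Hands |data| directly to the session.  If the session cannot accept all of
// the data (or the fin), the stream marks itself as write blocked.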
QuicConsumedData ReliableQuicStream::WriteDataInternal(
    StringPiece data, bool fin) {
  if (write_side_closed_) {
    DLOG(ERROR) << "Attempt to write when the write side is closed";
    return QuicConsumedData(0, false);
  }

  QuicConsumedData consumed_data =
      session()->WriteData(id(), data, stream_bytes_written_, fin);
  stream_bytes_written_ += consumed_data.bytes_consumed;
  if (consumed_data.bytes_consumed == data.length()) {
    if (fin && consumed_data.fin_consumed) {
      fin_sent_ = true;
      CloseWriteSide();
    } else if (fin && !consumed_data.fin_consumed) {
      session_->MarkWriteBlocked(id());
    }
  } else {
    session_->MarkWriteBlocked(id());
  }
  return consumed_data;
}

void ReliableQuicStream::CloseReadSide() {
  if (read_side_closed_) {
    return;
  }
  DLOG(INFO) << "Done reading from stream " << id();

  read_side_closed_ = true;
  if (write_side_closed_) {
    DLOG(INFO) << "Closing stream: " << id();
    session_->CloseStream(id());
  }
}

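// Stream data begins with a 4-byte headers ID.  Header bytes are routed
// through the session's decompressor before reaching ProcessData(); once the
// headers are complete, remaining data is passed through unchanged.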
uint32 ReliableQuicStream::ProcessRawData(const char* data, uint32 data_len) {
  if (id() == kCryptoStreamId) {
    // The crypto stream does not use compression.
    return ProcessData(data, data_len);
  }
  uint32 total_bytes_consumed = 0;
  if (headers_id_ == 0u) {
    // The headers ID has not yet been read.  Strip it from the beginning of
    // the data stream.
    DCHECK_GT(4u, headers_id_buffer_.length());
    size_t missing_size = 4 - headers_id_buffer_.length();
    if (data_len < missing_size) {
      StringPiece(data, data_len).AppendToString(&headers_id_buffer_);
      return data_len;
    }
    total_bytes_consumed += missing_size;
    StringPiece(data, missing_size).AppendToString(&headers_id_buffer_);
    DCHECK_EQ(4u, headers_id_buffer_.length());
    memcpy(&headers_id_, headers_id_buffer_.data(), 4);
    headers_id_buffer_.clear();
    data += missing_size;
    data_len -= missing_size;
  }
  DCHECK_NE(0u, headers_id_);

  // Once the headers are finished, we simply pass the data through.
  if (headers_decompressed_) {
    // Some buffered header data remains.
    if (!decompressed_headers_.empty()) {
      ProcessHeaderData();
    }
    if (decompressed_headers_.empty()) {
      DVLOG(1) << "Delegating processing to ProcessData";
      total_bytes_consumed += ProcessData(data, data_len);
    }
    return total_bytes_consumed;
  }

  QuicHeaderId current_header_id =
      session_->decompressor()->current_header_id();
  // Ensure that this header id looks sane.
  if (headers_id_ < current_header_id ||
      headers_id_ > kMaxHeaderIdDelta + current_header_id) {
    DVLOG(1) << "Invalid headers for stream: " << id()
             << " header_id: " << headers_id_
             << " current_header_id: " << current_header_id;
    session_->connection()->SendConnectionClose(QUIC_INVALID_HEADER_ID);
    return total_bytes_consumed;
  }

  // If we are head-of-line blocked on decompression, then back up.
  if (current_header_id != headers_id_) {
    session_->MarkDecompressionBlocked(headers_id_, id());
    DVLOG(1) << "Unable to decompress header data for stream: " << id()
             << " header_id: " << headers_id_;
    return total_bytes_consumed;
  }

  // Decompressed data will be delivered to decompressed_headers_.
  size_t bytes_consumed = session_->decompressor()->DecompressData(
      StringPiece(data, data_len), this);
  total_bytes_consumed += bytes_consumed;

  // Headers are complete if the decompressor has moved on to the
  // next stream.
  headers_decompressed_ =
      session_->decompressor()->current_header_id() != headers_id_;

  ProcessHeaderData();

  // We have processed all of the decompressed data but we might
  // have some more raw data to process.
  if (decompressed_headers_.empty() || total_bytes_consumed < data_len) {
    total_bytes_consumed += ProcessData(data + bytes_consumed,
                                        data_len - bytes_consumed);
  }

  // The sequencer will push any additional buffered frames if this data
  // has been completely consumed.
  return total_bytes_consumed;
}

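// Feeds any buffered decompressed header bytes to ProcessData(), trimming
// whatever was consumed from decompressed_headers_.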
uint32 ReliableQuicStream::ProcessHeaderData() {
  if (decompressed_headers_.empty()) {
    return 0;
  }

  size_t bytes_processed = ProcessData(decompressed_headers_.data(),
                                       decompressed_headers_.length());
  if (bytes_processed == decompressed_headers_.length()) {
    decompressed_headers_.clear();
  } else {
    decompressed_headers_ = decompressed_headers_.erase(0, bytes_processed);
  }
  return bytes_processed;
}

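// Called when the decompressor has reached this stream's header id (see the
// DCHECKs below); drains readable regions from the sequencer until the
// headers are fully decompressed or no more data is available.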
void ReliableQuicStream::OnDecompressorAvailable() {
  DCHECK_EQ(headers_id_,
            session_->decompressor()->current_header_id());
  DCHECK(!headers_decompressed_);
  DCHECK_EQ(0u, decompressed_headers_.length());

  size_t total_bytes_consumed = 0;
  struct iovec iovecs[5];
  while (!headers_decompressed_) {
    size_t num_iovecs =
        sequencer_.GetReadableRegions(iovecs, arraysize(iovecs));

    if (num_iovecs == 0) {
      return;
    }
    for (size_t i = 0; i < num_iovecs && !headers_decompressed_; i++) {
      total_bytes_consumed += session_->decompressor()->DecompressData(
          StringPiece(static_cast<char*>(iovecs[i].iov_base),
                      iovecs[i].iov_len), this);

      headers_decompressed_ =
          session_->decompressor()->current_header_id() != headers_id_;
    }
  }

  // Either the headers are complete, or all of the data has been consumed.
  sequencer_.MarkConsumed(total_bytes_consumed);
  ProcessHeaderData();  // Unprocessed headers remain in decompressed_headers_.
  if (IsHalfClosed()) {
    TerminateFromPeer(true);
  } else if (headers_decompressed_ && decompressed_headers_.empty()) {
    sequencer_.FlushBufferedFrames();
  }
}

bool ReliableQuicStream::OnDecompressedData(StringPiece data) {
  data.AppendToString(&decompressed_headers_);
  return true;
}

void ReliableQuicStream::OnDecompressionError() {
  session_->connection()->SendConnectionClose(QUIC_DECOMPRESSION_FAILURE);
}

void ReliableQuicStream::CloseWriteSide() {
  if (write_side_closed_) {
    return;
  }
  DLOG(INFO) << "Done writing to stream " << id();

  write_side_closed_ = true;
  if (read_side_closed_) {
    DLOG(INFO) << "Closing stream: " << id();
    session_->CloseStream(id());
  }
}

void ReliableQuicStream::OnClose() {
  CloseReadSide();
  CloseWriteSide();

  if (visitor_) {
    Visitor* visitor = visitor_;
    // Calling Visitor::OnClose() may result in the destruction of the visitor,
    // so we need to ensure we don't call it again.
    visitor_ = NULL;
    visitor->OnClose(this);
  }
}

}  // namespace net