blob: 1530148869f665c84ddb009f85883078fd1cc881 [file] [log] [blame]
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
#include "components/rappor/log_uploader.h"

#include <stddef.h>
#include <stdint.h>

#include <utility>

#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/sparse_histogram.h"
#include "components/data_use_measurement/core/data_use_user_data.h"
#include "net/base/load_flags.h"
#include "net/base/net_errors.h"
#include "net/traffic_annotation/network_traffic_annotation.h"
#include "net/url_request/url_fetcher.h"
19namespace {
20
21// The delay, in seconds, between uploading when there are queued logs to send.
22const int kUnsentLogsIntervalSeconds = 3;
23
24// When uploading metrics to the server fails, we progressively wait longer and
25// longer before sending the next log. This backoff process helps reduce load
26// on a server that is having issues.
27// The following is the multiplier we use to expand that inter-log duration.
28const double kBackoffMultiplier = 1.1;
29
30// The maximum backoff multiplier.
31const int kMaxBackoffIntervalSeconds = 60 * 60;
32
33// The maximum number of unsent logs we will keep.
34// TODO(holte): Limit based on log size instead.
35const size_t kMaxQueuedLogs = 10;
36
37enum DiscardReason {
38 UPLOAD_SUCCESS,
39 UPLOAD_REJECTED,
40 QUEUE_OVERFLOW,
41 NUM_DISCARD_REASONS
42};
43
holte5a7ed7c2015-01-09 23:52:4644void RecordDiscardReason(DiscardReason reason) {
45 UMA_HISTOGRAM_ENUMERATION("Rappor.DiscardReason",
46 reason,
47 NUM_DISCARD_REASONS);
48}
49
[email protected]2a172e42014-02-21 04:06:1050} // namespace
51
52namespace rappor {
53
54LogUploader::LogUploader(const GURL& server_url,
55 const std::string& mime_type,
56 net::URLRequestContextGetter* request_context)
57 : server_url_(server_url),
58 mime_type_(mime_type),
59 request_context_(request_context),
holte5a7ed7c2015-01-09 23:52:4660 is_running_(false),
[email protected]2a172e42014-02-21 04:06:1061 has_callback_pending_(false),
62 upload_interval_(base::TimeDelta::FromSeconds(
63 kUnsentLogsIntervalSeconds)) {
64}
65
66LogUploader::~LogUploader() {}
67
holte5a7ed7c2015-01-09 23:52:4668void LogUploader::Start() {
69 is_running_ = true;
70 StartScheduledUpload();
71}
72
73void LogUploader::Stop() {
74 is_running_ = false;
75 // Rather than interrupting the current upload, just let it finish/fail and
76 // then inhibit any retry attempts.
77}
78
[email protected]2a172e42014-02-21 04:06:1079void LogUploader::QueueLog(const std::string& log) {
80 queued_logs_.push(log);
holte5a7ed7c2015-01-09 23:52:4681 // Don't drop logs yet if an upload is in progress. They will be dropped
82 // when it finishes.
83 if (!has_callback_pending_)
84 DropExcessLogs();
85 StartScheduledUpload();
86}
87
88void LogUploader::DropExcessLogs() {
89 while (queued_logs_.size() > kMaxQueuedLogs) {
90 DVLOG(2) << "Dropping excess log.";
91 RecordDiscardReason(QUEUE_OVERFLOW);
92 queued_logs_.pop();
93 }
[email protected]2a172e42014-02-21 04:06:1094}
95
96bool LogUploader::IsUploadScheduled() const {
97 return upload_timer_.IsRunning();
98}
99
100void LogUploader::ScheduleNextUpload(base::TimeDelta interval) {
[email protected]2a172e42014-02-21 04:06:10101 upload_timer_.Start(
102 FROM_HERE, interval, this, &LogUploader::StartScheduledUpload);
103}
104
holte5a7ed7c2015-01-09 23:52:46105bool LogUploader::CanStartUpload() const {
106 return is_running_ &&
107 !queued_logs_.empty() &&
108 !IsUploadScheduled() &&
109 !has_callback_pending_;
110}
111
// Begins uploading the log at the front of the queue, if preconditions hold.
// The log is deliberately left in the queue until OnURLFetchComplete()
// decides whether it was accepted (so a failed upload can be retried).
void LogUploader::StartScheduledUpload() {
  if (!CanStartUpload())
    return;
  DVLOG(2) << "Upload to " << server_url_.spec() << " starting.";
  // Set before the fetch starts so QueueLog()/CanStartUpload() see an
  // upload in progress; cleared in OnUploadFinished().
  has_callback_pending_ = true;
  net::NetworkTrafficAnnotationTag traffic_annotation =
      net::DefineNetworkTrafficAnnotation("rappor_report", R"(
        semantics {
          sender: "RAPPOR"
          description:
              "This service sends RAPPOR anonymous usage statistics to Google."
          trigger:
              "Reports are automatically generated on startup and at intervals "
              "while Chromium is running."
          data: "A protocol buffer with RAPPOR anonymous usage statistics."
          destination: GOOGLE_OWNED_SERVICE
        }
        policy {
          cookies_allowed: NO
          setting:
            "Users can enable or disable this feature by stopping "
            "'Automatically send usage statistics and crash reports to Google'"
            "in Chromium's settings under Advanced Settings, Privacy. The "
            "feature is enabled by default."
          chrome_policy {
            MetricsReportingEnabled {
              policy_options {mode: MANDATORY}
              MetricsReportingEnabled: false
            }
          }
        })");
  current_fetch_ = net::URLFetcher::Create(server_url_, net::URLFetcher::POST,
                                           this, traffic_annotation);
  // Tag the fetcher so this traffic is attributed to RAPPOR in data-use
  // measurements.
  data_use_measurement::DataUseUserData::AttachToFetcher(
      current_fetch_.get(), data_use_measurement::DataUseUserData::RAPPOR);
  current_fetch_->SetRequestContext(request_context_.get());
  // Only the front log is sent; it stays queued until the response arrives.
  current_fetch_->SetUploadData(mime_type_, queued_logs_.front());

  // We already drop cookies server-side, but we might as well strip them out
  // client-side as well.
  current_fetch_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES |
                               net::LOAD_DO_NOT_SEND_COOKIES);
  current_fetch_->Start();
}
156
157// static
158base::TimeDelta LogUploader::BackOffUploadInterval(base::TimeDelta interval) {
159 DCHECK_GT(kBackoffMultiplier, 1.0);
avif57136c12015-12-25 23:27:45160 interval = base::TimeDelta::FromMicroseconds(
161 static_cast<int64_t>(kBackoffMultiplier * interval.InMicroseconds()));
[email protected]2a172e42014-02-21 04:06:10162
163 base::TimeDelta max_interval =
164 base::TimeDelta::FromSeconds(kMaxBackoffIntervalSeconds);
165 return interval > max_interval ? max_interval : interval;
166}
167
// Handles completion of the in-flight upload: records UMA about the outcome,
// pops the front log if it was accepted (200) or rejected as malformed (400),
// and schedules the next attempt with backoff if the server looks unhealthy.
void LogUploader::OnURLFetchComplete(const net::URLFetcher* source) {
  // We're not allowed to re-use the existing |URLFetcher|s, so free them here.
  // Note however that |source| is aliased to the fetcher, so we should be
  // careful not to delete it too early.
  DCHECK_EQ(current_fetch_.get(), source);
  std::unique_ptr<net::URLFetcher> fetch(std::move(current_fetch_));

  const net::URLRequestStatus& request_status = source->GetStatus();

  const int response_code = source->GetResponseCode();
  DVLOG(2) << "Upload fetch complete response code: " << response_code;

  if (request_status.status() != net::URLRequestStatus::SUCCESS) {
    // Negated so the histogram receives the positive net error value.
    UMA_HISTOGRAM_SPARSE_SLOWLY("Rappor.FailedUploadErrorCode",
                                -request_status.error());
    DVLOG(1) << "Rappor server upload failed with error: "
             << request_status.error() << ": "
             << net::ErrorToString(request_status.error());
    DCHECK_EQ(-1, response_code);
  } else {
    // Log a histogram to track response success vs. failure rates.
    UMA_HISTOGRAM_SPARSE_SLOWLY("Rappor.UploadResponseCode", response_code);
  }

  const bool upload_succeeded = response_code == 200;

  // Determine whether this log should be retransmitted.
  // NUM_DISCARD_REASONS doubles as a "keep the log" sentinel here.
  DiscardReason reason = NUM_DISCARD_REASONS;
  if (upload_succeeded) {
    reason = UPLOAD_SUCCESS;
  } else if (response_code == 400) {
    reason = UPLOAD_REJECTED;
  }

  if (reason != NUM_DISCARD_REASONS) {
    DVLOG(2) << "Log discarded.";
    RecordDiscardReason(reason);
    queued_logs_.pop();
  }

  // Logs queued while this upload was pending may have overflowed the cap;
  // QueueLog() deferred trimming to this point.
  DropExcessLogs();

  // Error 400 indicates a problem with the log, not with the server, so
  // don't consider that a sign that the server is in trouble.
  const bool server_is_healthy = upload_succeeded || response_code == 400;
  OnUploadFinished(server_is_healthy);
}
215
holte5a7ed7c2015-01-09 23:52:46216void LogUploader::OnUploadFinished(bool server_is_healthy) {
[email protected]2a172e42014-02-21 04:06:10217 DCHECK(has_callback_pending_);
218 has_callback_pending_ = false;
219 // If the server is having issues, back off. Otherwise, reset to default.
220 if (!server_is_healthy)
221 upload_interval_ = BackOffUploadInterval(upload_interval_);
222 else
223 upload_interval_ = base::TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds);
224
holte5a7ed7c2015-01-09 23:52:46225 if (CanStartUpload())
[email protected]2a172e42014-02-21 04:06:10226 ScheduleNextUpload(upload_interval_);
227}
228
229} // namespace rappor