// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/tracked_objects.h"

#include <ctype.h>
#include <limits.h>
#include <stdlib.h>

#include "base/atomicops.h"
#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/debug/leak_annotations.h"
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_conversions.h"
#include "base/numerics/safe_math.h"
#include "base/process/process_handle.h"
#include "base/third_party/valgrind/memcheck.h"
#include "base/threading/worker_pool.h"
#include "base/tracking_info.h"
#include "build/build_config.h"

using base::TimeDelta;

namespace base {
class TimeDelta;
}

namespace tracked_objects {

namespace {

constexpr char kWorkerThreadSanitizedName[] = "WorkerThread-*";

// When ThreadData is first initialized, should we start in an ACTIVE state to
// record all of the startup-time tasks, or should we start up DEACTIVATED, so
// that we only record after parsing the command line flag --enable-tracking.
// Note that the flag may force either state, so this really controls only the
// period of time up until that flag is parsed. If there is no flag seen, then
// this state may prevail for much or all of the process lifetime.
const ThreadData::Status kInitialStartupState = ThreadData::PROFILING_ACTIVE;

// Possible states of profiler timing.
enum {
  UNDEFINED_TIMING,
  ENABLED_TIMING,
  DISABLED_TIMING,
};

// Current state of profiler timing.
base::subtle::Atomic32 g_profiler_timing_enabled = UNDEFINED_TIMING;

// Returns whether profiler timing is enabled. The default is true, but this
// may be overridden by a command-line flag. Some platforms may
// programmatically set this command-line flag to the "off" value if it's not
// specified.
// This in turn can be overridden by explicitly calling
// ThreadData::EnableProfilerTiming, say, based on a field trial.
inline bool IsProfilerTimingEnabled() {
  // Reading |g_profiler_timing_enabled| is done without barrier because
  // multiple initialization is not an issue while the barrier can be
  // relatively costly given that this method is sometimes called in a tight
  // loop.
  base::subtle::Atomic32 current_timing_enabled =
      base::subtle::NoBarrier_Load(&g_profiler_timing_enabled);
  if (current_timing_enabled == UNDEFINED_TIMING) {
    if (!base::CommandLine::InitializedForCurrentProcess())
      return true;
    current_timing_enabled =
        (base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
             switches::kProfilerTiming) ==
         switches::kProfilerTimingDisabledValue)
            ? DISABLED_TIMING
            : ENABLED_TIMING;
    base::subtle::NoBarrier_Store(&g_profiler_timing_enabled,
                                  current_timing_enabled);
  }
  return current_timing_enabled == ENABLED_TIMING;
}
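
// A note on the caching above (an explanatory sketch, not original to this
// file): the first call made after base::CommandLine is initialized latches
// the flag lookup into |g_profiler_timing_enabled|, so later calls skip the
// string comparison entirely. ThreadData::EnableProfilerTiming() below can
// still overwrite the cached state afterwards, e.g. based on a field trial.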

// Sanitize a thread name by replacing a trailing sequence of digits with "*".
// Examples:
// 1. "BrowserBlockingWorker1/23857" => "BrowserBlockingWorker1/*"
// 2. "Chrome_IOThread" => "Chrome_IOThread"
std::string SanitizeThreadName(const std::string& thread_name) {
  size_t i = thread_name.length();

  // Cast to unsigned char: passing a negative char to isdigit() is undefined.
  while (i > 0 && isdigit(static_cast<unsigned char>(thread_name[i - 1])))
    --i;

  if (i == thread_name.length())
    return thread_name;

  return thread_name.substr(0, i) + '*';
}
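
// Example of the relationship to kWorkerThreadSanitizedName above (the
// specific names are hypothetical): "WorkerThread-1" and "WorkerThread-7"
// both sanitize to "WorkerThread-*", so their tallies are reported under a
// single name, and retired ThreadData instances can be reused across such
// threads regardless of the trailing digits.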

}  // namespace

//------------------------------------------------------------------------------
// DeathData tallies durations when a death takes place.

DeathData::DeathData()
    : count_(0),
      sample_probability_count_(0),
      run_duration_sum_(0),
      queue_duration_sum_(0),
      run_duration_max_(0),
      queue_duration_max_(0),
      alloc_ops_(0),
      free_ops_(0),
      allocated_bytes_(0),
      freed_bytes_(0),
      alloc_overhead_bytes_(0),
      max_allocated_bytes_(0),
      run_duration_sample_(0),
      queue_duration_sample_(0),
      last_phase_snapshot_(nullptr) {}

DeathData::DeathData(const DeathData& other)
    : count_(other.count_),
      sample_probability_count_(other.sample_probability_count_),
      run_duration_sum_(other.run_duration_sum_),
      queue_duration_sum_(other.queue_duration_sum_),
      run_duration_max_(other.run_duration_max_),
      queue_duration_max_(other.queue_duration_max_),
      alloc_ops_(other.alloc_ops_),
      free_ops_(other.free_ops_),
      allocated_bytes_(other.allocated_bytes_),
      freed_bytes_(other.freed_bytes_),
      alloc_overhead_bytes_(other.alloc_overhead_bytes_),
      max_allocated_bytes_(other.max_allocated_bytes_),
      run_duration_sample_(other.run_duration_sample_),
      queue_duration_sample_(other.queue_duration_sample_),
      last_phase_snapshot_(nullptr) {
  // This constructor will be used by std::map when adding new DeathData values
  // to the map. At that point, last_phase_snapshot_ is still nullptr, so we
  // don't need to worry about ownership transfer.
  DCHECK(other.last_phase_snapshot_ == nullptr);
}

DeathData::~DeathData() {
  while (last_phase_snapshot_) {
    const DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_;
    last_phase_snapshot_ = snapshot->prev;
    delete snapshot;
  }
}

// TODO(jar): I need to see if this macro to optimize branching is worth using.
//
// This macro has no branching, so it is surely fast, and is equivalent to:
//   if (assign_it)
//     target = source;
// We use a macro rather than a template to force this to inline.
// Related code for calculating max is discussed on the web.
#define CONDITIONAL_ASSIGN(assign_it, target, source) \
  ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it))
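
// Worked example of the branchless assignment above (explanatory only): with
// assign_it == 1, the mask -static_cast<int32_t>(1) is all ones, so
//   target ^= (target ^ source)  ==>  target == source;
// with assign_it == 0, the mask is all zeros, so
//   target ^= 0                  ==>  target is unchanged.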

void DeathData::RecordDurations(const int32_t queue_duration,
                                const int32_t run_duration,
                                const uint32_t random_number) {
  // We'll just clamp at INT_MAX, but we should note this in the UI as such.
  if (count_ < INT_MAX)
    base::subtle::NoBarrier_Store(&count_, count_ + 1);

  int sample_probability_count =
      base::subtle::NoBarrier_Load(&sample_probability_count_);
  if (sample_probability_count < INT_MAX)
    ++sample_probability_count;
  base::subtle::NoBarrier_Store(&sample_probability_count_,
                                sample_probability_count);

  base::subtle::NoBarrier_Store(&queue_duration_sum_,
                                queue_duration_sum_ + queue_duration);
  base::subtle::NoBarrier_Store(&run_duration_sum_,
                                run_duration_sum_ + run_duration);

  if (queue_duration_max() < queue_duration)
    base::subtle::NoBarrier_Store(&queue_duration_max_, queue_duration);
  if (run_duration_max() < run_duration)
    base::subtle::NoBarrier_Store(&run_duration_max_, run_duration);

  // Take a uniformly distributed sample over all durations ever supplied
  // during the current profiling phase.
  // The probability that we (instead) use this new sample is
  // 1/sample_probability_count_. This results in a completely uniform
  // selection of the sample (at least when we don't clamp
  // sample_probability_count_... but that should be inconsequentially likely).
  // We ignore the fact that we correlated our selection of a sample to the run
  // and queue times (i.e., we used them to generate random_number).
  CHECK_GT(sample_probability_count, 0);
  if (0 == (random_number % sample_probability_count)) {
    base::subtle::NoBarrier_Store(&queue_duration_sample_, queue_duration);
    base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration);
  }
}
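
// The sampling above amounts to a single-slot reservoir sample (an
// explanatory note, not original to this file): after the Nth recording in a
// phase, sample_probability_count_ == N and the new durations replace the
// stored sample with probability 1/N, which by induction leaves each of the
// N durations in the slot with equal probability 1/N.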

void DeathData::RecordAllocations(const uint32_t alloc_ops,
                                  const uint32_t free_ops,
                                  const uint32_t allocated_bytes,
                                  const uint32_t freed_bytes,
                                  const uint32_t alloc_overhead_bytes,
                                  const uint32_t max_allocated_bytes) {
  // Use saturating arithmetic.
  SaturatingMemberAdd(alloc_ops, &alloc_ops_);
  SaturatingMemberAdd(free_ops, &free_ops_);
  SaturatingMemberAdd(allocated_bytes, &allocated_bytes_);
  SaturatingMemberAdd(freed_bytes, &freed_bytes_);
  SaturatingMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_);

  int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes);
  if (max > max_allocated_bytes_)
    base::subtle::NoBarrier_Store(&max_allocated_bytes_, max);
}

void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
  // Snapshotting and storing current state.
  last_phase_snapshot_ =
      new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_);

  // Not touching fields for which a delta can be computed by comparing with a
  // snapshot from the previous phase. Resetting other fields. Sample values
  // will be reset upon next death recording because sample_probability_count_
  // is set to 0.
  // We avoid resetting to 0 in favor of deltas whenever possible. The reason
  // is that for incrementable fields, resetting to 0 from the snapshot thread
  // potentially in parallel with incrementing in the death thread may result
  // in significant data corruption that has a potential to grow with time.
  // Not resetting incrementable fields and using deltas will cause any
  // off-by-little corruptions to be likely fixed at the next snapshot.
  // The max values are not incrementable, and cannot be deduced using deltas
  // for a given phase. Hence, we have to reset them to 0. But the potential
  // damage is limited to getting the previous phase's max to apply for the
  // next phase, and the error doesn't have a potential to keep growing with
  // new resets.
  // sample_probability_count_ is incrementable, but must be reset to 0 at the
  // phase end, so that we start a new uniformly randomized sample selection
  // after the reset. These fields are updated using atomics. However, race
  // conditions are possible since these are updated individually and not
  // together atomically, resulting in the values being mutually inconsistent.
  // The damage is limited to selecting a wrong sample, which is not something
  // that can cause accumulating or cascading effects.
  // If there were no inconsistencies caused by race conditions, we never send
  // a sample for the previous phase in the next phase's snapshot because
  // ThreadData::SnapshotExecutedTasks doesn't send deltas with 0 count.
  base::subtle::NoBarrier_Store(&sample_probability_count_, 0);
  base::subtle::NoBarrier_Store(&run_duration_max_, 0);
  base::subtle::NoBarrier_Store(&queue_duration_max_, 0);
}

void DeathData::SaturatingMemberAdd(const uint32_t addend,
                                    base::subtle::Atomic32* sum) {
  // Bail out quickly if there is no work to do or we are already saturated.
  if (addend == 0U || *sum == INT_MAX)
    return;

  base::CheckedNumeric<int32_t> new_sum = *sum;
  new_sum += addend;
  base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(INT_MAX));
}
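
// Usage sketch (illustrative numbers): with *sum == INT_MAX - 10 and
// addend == 25, the checked addition overflows int32_t, so
// ValueOrDefault(INT_MAX) stores INT_MAX and the tally pegs at saturation
// instead of wrapping negative.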

//------------------------------------------------------------------------------
DeathDataSnapshot::DeathDataSnapshot()
    : count(-1),
      run_duration_sum(-1),
      run_duration_max(-1),
      run_duration_sample(-1),
      queue_duration_sum(-1),
      queue_duration_max(-1),
      queue_duration_sample(-1),
      alloc_ops(-1),
      free_ops(-1),
      allocated_bytes(-1),
      freed_bytes(-1),
      alloc_overhead_bytes(-1),
      max_allocated_bytes(-1) {}

DeathDataSnapshot::DeathDataSnapshot(int count,
                                     int32_t run_duration_sum,
                                     int32_t run_duration_max,
                                     int32_t run_duration_sample,
                                     int32_t queue_duration_sum,
                                     int32_t queue_duration_max,
                                     int32_t queue_duration_sample,
                                     int32_t alloc_ops,
                                     int32_t free_ops,
                                     int32_t allocated_bytes,
                                     int32_t freed_bytes,
                                     int32_t alloc_overhead_bytes,
                                     int32_t max_allocated_bytes)
    : count(count),
      run_duration_sum(run_duration_sum),
      run_duration_max(run_duration_max),
      run_duration_sample(run_duration_sample),
      queue_duration_sum(queue_duration_sum),
      queue_duration_max(queue_duration_max),
      queue_duration_sample(queue_duration_sample),
      alloc_ops(alloc_ops),
      free_ops(free_ops),
      allocated_bytes(allocated_bytes),
      freed_bytes(freed_bytes),
      alloc_overhead_bytes(alloc_overhead_bytes),
      max_allocated_bytes(max_allocated_bytes) {}

DeathDataSnapshot::DeathDataSnapshot(const DeathData& death_data)
    : count(death_data.count()),
      run_duration_sum(death_data.run_duration_sum()),
      run_duration_max(death_data.run_duration_max()),
      run_duration_sample(death_data.run_duration_sample()),
      queue_duration_sum(death_data.queue_duration_sum()),
      queue_duration_max(death_data.queue_duration_max()),
      queue_duration_sample(death_data.queue_duration_sample()),
      alloc_ops(death_data.alloc_ops()),
      free_ops(death_data.free_ops()),
      allocated_bytes(death_data.allocated_bytes()),
      freed_bytes(death_data.freed_bytes()),
      alloc_overhead_bytes(death_data.alloc_overhead_bytes()),
      max_allocated_bytes(death_data.max_allocated_bytes()) {}

DeathDataSnapshot::DeathDataSnapshot(const DeathDataSnapshot& death_data) =
    default;

DeathDataSnapshot::~DeathDataSnapshot() {
}

DeathDataSnapshot DeathDataSnapshot::Delta(
    const DeathDataSnapshot& older) const {
  return DeathDataSnapshot(
      count - older.count, run_duration_sum - older.run_duration_sum,
      run_duration_max, run_duration_sample,
      queue_duration_sum - older.queue_duration_sum, queue_duration_max,
      queue_duration_sample, alloc_ops - older.alloc_ops,
      free_ops - older.free_ops, allocated_bytes - older.allocated_bytes,
      freed_bytes - older.freed_bytes,
      alloc_overhead_bytes - older.alloc_overhead_bytes, max_allocated_bytes);
}
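
// Delta() note (illustrative numbers): if the older snapshot had count == 10
// and this one has count == 25, the delta reports count == 15 and differences
// the sums likewise, while run_duration_max and the sampled durations are
// passed through as-is because DeathData resets them at each phase boundary
// rather than accumulating them across phases.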

//------------------------------------------------------------------------------
BirthOnThread::BirthOnThread(const Location& location,
                             const ThreadData& current)
    : location_(location),
      birth_thread_(&current) {
}

//------------------------------------------------------------------------------
BirthOnThreadSnapshot::BirthOnThreadSnapshot() {
}

BirthOnThreadSnapshot::BirthOnThreadSnapshot(const BirthOnThread& birth)
    : location(birth.location()),
      sanitized_thread_name(birth.birth_thread()->sanitized_thread_name()) {}

BirthOnThreadSnapshot::~BirthOnThreadSnapshot() {
}

//------------------------------------------------------------------------------
Births::Births(const Location& location, const ThreadData& current)
    : BirthOnThread(location, current),
      birth_count_(1) { }

int Births::birth_count() const { return birth_count_; }

void Births::RecordBirth() { ++birth_count_; }

//------------------------------------------------------------------------------
// ThreadData maintains the central data for all births and deaths on a single
// thread.

// TODO(jar): We should pull all these static vars together, into a struct, and
// optimize layout so that we benefit from locality of reference during accesses
// to them.

// static
ThreadData::NowFunction* ThreadData::now_function_for_testing_ = NULL;

// A TLS slot which points to the ThreadData instance for the current thread.
// We do a fake initialization here (zeroing out data), and then the real
// in-place construction happens when we call tls_index_.Initialize().
// static
base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER;

// static
int ThreadData::cleanup_count_ = 0;

// static
int ThreadData::incarnation_counter_ = 0;

// static
ThreadData* ThreadData::all_thread_data_list_head_ = NULL;

// static
ThreadData* ThreadData::first_retired_thread_data_ = NULL;

// static
base::LazyInstance<base::Lock>::Leaky
    ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER;

// static
base::subtle::Atomic32 ThreadData::status_ = ThreadData::UNINITIALIZED;

ThreadData::ThreadData(const std::string& sanitized_thread_name)
    : next_(NULL),
      next_retired_thread_data_(NULL),
      sanitized_thread_name_(sanitized_thread_name),
      incarnation_count_for_pool_(-1),
      current_stopwatch_(NULL) {
  // Cast to unsigned char: passing a negative char to isdigit() is undefined.
  DCHECK(sanitized_thread_name_.empty() ||
         !isdigit(static_cast<unsigned char>(sanitized_thread_name_.back())));
  PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
}

ThreadData::~ThreadData() {
}

void ThreadData::PushToHeadOfList() {
  // Toss in a hint of randomness (atop the uninitialized value).
  (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_,
                                                 sizeof(random_number_));
  MSAN_UNPOISON(&random_number_, sizeof(random_number_));
  random_number_ += static_cast<uint32_t>(this - static_cast<ThreadData*>(0));
  random_number_ ^= (Now() - TrackedTime()).InMilliseconds();

  DCHECK(!next_);
  base::AutoLock lock(*list_lock_.Pointer());
  incarnation_count_for_pool_ = incarnation_counter_;
  next_ = all_thread_data_list_head_;
  all_thread_data_list_head_ = this;
}

// static
ThreadData* ThreadData::first() {
  base::AutoLock lock(*list_lock_.Pointer());
  return all_thread_data_list_head_;
}

ThreadData* ThreadData::next() const { return next_; }

// static
void ThreadData::InitializeThreadContext(const std::string& thread_name) {
  if (base::WorkerPool::RunsTasksOnCurrentThread())
    return;
  DCHECK_NE(thread_name, kWorkerThreadSanitizedName);
  EnsureTlsInitialization();
  ThreadData* current_thread_data =
      reinterpret_cast<ThreadData*>(tls_index_.Get());
  if (current_thread_data)
    return;  // Browser tests instigate this.
  current_thread_data =
      GetRetiredOrCreateThreadData(SanitizeThreadName(thread_name));
  tls_index_.Set(current_thread_data);
}

// static
ThreadData* ThreadData::Get() {
  if (!tls_index_.initialized())
    return NULL;  // For unittests only.
  ThreadData* registered = reinterpret_cast<ThreadData*>(tls_index_.Get());
  if (registered)
    return registered;

  // We must be a worker thread, since we didn't pre-register.
  ThreadData* worker_thread_data =
      GetRetiredOrCreateThreadData(kWorkerThreadSanitizedName);
  tls_index_.Set(worker_thread_data);
  return worker_thread_data;
}

// static
void ThreadData::OnThreadTermination(void* thread_data) {
  DCHECK(thread_data);  // TLS should *never* call us with a NULL.
  // We must NOT do any allocations during this callback. There is a chance
  // that the allocator is no longer active on this thread.
  reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup();
}

void ThreadData::OnThreadTerminationCleanup() {
  // We must NOT do any allocations during this callback. There is a chance
  // that the allocator is no longer active on this thread.

  // The list_lock_ was created when we registered the callback, so it won't be
  // allocated here despite the lazy reference.
  base::AutoLock lock(*list_lock_.Pointer());
  if (incarnation_counter_ != incarnation_count_for_pool_)
    return;  // ThreadData was constructed in an earlier unit test.
  ++cleanup_count_;

  // Add this ThreadData to a retired list so that it can be reused by a thread
  // with the same sanitized name in the future.
  // |next_retired_thread_data_| is expected to be nullptr for a ThreadData
  // associated with an active thread.
  DCHECK(!next_retired_thread_data_);
  next_retired_thread_data_ = first_retired_thread_data_;
  first_retired_thread_data_ = this;
}

// static
void ThreadData::Snapshot(int current_profiling_phase,
                          ProcessDataSnapshot* process_data_snapshot) {
  // Get an unchanging copy of a ThreadData list.
  ThreadData* my_list = ThreadData::first();

  // Gather data serially.
  // This hackish approach *can* get some slightly corrupt tallies, as we are
  // grabbing values without the protection of a lock, but it has the advantage
  // of working even with threads that don't have message loops. If a user
  // sees any strangeness, they can always just run their stats gathering a
  // second time.
  BirthCountMap birth_counts;
  for (ThreadData* thread_data = my_list; thread_data;
       thread_data = thread_data->next()) {
    thread_data->SnapshotExecutedTasks(current_profiling_phase,
                                       &process_data_snapshot->phased_snapshots,
                                       &birth_counts);
  }

  // Add births that are still active -- i.e. objects that have tallied a
  // birth, but have not yet tallied a matching death, and hence must be either
  // running, queued up, or being held in limbo for future posting.
  auto* current_phase_tasks =
      &process_data_snapshot->phased_snapshots[current_profiling_phase].tasks;
  for (const auto& birth_count : birth_counts) {
    if (birth_count.second > 0) {
      current_phase_tasks->push_back(
          TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first),
                       DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0,
                                         0, 0, 0, 0, 0, 0),
                       "Still_Alive"));
    }
  }
}

// static
void ThreadData::OnProfilingPhaseCompleted(int profiling_phase) {
  // Get an unchanging copy of a ThreadData list.
  ThreadData* my_list = ThreadData::first();

  // Add snapshots for all instances of death data in all threads serially.
  // This hackish approach *can* get some slightly corrupt tallies, as we are
  // grabbing values without the protection of a lock, but it has the advantage
  // of working even with threads that don't have message loops. Any corruption
  // shouldn't cause "cascading damage" to anything else (in later phases).
  for (ThreadData* thread_data = my_list; thread_data;
       thread_data = thread_data->next()) {
    thread_data->OnProfilingPhaseCompletedOnThread(profiling_phase);
  }
}

Births* ThreadData::TallyABirth(const Location& location) {
  BirthMap::iterator it = birth_map_.find(location);
  Births* child;
  if (it != birth_map_.end()) {
    child = it->second;
    child->RecordBirth();
  } else {
    child = new Births(location, *this);  // Leak this.
    // Lock since the map may get relocated now, and other threads sometimes
    // snapshot it (but they lock before copying it).
    base::AutoLock lock(map_lock_);
    birth_map_[location] = child;
  }

  return child;
}

void ThreadData::TallyADeath(const Births& births,
                             int32_t queue_duration,
                             const TaskStopwatch& stopwatch) {
  int32_t run_duration = stopwatch.RunDurationMs();

  // Stir in some randomness, plus add a constant in case durations are zero.
  const uint32_t kSomePrimeNumber = 2147483647;
  random_number_ += queue_duration + run_duration + kSomePrimeNumber;
  // An address is going to have some randomness to it as well ;-).
  random_number_ ^=
      static_cast<uint32_t>(&births - reinterpret_cast<Births*>(0));

  DeathMap::iterator it = death_map_.find(&births);
  DeathData* death_data;
  if (it != death_map_.end()) {
    death_data = &it->second;
  } else {
    base::AutoLock lock(map_lock_);  // Lock as the map may get relocated now.
    death_data = &death_map_[&births];
  }  // Release lock ASAP.
  death_data->RecordDurations(queue_duration, run_duration, random_number_);

#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
  if (stopwatch.heap_tracking_enabled()) {
    base::debug::ThreadHeapUsage heap_usage = stopwatch.heap_usage().usage();
    // Saturate the 64 bit counts on conversion to 32 bit storage.
    death_data->RecordAllocations(
        base::saturated_cast<int32_t>(heap_usage.alloc_ops),
        base::saturated_cast<int32_t>(heap_usage.free_ops),
        base::saturated_cast<int32_t>(heap_usage.alloc_bytes),
        base::saturated_cast<int32_t>(heap_usage.free_bytes),
        base::saturated_cast<int32_t>(heap_usage.alloc_overhead_bytes),
        base::saturated_cast<int32_t>(heap_usage.max_allocated_bytes));
  }
#endif
}
603
604// static
[email protected]180c85e2011-07-26 18:25:16605Births* ThreadData::TallyABirthIfActive(const Location& location) {
[email protected]702a12d2012-02-10 19:43:42606 if (!TrackingStatus())
[email protected]84b57952011-10-15 23:52:45607 return NULL;
608 ThreadData* current_thread_data = Get();
609 if (!current_thread_data)
610 return NULL;
611 return current_thread_data->TallyABirth(location);
[email protected]180c85e2011-07-26 18:25:16612}
613
614// static
[email protected]b2a9bbd2011-10-31 22:36:21615void ThreadData::TallyRunOnNamedThreadIfTracking(
616 const base::TrackingInfo& completed_task,
vadimt12f0f7d2014-09-15 19:19:38617 const TaskStopwatch& stopwatch) {
[email protected]b2a9bbd2011-10-31 22:36:21618 // Even if we have been DEACTIVATED, we will process any pending births so
619 // that our data structures (which counted the outstanding births) remain
620 // consistent.
vadimte2de4732015-04-27 21:43:02621 const Births* births = completed_task.birth_tally;
622 if (!births)
[email protected]84b57952011-10-15 23:52:45623 return;
vadimt12f0f7d2014-09-15 19:19:38624 ThreadData* current_thread_data = stopwatch.GetThreadData();
[email protected]84b57952011-10-15 23:52:45625 if (!current_thread_data)
626 return;
627
[email protected]b2a9bbd2011-10-31 22:36:21628 // Watch out for a race where status_ is changing, and hence one or both
[email protected]8aa1e6e2011-12-14 01:36:48629 // of start_of_run or end_of_run is zero. In that case, we didn't bother to
[email protected]b2a9bbd2011-10-31 22:36:21630 // get a time value since we "weren't tracking" and we were trying to be
vadimte2de4732015-04-27 21:43:02631 // efficient by not calling for a genuine time value. For simplicity, we'll
[email protected]b2a9bbd2011-10-31 22:36:21632 // use a default zero duration when we can't calculate a true value.
vadimt12f0f7d2014-09-15 19:19:38633 TrackedTime start_of_run = stopwatch.StartTime();
avi9b6f42932015-12-26 22:15:14634 int32_t queue_duration = 0;
[email protected]b2a9bbd2011-10-31 22:36:21635 if (!start_of_run.is_null()) {
[email protected]e1a38d602013-07-10 17:50:22636 queue_duration = (start_of_run - completed_task.EffectiveTimePosted())
637 .InMilliseconds();
[email protected]b2a9bbd2011-10-31 22:36:21638 }
vadimte2de4732015-04-27 21:43:02639 current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
[email protected]b2a9bbd2011-10-31 22:36:21640}
641
642// static
void ThreadData::TallyRunOnWorkerThreadIfTracking(
    const Births* births,
    const TrackedTime& time_posted,
    const TaskStopwatch& stopwatch) {
  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!births)
    return;

  // TODO(jar): Support the option to coalesce all worker-thread activity under
  // one ThreadData instance that uses locks to protect *all* access. This will
  // reduce memory (making it provably bounded), but run incrementally slower
  // (since we'll use locks on TallyABirth and TallyADeath). The good news is
  // that the locks on TallyADeath will be *after* the worker thread has run,
  // and hence nothing will be waiting for the completion (... besides some
  // other thread that might like to run). Also, the worker threads' tasks are
  // generally longer, and hence the cost of the lock may perchance be
  // amortized over the long task's lifetime.
  ThreadData* current_thread_data = stopwatch.GetThreadData();
  if (!current_thread_data)
    return;

  TrackedTime start_of_run = stopwatch.StartTime();
  int32_t queue_duration = 0;
  if (!start_of_run.is_null()) {
    queue_duration = (start_of_run - time_posted).InMilliseconds();
  }
  current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
}

// static
void ThreadData::TallyRunInAScopedRegionIfTracking(
    const Births* births,
    const TaskStopwatch& stopwatch) {
  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!births)
    return;

  ThreadData* current_thread_data = stopwatch.GetThreadData();
  if (!current_thread_data)
    return;

  int32_t queue_duration = 0;
  current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
}

void ThreadData::SnapshotExecutedTasks(
    int current_profiling_phase,
    PhasedProcessDataSnapshotMap* phased_snapshots,
    BirthCountMap* birth_counts) {
  // Get copy of data, so that the data will not change during the iterations
  // and processing.
  BirthMap birth_map;
  DeathsSnapshot deaths;
  SnapshotMaps(current_profiling_phase, &birth_map, &deaths);

  for (const auto& birth : birth_map) {
    (*birth_counts)[birth.second] += birth.second->birth_count();
  }

  for (const auto& death : deaths) {
    (*birth_counts)[death.first] -= death.first->birth_count();

    // For the current death data, walk through all its snapshots, starting
    // from the current one, then from the previous profiling phase etc., and
    // for each snapshot calculate the delta between the snapshot and the
    // previous phase, if any. Store the deltas in the result.
    for (const DeathDataPhaseSnapshot* phase = &death.second; phase;
         phase = phase->prev) {
      const DeathDataSnapshot& death_data =
          phase->prev ? phase->death_data.Delta(phase->prev->death_data)
                      : phase->death_data;

      if (death_data.count > 0) {
        (*phased_snapshots)[phase->profiling_phase].tasks.push_back(
            TaskSnapshot(BirthOnThreadSnapshot(*death.first), death_data,
                         sanitized_thread_name()));
      }
    }
  }
}

// This may be called from another thread.
void ThreadData::SnapshotMaps(int profiling_phase,
                              BirthMap* birth_map,
                              DeathsSnapshot* deaths) {
  base::AutoLock lock(map_lock_);

  for (const auto& birth : birth_map_)
    (*birth_map)[birth.first] = birth.second;

  for (const auto& death : death_map_) {
    deaths->push_back(std::make_pair(
        death.first,
        DeathDataPhaseSnapshot(profiling_phase, death.second,
                               death.second.last_phase_snapshot())));
  }
}

void ThreadData::OnProfilingPhaseCompletedOnThread(int profiling_phase) {
  base::AutoLock lock(map_lock_);

  for (auto& death : death_map_) {
    death.second.OnProfilingPhaseCompleted(profiling_phase);
  }
}

void ThreadData::EnsureTlsInitialization() {
  if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
    return;  // Someone else did the initialization.
  // Due to racy lazy initialization in tests, we'll need to recheck status_
  // after we acquire the lock.

  // Ensure that we don't double initialize TLS. We are called when single
  // threaded in the product, but some tests may be racy and lazy about our
  // initialization.
  base::AutoLock lock(*list_lock_.Pointer());
  if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
    return;  // Someone raced in here and beat us.

  // Perform the "real" TLS initialization now, and leave it intact through
  // process termination.
  if (!tls_index_.initialized()) {  // Testing may have initialized this.
    DCHECK_EQ(base::subtle::NoBarrier_Load(&status_), UNINITIALIZED);
    tls_index_.Initialize(&ThreadData::OnThreadTermination);
    DCHECK(tls_index_.initialized());
  } else {
    // TLS was initialized for us earlier.
    DCHECK_EQ(base::subtle::NoBarrier_Load(&status_), DORMANT_DURING_TESTS);
  }

  // Incarnation counter is only significant to testing, as it otherwise will
  // never again change in this process.
  ++incarnation_counter_;

  // The lock is not critical for setting status_, but it doesn't hurt. It also
  // ensures that if we have a racy initialization, that we'll bail as soon as
  // we get the lock earlier in this method.
  base::subtle::Release_Store(&status_, kInitialStartupState);
  DCHECK(base::subtle::NoBarrier_Load(&status_) != UNINITIALIZED);

#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
  // Make sure heap tracking is enabled ASAP if the default state is active.
  if (kInitialStartupState == PROFILING_ACTIVE &&
      !base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled()) {
    base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
  }
#endif  // BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
}

// static
void ThreadData::InitializeAndSetTrackingStatus(Status status) {
  DCHECK_GE(status, DEACTIVATED);
  DCHECK_LE(status, PROFILING_ACTIVE);

  EnsureTlsInitialization();  // No-op if already initialized.

  if (status > DEACTIVATED) {
    status = PROFILING_ACTIVE;

#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
    if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled())
      base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
#endif  // BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
  }
  base::subtle::Release_Store(&status_, status);
}

// static
ThreadData::Status ThreadData::status() {
  return static_cast<ThreadData::Status>(base::subtle::Acquire_Load(&status_));
}

// static
bool ThreadData::TrackingStatus() {
  return base::subtle::Acquire_Load(&status_) > DEACTIVATED;
}

// static
void ThreadData::EnableProfilerTiming() {
  base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, ENABLED_TIMING);
}

// static
TrackedTime ThreadData::Now() {
  if (now_function_for_testing_)
    return TrackedTime::FromMilliseconds((*now_function_for_testing_)());
  if (IsProfilerTimingEnabled() && TrackingStatus())
    return TrackedTime::Now();
  return TrackedTime();  // Super fast when disabled, or not compiled.
}

// static
void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) {
  base::AutoLock lock(*list_lock_.Pointer());

  // TODO(jar): until this is working on XP, don't run the real test.
#if 0
  // Verify that we've at least shut down/cleaned up the major named threads.
  // The caller should tell us how many thread shutdowns should have taken
  // place by now.
  CHECK_GT(cleanup_count_, major_threads_shutdown_count);
#endif
}

// static
void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
  // This is only called from test code, where we need to clean up so that
  // additional tests can be run.
  // We must be single threaded... but be careful anyway.
  InitializeAndSetTrackingStatus(DEACTIVATED);

  ThreadData* thread_data_list;
  {
    base::AutoLock lock(*list_lock_.Pointer());
    thread_data_list = all_thread_data_list_head_;
    all_thread_data_list_head_ = NULL;
    ++incarnation_counter_;
    // To be clean, break apart the retired worker list (though we leak them).
    while (first_retired_thread_data_) {
      ThreadData* thread_data = first_retired_thread_data_;
      first_retired_thread_data_ = thread_data->next_retired_thread_data_;
      thread_data->next_retired_thread_data_ = nullptr;
    }
  }

  // Put most global statics back in pristine shape.
  cleanup_count_ = 0;
  tls_index_.Set(NULL);
  // Almost UNINITIALIZED.
  base::subtle::Release_Store(&status_, DORMANT_DURING_TESTS);

  // To avoid any chance of racing in unit tests, which is the only place we
  // call this function, we may sometimes leak all the data structures we
  // recovered, as they may still be in use on threads from prior tests!
  if (leak) {
    ThreadData* thread_data = thread_data_list;
    while (thread_data) {
      ANNOTATE_LEAKING_OBJECT_PTR(thread_data);
      thread_data = thread_data->next();
    }
    return;
  }

  // When we want to clean up (on a single thread), here is what we do.

  // Do actual recursive delete in all ThreadData instances.
  while (thread_data_list) {
    ThreadData* next_thread_data = thread_data_list;
    thread_data_list = thread_data_list->next();

    for (BirthMap::iterator it = next_thread_data->birth_map_.begin();
         next_thread_data->birth_map_.end() != it; ++it)
      delete it->second;  // Delete the Birth Records.
    delete next_thread_data;  // Includes all Death Records.
  }
}

// static
ThreadData* ThreadData::GetRetiredOrCreateThreadData(
    const std::string& sanitized_thread_name) {
  SCOPED_UMA_HISTOGRAM_TIMER("TrackedObjects.GetRetiredOrCreateThreadData");

  {
    base::AutoLock lock(*list_lock_.Pointer());
    ThreadData** pcursor = &first_retired_thread_data_;
    ThreadData* cursor = first_retired_thread_data_;

    // Assuming that there aren't more than a few tens of retired ThreadData
    // instances, this lookup should be quick compared to the thread creation
    // time. Retired ThreadData instances cannot be stored in a map because
    // insertions are done from OnThreadTerminationCleanup() where allocations
    // are not allowed.
    //
    // Note: Test processes may have more than a few tens of retired ThreadData
    // instances.
    while (cursor) {
      if (cursor->sanitized_thread_name() == sanitized_thread_name) {
        DCHECK_EQ(*pcursor, cursor);
        *pcursor = cursor->next_retired_thread_data_;
        cursor->next_retired_thread_data_ = nullptr;
        return cursor;
      }
      pcursor = &cursor->next_retired_thread_data_;
      cursor = cursor->next_retired_thread_data_;
    }
  }

  return new ThreadData(sanitized_thread_name);
}
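
// The |pcursor|/|cursor| walk above is the classic pointer-to-pointer unlink
// (an explanatory note): |pcursor| always names the link that points at
// |cursor|, so a matching node is removed from the singly linked retired list
// with a single assignment and no special case for the head element.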

//------------------------------------------------------------------------------
TaskStopwatch::TaskStopwatch()
    : wallclock_duration_ms_(0),
      current_thread_data_(NULL),
      excluded_duration_ms_(0),
      parent_(NULL) {
#if DCHECK_IS_ON()
  state_ = CREATED;
  child_ = NULL;
#endif
#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
  heap_tracking_enabled_ =
      base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled();
#endif
}

TaskStopwatch::~TaskStopwatch() {
#if DCHECK_IS_ON()
  DCHECK(state_ != RUNNING);
  DCHECK(child_ == NULL);
#endif
}

void TaskStopwatch::Start() {
#if DCHECK_IS_ON()
  DCHECK(state_ == CREATED);
  state_ = RUNNING;
#endif

  start_time_ = ThreadData::Now();
#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
  if (heap_tracking_enabled_)
    heap_usage_.Start();
#endif

  current_thread_data_ = ThreadData::Get();
  if (!current_thread_data_)
    return;

  parent_ = current_thread_data_->current_stopwatch_;
#if DCHECK_IS_ON()
  if (parent_) {
    DCHECK(parent_->state_ == RUNNING);
    DCHECK(parent_->child_ == NULL);
    parent_->child_ = this;
  }
#endif
  current_thread_data_->current_stopwatch_ = this;
}

void TaskStopwatch::Stop() {
  const TrackedTime end_time = ThreadData::Now();
#if DCHECK_IS_ON()
  DCHECK(state_ == RUNNING);
  state_ = STOPPED;
  DCHECK(child_ == NULL);
#endif
#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
  if (heap_tracking_enabled_)
    heap_usage_.Stop(true);
#endif

  if (!start_time_.is_null() && !end_time.is_null()) {
    wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds();
  }

  if (!current_thread_data_)
    return;

  DCHECK(current_thread_data_->current_stopwatch_ == this);
  current_thread_data_->current_stopwatch_ = parent_;
  if (!parent_)
    return;

#if DCHECK_IS_ON()
  DCHECK(parent_->state_ == RUNNING);
  DCHECK(parent_->child_ == this);
  parent_->child_ = NULL;
#endif
  parent_->excluded_duration_ms_ += wallclock_duration_ms_;
  parent_ = NULL;
}

TrackedTime TaskStopwatch::StartTime() const {
#if DCHECK_IS_ON()
  DCHECK(state_ != CREATED);
#endif

  return start_time_;
}

int32_t TaskStopwatch::RunDurationMs() const {
#if DCHECK_IS_ON()
  DCHECK(state_ == STOPPED);
#endif

  return wallclock_duration_ms_ - excluded_duration_ms_;
}
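
// Exclusive-time sketch (hypothetical numbers): if a task ran for 100 ms of
// wall-clock time and a nested TaskStopwatch covered 30 ms of that, Stop()
// added the 30 ms to this stopwatch's excluded_duration_ms_, so
// RunDurationMs() reports 70 ms for the outer task alone.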

ThreadData* TaskStopwatch::GetThreadData() const {
#if DCHECK_IS_ON()
  DCHECK(state_ != CREATED);
#endif

  return current_thread_data_;
}

//------------------------------------------------------------------------------
// DeathDataPhaseSnapshot

DeathDataPhaseSnapshot::DeathDataPhaseSnapshot(
    int profiling_phase,
    const DeathData& death,
    const DeathDataPhaseSnapshot* prev)
    : profiling_phase(profiling_phase), death_data(death), prev(prev) {}

//------------------------------------------------------------------------------
// TaskSnapshot

TaskSnapshot::TaskSnapshot() {
}

TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth,
                           const DeathDataSnapshot& death_data,
                           const std::string& death_sanitized_thread_name)
    : birth(birth),
      death_data(death_data),
      death_sanitized_thread_name(death_sanitized_thread_name) {}

TaskSnapshot::~TaskSnapshot() {
}

//------------------------------------------------------------------------------
// ProcessDataPhaseSnapshot

ProcessDataPhaseSnapshot::ProcessDataPhaseSnapshot() {
}

ProcessDataPhaseSnapshot::ProcessDataPhaseSnapshot(
    const ProcessDataPhaseSnapshot& other) = default;

ProcessDataPhaseSnapshot::~ProcessDataPhaseSnapshot() {
}

//------------------------------------------------------------------------------
// ProcessDataSnapshot

ProcessDataSnapshot::ProcessDataSnapshot()
#if !defined(OS_NACL)
    : process_id(base::GetCurrentProcId()) {
#else
    : process_id(base::kNullProcessId) {
#endif
}

ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) =
    default;

ProcessDataSnapshot::~ProcessDataSnapshot() {
}

}  // namespace tracked_objects