blob: c7a6a3f3c95f1e22f6761f77ba9b5854815ae4b1 [file] [log] [blame]
[email protected]9fc44162012-01-23 22:56:411// Copyright (c) 2012 The Chromium Authors. All rights reserved.
license.botbf09a502008-08-24 00:55:552// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
initial.commitd7cae122008-07-26 21:49:384
5#include "base/tracked_objects.h"
6
[email protected]c014f2b32013-09-03 23:29:127#include <limits.h>
[email protected]7f8a4eb2012-03-19 21:46:278#include <stdlib.h>
[email protected]a5b94a92008-08-12 23:25:439
[email protected]7caab6dc2013-12-12 19:29:1010#include "base/atomicops.h"
[email protected]915b344f2013-12-11 12:49:1711#include "base/base_switches.h"
12#include "base/command_line.h"
[email protected]75086be2013-03-20 21:18:2213#include "base/compiler_specific.h"
[email protected]bf709abd2013-06-10 11:32:2014#include "base/debug/leak_annotations.h"
[email protected]c014f2b32013-09-03 23:29:1215#include "base/logging.h"
[email protected]dd4b51262013-07-25 21:38:2316#include "base/process/process_handle.h"
[email protected]90895d0f2012-02-15 23:05:0117#include "base/profiler/alternate_timer.h"
[email protected]c851cfd2013-06-10 20:11:1418#include "base/strings/stringprintf.h"
[email protected]a0447ff2011-12-04 21:14:0519#include "base/third_party/valgrind/memcheck.h"
[email protected]c014f2b32013-09-03 23:29:1220#include "base/tracking_info.h"
initial.commitd7cae122008-07-26 21:49:3821
[email protected]e1acf6f2008-10-27 20:43:3322using base::TimeDelta;
23
[email protected]c014f2b32013-09-03 23:29:1224namespace base {
25class TimeDelta;
26}
27
initial.commitd7cae122008-07-26 21:49:3828namespace tracked_objects {
29
namespace {

// When ThreadData is first initialized, should we start in an ACTIVE state to
// record all of the startup-time tasks, or should we start up DEACTIVATED, so
// that we only record after parsing the command line flag --enable-tracking.
// Note that the flag may force either state, so this really controls only the
// period of time up until that flag is parsed.  If there is no flag seen, then
// this state may prevail for much or all of the process lifetime.
const ThreadData::Status kInitialStartupState = ThreadData::PROFILING_ACTIVE;

// Control whether an alternate time source (Now() function) is supported by
// the ThreadData class.  This compile time flag should be set to true if we
// want other modules (such as a memory allocator, or a thread-specific CPU time
// clock) to be able to provide a thread-specific Now() function.  Without this
// compile-time flag, the code will only support the wall-clock time.  This flag
// can be flipped to efficiently disable this path (if there is a performance
// problem with its presence).
static const bool kAllowAlternateTimeSourceHandling = true;

// Possible states of the profiler timing enabledness.  The state starts as
// UNDEFINED_TIMING and is lazily resolved to ENABLED/DISABLED the first time
// IsProfilerTimingEnabled() runs with a parsed command line.
enum {
  UNDEFINED_TIMING,
  ENABLED_TIMING,
  DISABLED_TIMING,
};

// State of the profiler timing enabledness.  Accessed from multiple threads
// via NoBarrier atomic ops; see IsProfilerTimingEnabled() for why that is safe.
base::subtle::Atomic32 g_profiler_timing_enabled = UNDEFINED_TIMING;

// Returns whether profiler timing is enabled.  The default is true, but this
// may be overridden by a command-line flag.  Some platforms may
// programmatically set this command-line flag to the "off" value if it's not
// specified.
// This in turn can be overridden by explicitly calling
// ThreadData::EnableProfilerTiming, say, based on a field trial.
inline bool IsProfilerTimingEnabled() {
  // Reading |g_profiler_timing_enabled| is done without barrier because
  // multiple initialization is not an issue while the barrier can be relatively
  // costly given that this method is sometimes called in a tight loop.
  base::subtle::Atomic32 current_timing_enabled =
      base::subtle::NoBarrier_Load(&g_profiler_timing_enabled);
  if (current_timing_enabled == UNDEFINED_TIMING) {
    // Until the command line is available we cannot cache a decision; report
    // the default (enabled) and leave the global UNDEFINED for next time.
    if (!base::CommandLine::InitializedForCurrentProcess())
      return true;
    current_timing_enabled =
        (base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
             switches::kProfilerTiming) ==
         switches::kProfilerTimingDisabledValue)
            ? DISABLED_TIMING
            : ENABLED_TIMING;
    // Racing writers all compute the same value, so a NoBarrier store is fine.
    base::subtle::NoBarrier_Store(&g_profiler_timing_enabled,
                                  current_timing_enabled);
  }
  return current_timing_enabled == ENABLED_TIMING;
}

}  // namespace
[email protected]84b57952011-10-15 23:52:4586
initial.commitd7cae122008-07-26 21:49:3887//------------------------------------------------------------------------------
[email protected]63f5b0e2011-11-04 00:23:2788// DeathData tallies durations when a death takes place.
initial.commitd7cae122008-07-26 21:49:3889
// Zero all tallies: a fresh DeathData has recorded no deaths and owns no
// per-phase snapshots yet.
DeathData::DeathData()
    : count_(0),
      sample_probability_count_(0),
      run_duration_sum_(0),
      queue_duration_sum_(0),
      run_duration_max_(0),
      queue_duration_max_(0),
      run_duration_sample_(0),
      queue_duration_sample_(0),
      last_phase_snapshot_(nullptr) {
}
101
// Copy constructor: copies all tallies but deliberately does NOT take
// ownership of the source's snapshot chain (see DCHECK below).
DeathData::DeathData(const DeathData& other)
    : count_(other.count_),
      sample_probability_count_(other.sample_probability_count_),
      run_duration_sum_(other.run_duration_sum_),
      queue_duration_sum_(other.queue_duration_sum_),
      run_duration_max_(other.run_duration_max_),
      queue_duration_max_(other.queue_duration_max_),
      run_duration_sample_(other.run_duration_sample_),
      queue_duration_sample_(other.queue_duration_sample_),
      last_phase_snapshot_(nullptr) {
  // This constructor will be used by std::map when adding new DeathData values
  // to the map.  At that point, last_phase_snapshot_ is still NULL, so we don't
  // need to worry about ownership transfer.
  DCHECK(other.last_phase_snapshot_ == nullptr);
}
117
118DeathData::~DeathData() {
119 while (last_phase_snapshot_) {
120 const DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_;
121 last_phase_snapshot_ = snapshot->prev;
122 delete snapshot;
123 }
[email protected]b6b2b892011-12-04 07:19:10124}
125
// TODO(jar): I need to see if this macro to optimize branching is worth using.
//
// This macro has no branching, so it is surely fast, and is equivalent to:
//   if (assign_it)
//     target = source;
// We use a macro rather than a template to force this to inline.
// Related code for calculating max is discussed on the web.
// How it works: -static_cast<int32>(assign_it) is all-ones when assign_it is
// true and all-zeros when false, so the xor-mask either flips target to source
// or leaves it untouched.
#define CONDITIONAL_ASSIGN(assign_it, target, source) \
    ((target) ^= ((target) ^ (source)) & -static_cast<int32>(assign_it))
[email protected]b6b2b892011-12-04 07:19:10135
// Tallies one task death: accumulates run/queue duration sums and maxima, and
// reservoir-samples one (run, queue) pair per profiling phase.  May race with
// snapshotting threads by design; see comments below and in
// OnProfilingPhaseCompleted.
void DeathData::RecordDeath(const int32 queue_duration,
                            const int32 run_duration,
                            const uint32 random_number) {
  // We'll just clamp at INT_MAX, but we should note this in the UI as such.
  if (count_ < INT_MAX)
    ++count_;

  // Read-modify-write through a local so that a concurrent reset of
  // sample_probability_count_ to 0 (by OnProfilingPhaseCompleted) cannot be
  // observed mid-update; we also clamp it at INT_MAX like count_.
  int sample_probability_count = sample_probability_count_;
  if (sample_probability_count < INT_MAX)
    ++sample_probability_count;
  sample_probability_count_ = sample_probability_count;

  queue_duration_sum_ += queue_duration;
  run_duration_sum_ += run_duration;

  if (queue_duration_max_ < queue_duration)
    queue_duration_max_ = queue_duration;
  if (run_duration_max_ < run_duration)
    run_duration_max_ = run_duration;

  // Take a uniformly distributed sample over all durations ever supplied during
  // the current profiling phase.
  // The probability that we (instead) use this new sample is
  // 1/sample_probability_count_.  This results in a completely uniform
  // selection of the sample (at least when we don't clamp
  // sample_probability_count_... but that should be inconsequentially likely).
  // We ignore the fact that we correlated our selection of a sample to the run
  // and queue times (i.e., we used them to generate random_number).
  CHECK_GT(sample_probability_count, 0);
  if (0 == (random_number % sample_probability_count)) {
    queue_duration_sample_ = queue_duration;
    run_duration_sample_ = run_duration;
  }
}
170
// Closes out a profiling phase: prepends a snapshot of the current tallies to
// the phase-snapshot chain, then selectively resets fields (see the long
// rationale below for which fields are reset and why).
void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
  // Snapshotting and storing current state.  The new node takes its place at
  // the head of the chain; it is freed by ~DeathData().
  last_phase_snapshot_ = new DeathDataPhaseSnapshot(
      profiling_phase, count_, run_duration_sum_, run_duration_max_,
      run_duration_sample_, queue_duration_sum_, queue_duration_max_,
      queue_duration_sample_, last_phase_snapshot_);

  // Not touching fields for which a delta can be computed by comparing with a
  // snapshot from the previous phase.  Resetting other fields.  Sample values
  // will be reset upon next death recording because sample_probability_count_
  // is set to 0.
  // We avoid resetting to 0 in favor of deltas whenever possible.  The reason
  // is that for incrementable fields, resetting to 0 from the snapshot thread
  // potentially in parallel with incrementing in the death thread may result in
  // significant data corruption that has a potential to grow with time.  Not
  // resetting incrementable fields and using deltas will cause any
  // off-by-little corruptions to be likely fixed at the next snapshot.
  // The max values are not incrementable, and cannot be deduced using deltas
  // for a given phase.  Hence, we have to reset them to 0.  But the potential
  // damage is limited to getting the previous phase's max to apply for the next
  // phase, and the error doesn't have a potential to keep growing with new
  // resets.
  // sample_probability_count_ is incrementable, but must be reset to 0 at the
  // phase end, so that we start a new uniformly randomized sample selection
  // after the reset.  Corruptions due to race conditions are possible, but the
  // damage is limited to selecting a wrong sample, which is not something that
  // can cause accumulating or cascading effects.
  // If there were no corruptions caused by race conditions, we never send a
  // sample for the previous phase in the next phase's snapshot because
  // ThreadData::SnapshotExecutedTasks doesn't send deltas with 0 count.
  sample_probability_count_ = 0;
  run_duration_max_ = 0;
  queue_duration_max_ = 0;
}
205
206//------------------------------------------------------------------------------
// Default snapshot: all fields are -1 sentinels, marking an uninitialized
// snapshot (a real tally is never negative).
DeathDataSnapshot::DeathDataSnapshot()
    : count(-1),
      run_duration_sum(-1),
      run_duration_max(-1),
      run_duration_sample(-1),
      queue_duration_sum(-1),
      queue_duration_max(-1),
      queue_duration_sample(-1) {
}

// Field-by-field initializing constructor, used when building snapshots and
// deltas from live DeathData tallies.
DeathDataSnapshot::DeathDataSnapshot(int count,
                                     int32 run_duration_sum,
                                     int32 run_duration_max,
                                     int32 run_duration_sample,
                                     int32 queue_duration_sum,
                                     int32 queue_duration_max,
                                     int32 queue_duration_sample)
    : count(count),
      run_duration_sum(run_duration_sum),
      run_duration_max(run_duration_max),
      run_duration_sample(run_duration_sample),
      queue_duration_sum(queue_duration_sum),
      queue_duration_max(queue_duration_max),
      queue_duration_sample(queue_duration_sample) {
}

DeathDataSnapshot::~DeathDataSnapshot() {
}
235
vadimte2de4732015-04-27 21:43:02236DeathDataSnapshot DeathDataSnapshot::Delta(
237 const DeathDataSnapshot& older) const {
238 return DeathDataSnapshot(count - older.count,
239 run_duration_sum - older.run_duration_sum,
240 run_duration_max, run_duration_sample,
241 queue_duration_sum - older.queue_duration_sum,
242 queue_duration_max, queue_duration_sample);
243}
244
[email protected]1cb05db2012-04-13 00:39:26245//------------------------------------------------------------------------------
// Records the static birth site (|location|) and the thread it was first
// observed on (|current|); both are immutable for the object's lifetime.
BirthOnThread::BirthOnThread(const Location& location,
                             const ThreadData& current)
    : location_(location),
      birth_thread_(&current) {
}

//------------------------------------------------------------------------------
BirthOnThreadSnapshot::BirthOnThreadSnapshot() {
}

// Snapshots a live BirthOnThread into plain copyable fields.
BirthOnThreadSnapshot::BirthOnThreadSnapshot(const BirthOnThread& birth)
    : location(birth.location()),
      thread_name(birth.birth_thread()->thread_name()) {
}

BirthOnThreadSnapshot::~BirthOnThreadSnapshot() {
}
263
initial.commitd7cae122008-07-26 21:49:38264//------------------------------------------------------------------------------
// A Births is constructed on the first birth at a location, so the tally
// starts at 1 rather than 0.
Births::Births(const Location& location, const ThreadData& current)
    : BirthOnThread(location, current),
      birth_count_(1) { }

int Births::birth_count() const { return birth_count_; }

// Unsynchronized increment; only the birth thread updates this tally (see
// ThreadData::TallyABirth).
void Births::RecordBirth() { ++birth_count_; }
272
initial.commitd7cae122008-07-26 21:49:38273//------------------------------------------------------------------------------
[email protected]b6b2b892011-12-04 07:19:10274// ThreadData maintains the central data for all births and deaths on a single
275// thread.
initial.commitd7cae122008-07-26 21:49:38276
// TODO(jar): We should pull all these static vars together, into a struct, and
// optimize layout so that we benefit from locality of reference during accesses
// to them.

// Optional alternate time source; NULL means use the default wall clock.
// static
NowFunction* ThreadData::now_function_ = NULL;

// Whether now_function_ values can be trusted for queue-duration math.
// static
bool ThreadData::now_function_is_time_ = false;

// A TLS slot which points to the ThreadData instance for the current thread.
// We do a fake initialization here (zeroing out data), and then the real
// in-place construction happens when we call tls_index_.Initialize().
// static
base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER;

// Number of ThreadData instances ever created for unnamed worker threads.
// static
int ThreadData::worker_thread_data_creation_count_ = 0;

// Number of thread-exit cleanups that have run (see OnThreadTermination).
// static
int ThreadData::cleanup_count_ = 0;

// Bumped between unit tests so stale ThreadData can be detected and skipped.
// static
int ThreadData::incarnation_counter_ = 0;

// Head of the linked list of all ThreadData instances (guarded by list_lock_).
// static
ThreadData* ThreadData::all_thread_data_list_head_ = NULL;

// Head of the list of retired worker-thread instances awaiting reuse.
// static
ThreadData* ThreadData::first_retired_worker_ = NULL;

// Lazily created lock guarding the lists above; Leaky so it survives shutdown.
// static
base::LazyInstance<base::Lock>::Leaky
    ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER;

// Global tracking state, read/written atomically across threads.
// static
base::subtle::Atomic32 ThreadData::status_ = ThreadData::UNINITIALIZED;
initial.commitd7cae122008-07-26 21:49:38314
[email protected]84baeca2011-10-24 18:55:16315ThreadData::ThreadData(const std::string& suggested_name)
[email protected]8aa1e6e2011-12-14 01:36:48316 : next_(NULL),
[email protected]26cdeb962011-11-20 04:17:07317 next_retired_worker_(NULL),
[email protected]8aa1e6e2011-12-14 01:36:48318 worker_thread_number_(0),
vadimt12f0f7d2014-09-15 19:19:38319 incarnation_count_for_pool_(-1),
320 current_stopwatch_(NULL) {
[email protected]84b57952011-10-15 23:52:45321 DCHECK_GE(suggested_name.size(), 0u);
322 thread_name_ = suggested_name;
[email protected]b2a9bbd2011-10-31 22:36:21323 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_.
[email protected]84b57952011-10-15 23:52:45324}
325
// Constructs the per-thread data for an unnamed worker thread; the positive
// |thread_number| both names the thread ("WorkerThread-N") and marks the
// instance as eligible for retirement/reuse (see OnThreadTerminationCleanup).
ThreadData::ThreadData(int thread_number)
    : next_(NULL),
      next_retired_worker_(NULL),
      worker_thread_number_(thread_number),
      incarnation_count_for_pool_(-1),
      current_stopwatch_(NULL) {
  CHECK_GT(thread_number, 0);
  base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number);
  PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
}
initial.commitd7cae122008-07-26 21:49:38336
// Instances are deliberately leaked (reused across worker threads), so the
// destructor has nothing to release.
ThreadData::~ThreadData() {
}

// Links this instance onto the global all-threads list and seeds
// random_number_ with cheap entropy.
void ThreadData::PushToHeadOfList() {
  // Toss in a hint of randomness (atop the uninitialized value).  The member
  // is intentionally left uninitialized; the annotations below tell Valgrind
  // and MSan that reading it here is deliberate.
  (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_,
                                                 sizeof(random_number_));
  MSAN_UNPOISON(&random_number_, sizeof(random_number_));
  // Mix in this object's address and the current time as extra entropy.
  random_number_ += static_cast<uint32>(this - static_cast<ThreadData*>(0));
  random_number_ ^= (Now() - TrackedTime()).InMilliseconds();

  DCHECK(!next_);
  base::AutoLock lock(*list_lock_.Pointer());
  incarnation_count_for_pool_ = incarnation_counter_;
  next_ = all_thread_data_list_head_;
  all_thread_data_list_head_ = this;
}
354
// Returns the head of the all-threads list.  The head pointer is read under
// the lock; the list itself is append-at-head only, so traversal via next()
// afterwards sees a stable snapshot.
// static
ThreadData* ThreadData::first() {
  base::AutoLock lock(*list_lock_.Pointer());
  return all_thread_data_list_head_;
}

ThreadData* ThreadData::next() const { return next_; }
362
363// static
[email protected]84b57952011-10-15 23:52:45364void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
asvitkined0abaa32015-05-07 16:27:17365 Initialize();
[email protected]b2a9bbd2011-10-31 22:36:21366 ThreadData* current_thread_data =
367 reinterpret_cast<ThreadData*>(tls_index_.Get());
368 if (current_thread_data)
369 return; // Browser tests instigate this.
370 current_thread_data = new ThreadData(suggested_name);
[email protected]84baeca2011-10-24 18:55:16371 tls_index_.Set(current_thread_data);
[email protected]84b57952011-10-15 23:52:45372}
initial.commitd7cae122008-07-26 21:49:38373
// Returns the current thread's ThreadData, lazily creating (or recycling a
// retired) worker-thread instance when the thread never pre-registered.
// static
ThreadData* ThreadData::Get() {
  if (!tls_index_.initialized())
    return NULL;  // For unittests only.
  ThreadData* registered = reinterpret_cast<ThreadData*>(tls_index_.Get());
  if (registered)
    return registered;

  // We must be a worker thread, since we didn't pre-register.
  ThreadData* worker_thread_data = NULL;
  int worker_thread_number = 0;
  {
    // Hold the lock only long enough to pop a retired instance or claim a new
    // worker number; construction happens outside the lock.
    base::AutoLock lock(*list_lock_.Pointer());
    if (first_retired_worker_) {
      worker_thread_data = first_retired_worker_;
      first_retired_worker_ = first_retired_worker_->next_retired_worker_;
      worker_thread_data->next_retired_worker_ = NULL;
    } else {
      worker_thread_number = ++worker_thread_data_creation_count_;
    }
  }

  // If we can't find a previously used instance, then we have to create one.
  if (!worker_thread_data) {
    DCHECK_GT(worker_thread_number, 0);
    worker_thread_data = new ThreadData(worker_thread_number);
  }
  DCHECK_GT(worker_thread_data->worker_thread_number_, 0);

  tls_index_.Set(worker_thread_data);
  return worker_thread_data;
}
406
407// static
[email protected]84baeca2011-10-24 18:55:16408void ThreadData::OnThreadTermination(void* thread_data) {
[email protected]8aa1e6e2011-12-14 01:36:48409 DCHECK(thread_data); // TLS should *never* call us with a NULL.
vadimte2de4732015-04-27 21:43:02410 // We must NOT do any allocations during this callback. There is a chance
[email protected]9a88c902011-11-24 00:00:31411 // that the allocator is no longer active on this thread.
[email protected]84baeca2011-10-24 18:55:16412 reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup();
[email protected]84baeca2011-10-24 18:55:16413}
414
// Retires this instance at thread exit: worker-thread instances are pushed
// onto the retired list for reuse; named-thread instances are simply left on
// the global list.  Must not allocate (the allocator may be gone).
void ThreadData::OnThreadTerminationCleanup() {
  // The list_lock_ was created when we registered the callback, so it won't be
  // allocated here despite the lazy reference.
  base::AutoLock lock(*list_lock_.Pointer());
  if (incarnation_counter_ != incarnation_count_for_pool_)
    return;  // ThreadData was constructed in an earlier unit test.
  ++cleanup_count_;
  // Only worker threads need to be retired and reused.
  if (!worker_thread_number_) {
    return;
  }
  // We must NOT do any allocations during this callback.
  // Using the simple linked lists avoids all allocations.
  DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL));
  this->next_retired_worker_ = first_retired_worker_;
  first_retired_worker_ = this;
}
432
// Collects a process-wide snapshot of birth/death tallies for
// |current_profiling_phase| into |process_data_snapshot|, including
// still-alive (born but not yet dead) tasks.
// static
void ThreadData::Snapshot(int current_profiling_phase,
                          ProcessDataSnapshot* process_data_snapshot) {
  // Get an unchanging copy of a ThreadData list.
  ThreadData* my_list = ThreadData::first();

  // Gather data serially.
  // This hackish approach *can* get some slightly corrupt tallies, as we are
  // grabbing values without the protection of a lock, but it has the advantage
  // of working even with threads that don't have message loops.  If a user
  // sees any strangeness, they can always just run their stats gathering a
  // second time.
  BirthCountMap birth_counts;
  for (ThreadData* thread_data = my_list; thread_data;
       thread_data = thread_data->next()) {
    thread_data->SnapshotExecutedTasks(current_profiling_phase,
                                       &process_data_snapshot->phased_snapshots,
                                       &birth_counts);
  }

  // Add births that are still active -- i.e. objects that have tallied a birth,
  // but have not yet tallied a matching death, and hence must be either
  // running, queued up, or being held in limbo for future posting.
  auto* current_phase_tasks =
      &process_data_snapshot->phased_snapshots[current_profiling_phase].tasks;
  for (const auto& birth_count : birth_counts) {
    if (birth_count.second > 0) {
      current_phase_tasks->push_back(
          TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first),
                       DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0),
                       "Still_Alive"));
    }
  }
}
467
468// static
469void ThreadData::OnProfilingPhaseCompleted(int profiling_phase) {
470 // Get an unchanging copy of a ThreadData list.
471 ThreadData* my_list = ThreadData::first();
472
473 // Add snapshots for all instances of death data in all threads serially.
474 // This hackish approach *can* get some slightly corrupt tallies, as we are
475 // grabbing values without the protection of a lock, but it has the advantage
476 // of working even with threads that don't have message loops. Any corruption
477 // shouldn't cause "cascading damage" to anything else (in later phases).
478 for (ThreadData* thread_data = my_list; thread_data;
479 thread_data = thread_data->next()) {
480 thread_data->OnProfilingPhaseCompletedOnThread(profiling_phase);
481 }
[email protected]84baeca2011-10-24 18:55:16482}
483
// Records a birth at |location| on this thread, creating (and intentionally
// leaking) the Births record on first sighting.  The find() is unlocked by
// design: only this thread mutates birth_map_, and snapshot readers take
// map_lock_ before copying, so the lock is needed only when inserting.
Births* ThreadData::TallyABirth(const Location& location) {
  BirthMap::iterator it = birth_map_.find(location);
  Births* child;
  if (it != birth_map_.end()) {
    child = it->second;
    child->RecordBirth();
  } else {
    child = new Births(location, *this);  // Leak this.
    // Lock since the map may get relocated now, and other threads sometimes
    // snapshot it (but they lock before copying it).
    base::AutoLock lock(map_lock_);
    birth_map_[location] = child;
  }

  return child;
}
500
// Records a death for |births| on this thread: stirs entropy into
// random_number_ (used by DeathData's reservoir sampling) and tallies the
// run/queue durations into the per-birth DeathData.
void ThreadData::TallyADeath(const Births& births,
                             int32 queue_duration,
                             const TaskStopwatch& stopwatch) {
  int32 run_duration = stopwatch.RunDurationMs();

  // Stir in some randomness, plus add constant in case durations are zero.
  const uint32 kSomePrimeNumber = 2147483647;  // 2^31 - 1 (Mersenne prime).
  random_number_ += queue_duration + run_duration + kSomePrimeNumber;
  // An address is going to have some randomness to it as well ;-).
  random_number_ ^= static_cast<uint32>(&births - reinterpret_cast<Births*>(0));

  // We don't have queue durations without OS timer.  OS timer is automatically
  // used for task-post-timing, so the use of an alternate timer implies all
  // queue times are invalid, unless it was explicitly said that we can trust
  // the alternate timer.
  if (kAllowAlternateTimeSourceHandling &&
      now_function_ &&
      !now_function_is_time_) {
    queue_duration = 0;
  }

  // Same locking discipline as TallyABirth: unlocked find() is safe because
  // only this thread inserts; lock only when the map may grow/relocate.
  DeathMap::iterator it = death_map_.find(&births);
  DeathData* death_data;
  if (it != death_map_.end()) {
    death_data = &it->second;
  } else {
    base::AutoLock lock(map_lock_);  // Lock as the map may get relocated now.
    death_data = &death_map_[&births];
  }  // Release lock ASAP.
  death_data->RecordDeath(queue_duration, run_duration, random_number_);
}
532
533// static
[email protected]180c85e2011-07-26 18:25:16534Births* ThreadData::TallyABirthIfActive(const Location& location) {
[email protected]702a12d2012-02-10 19:43:42535 if (!TrackingStatus())
[email protected]84b57952011-10-15 23:52:45536 return NULL;
537 ThreadData* current_thread_data = Get();
538 if (!current_thread_data)
539 return NULL;
540 return current_thread_data->TallyABirth(location);
[email protected]180c85e2011-07-26 18:25:16541}
542
// Records the completion (death) of |completed_task| on the named thread that
// ran it, computing queue time from the task's effective post time.
// static
void ThreadData::TallyRunOnNamedThreadIfTracking(
    const base::TrackingInfo& completed_task,
    const TaskStopwatch& stopwatch) {
  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  const Births* births = completed_task.birth_tally;
  if (!births)
    return;
  ThreadData* current_thread_data = stopwatch.GetThreadData();
  if (!current_thread_data)
    return;

  // Watch out for a race where status_ is changing, and hence one or both
  // of start_of_run or end_of_run is zero.  In that case, we didn't bother to
  // get a time value since we "weren't tracking" and we were trying to be
  // efficient by not calling for a genuine time value.  For simplicity, we'll
  // use a default zero duration when we can't calculate a true value.
  TrackedTime start_of_run = stopwatch.StartTime();
  int32 queue_duration = 0;
  if (!start_of_run.is_null()) {
    queue_duration = (start_of_run - completed_task.EffectiveTimePosted())
        .InMilliseconds();
  }
  current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
}
570
// static
// Tallies the completion of a task that ran on a worker thread. Unlike the
// named-thread variant, the caller supplies |time_posted| directly because
// worker-thread tasks carry no base::TrackingInfo.
void ThreadData::TallyRunOnWorkerThreadIfTracking(
    const Births* births,
    const TrackedTime& time_posted,
    const TaskStopwatch& stopwatch) {
  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!births)
    return;

  // TODO(jar): Support the option to coalesce all worker-thread activity under
  // one ThreadData instance that uses locks to protect *all* access. This will
  // reduce memory (making it provably bounded), but run incrementally slower
  // (since we'll use locks on TallyABirth and TallyADeath). The good news is
  // that the locks on TallyADeath will be *after* the worker thread has run,
  // and hence nothing will be waiting for the completion (... besides some
  // other thread that might like to run). Also, the worker threads tasks are
  // generally longer, and hence the cost of the lock may perchance be amortized
  // over the long task's lifetime.
  ThreadData* current_thread_data = stopwatch.GetThreadData();
  if (!current_thread_data)
    return;

  // A null start time means tracking was off when the stopwatch started; fall
  // back to a zero queue duration (see comment in the named-thread variant).
  TrackedTime start_of_run = stopwatch.StartTime();
  int32 queue_duration = 0;
  if (!start_of_run.is_null()) {
    queue_duration = (start_of_run - time_posted).InMilliseconds();
  }
  current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
}
602
// static
// Tallies the completion of a scoped tracked region. Such regions are never
// "posted" anywhere, so the queue duration is always zero.
void ThreadData::TallyRunInAScopedRegionIfTracking(
    const Births* births,
    const TaskStopwatch& stopwatch) {
  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!births)
    return;

  ThreadData* current_thread_data = stopwatch.GetThreadData();
  if (!current_thread_data)
    return;

  int32 queue_duration = 0;
  current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
}
620
// Folds this thread's birth/death data into the process-wide accumulators.
// |phased_snapshots| receives, per profiling phase, one TaskSnapshot for each
// death tracker with a nonzero per-phase count; |birth_counts| is adjusted by
// this thread's birth tallies (added for births, subtracted for trackers that
// also appear in the death map).
void ThreadData::SnapshotExecutedTasks(
    int current_profiling_phase,
    PhasedProcessDataSnapshotMap* phased_snapshots,
    BirthCountMap* birth_counts) {
  // Get copy of data, so that the data will not change during the iterations
  // and processing.
  BirthMap birth_map;
  DeathsSnapshot deaths;
  SnapshotMaps(current_profiling_phase, &birth_map, &deaths);

  for (const auto& birth : birth_map) {
    (*birth_counts)[birth.second] += birth.second->birth_count();
  }

  for (const auto& death : deaths) {
    (*birth_counts)[death.first] -= death.first->birth_count();

    // For the current death data, walk through all its snapshots, starting from
    // the current one, then from the previous profiling phase etc., and for
    // each snapshot calculate the delta between the snapshot and the previous
    // phase, if any. Store the deltas in the result.
    for (const DeathDataPhaseSnapshot* phase = &death.second; phase;
         phase = phase->prev) {
      // The oldest snapshot in the chain has no predecessor, so it is used
      // as-is rather than as a delta.
      const DeathDataSnapshot& death_data =
          phase->prev ? phase->death_data.Delta(phase->prev->death_data)
                      : phase->death_data;

      if (death_data.count > 0) {
        (*phased_snapshots)[phase->profiling_phase].tasks.push_back(
            TaskSnapshot(BirthOnThreadSnapshot(*death.first), death_data,
                         thread_name()));
      }
    }
  }
}
656
// This may be called from another thread.
// Copies birth_map_ and converts every DeathData in death_map_ into a
// DeathDataPhaseSnapshot tagged with |profiling_phase|. Both copies are made
// under map_lock_, so concurrent mutation by the owning thread is excluded
// while the snapshot is taken.
void ThreadData::SnapshotMaps(int profiling_phase,
                              BirthMap* birth_map,
                              DeathsSnapshot* deaths) {
  base::AutoLock lock(map_lock_);

  for (const auto& birth : birth_map_)
    (*birth_map)[birth.first] = birth.second;

  for (const auto& death : death_map_) {
    deaths->push_back(std::make_pair(
        death.first,
        DeathDataPhaseSnapshot(profiling_phase, death.second.count(),
                               death.second.run_duration_sum(),
                               death.second.run_duration_max(),
                               death.second.run_duration_sample(),
                               death.second.queue_duration_sum(),
                               death.second.queue_duration_max(),
                               death.second.queue_duration_sample(),
                               death.second.last_phase_snapshot())));
  }
}
679
// Notifies every DeathData in this thread's death map that |profiling_phase|
// has completed, letting each one archive its per-phase statistics. Runs
// under map_lock_ since it may be invoked from another thread.
void ThreadData::OnProfilingPhaseCompletedOnThread(int profiling_phase) {
  base::AutoLock lock(map_lock_);

  for (auto& death : death_map_) {
    death.second.OnProfilingPhaseCompleted(profiling_phase);
  }
}
687
[email protected]90895d0f2012-02-15 23:05:01688static void OptionallyInitializeAlternateTimer() {
[email protected]ed0fd002012-04-25 23:10:34689 NowFunction* alternate_time_source = GetAlternateTimeSource();
690 if (alternate_time_source)
691 ThreadData::SetAlternateTimeSource(alternate_time_source);
[email protected]90895d0f2012-02-15 23:05:01692}
693
// Performs one-time (per incarnation) setup: TLS slot, alternate timer, and
// the initial tracking status. Idempotent; safe against racy lazy calls from
// tests thanks to the double-checked status_ test under list_lock_.
void ThreadData::Initialize() {
  if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
    return;  // Someone else did the initialization.
  // Due to racy lazy initialization in tests, we'll need to recheck status_
  // after we acquire the lock.

  // Ensure that we don't double initialize tls. We are called when single
  // threaded in the product, but some tests may be racy and lazy about our
  // initialization.
  base::AutoLock lock(*list_lock_.Pointer());
  if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
    return;  // Someone raced in here and beat us.

  // Put an alternate timer in place if the environment calls for it, such as
  // for tracking TCMalloc allocations. This insertion is idempotent, so we
  // don't mind if there is a race, and we'd prefer not to be in a lock while
  // doing this work.
  if (kAllowAlternateTimeSourceHandling)
    OptionallyInitializeAlternateTimer();

  // Perform the "real" TLS initialization now, and leave it intact through
  // process termination.
  if (!tls_index_.initialized()) {  // Testing may have initialized this.
    DCHECK_EQ(base::subtle::NoBarrier_Load(&status_), UNINITIALIZED);
    tls_index_.Initialize(&ThreadData::OnThreadTermination);
    DCHECK(tls_index_.initialized());
  } else {
    // TLS was initialized for us earlier.
    DCHECK_EQ(base::subtle::NoBarrier_Load(&status_), DORMANT_DURING_TESTS);
  }

  // Incarnation counter is only significant to testing, as it otherwise will
  // never again change in this process.
  ++incarnation_counter_;

  // The lock is not critical for setting status_, but it doesn't hurt. It also
  // ensures that if we have a racy initialization, that we'll bail as soon as
  // we get the lock earlier in this method.
  base::subtle::Release_Store(&status_, kInitialStartupState);
  DCHECK(base::subtle::NoBarrier_Load(&status_) != UNINITIALIZED);
}
735
// static
// Initializes (if needed) and then forces the tracking status. Any "active"
// request is normalized to PROFILING_ACTIVE before being published.
void ThreadData::InitializeAndSetTrackingStatus(Status status) {
  DCHECK_GE(status, DEACTIVATED);
  DCHECK_LE(status, PROFILING_ACTIVE);

  Initialize();  // No-op if already initialized.

  if (status > DEACTIVATED)
    status = PROFILING_ACTIVE;
  // Release semantics so readers using Acquire_Load see fully-initialized
  // state once they observe the new status.
  base::subtle::Release_Store(&status_, status);
}
747
748// static
[email protected]702a12d2012-02-10 19:43:42749ThreadData::Status ThreadData::status() {
amistry42d16882015-07-17 03:58:06750 return static_cast<ThreadData::Status>(base::subtle::Acquire_Load(&status_));
[email protected]702a12d2012-02-10 19:43:42751}
752
753// static
754bool ThreadData::TrackingStatus() {
amistry42d16882015-07-17 03:58:06755 return base::subtle::Acquire_Load(&status_) > DEACTIVATED;
initial.commitd7cae122008-07-26 21:49:38756}
757
758// static
[email protected]90895d0f2012-02-15 23:05:01759void ThreadData::SetAlternateTimeSource(NowFunction* now_function) {
760 DCHECK(now_function);
761 if (kAllowAlternateTimeSourceHandling)
762 now_function_ = now_function;
763}
764
// static
// Turns on real wall-clock collection for profiled tasks (see Now()).
void ThreadData::EnableProfilerTiming() {
  base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, ENABLED_TIMING);
}
769
// static
// Returns the profiler's notion of "now". An installed alternate time source
// takes precedence; otherwise a real clock reading is taken only when both
// profiler timing and tracking are enabled, so the disabled path stays cheap.
TrackedTime ThreadData::Now() {
  if (kAllowAlternateTimeSourceHandling && now_function_)
    return TrackedTime::FromMilliseconds((*now_function_)());
  if (IsProfilerTimingEnabled() && TrackingStatus())
    return TrackedTime::Now();
  return TrackedTime();  // Super fast when disabled, or not compiled.
}
initial.commitd7cae122008-07-26 21:49:38778
// static
// Test-support sanity check that thread cleanup actually ran. The real check
// is currently compiled out (see TODO below).
void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) {
  base::AutoLock lock(*list_lock_.Pointer());
  if (worker_thread_data_creation_count_ == 0)
    return;  // We haven't really run much, and couldn't have leaked.

  // TODO(jar): until this is working on XP, don't run the real test.
#if 0
  // Verify that we've at least shutdown/cleanup the major named threads. The
  // caller should tell us how many thread shutdowns should have taken place by
  // now.
  CHECK_GT(cleanup_count_, major_threads_shutdown_count);
#endif
}
793
// static
// Test-only teardown: deactivates tracking, detaches all ThreadData from the
// global list, resets the static state, and then either deliberately leaks
// the detached instances (when |leak| is true) or deletes them recursively.
void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
  // This is only called from test code, where we need to cleanup so that
  // additional tests can be run.
  // We must be single threaded... but be careful anyway.
  InitializeAndSetTrackingStatus(DEACTIVATED);

  ThreadData* thread_data_list;
  {
    base::AutoLock lock(*list_lock_.Pointer());
    thread_data_list = all_thread_data_list_head_;
    all_thread_data_list_head_ = NULL;
    ++incarnation_counter_;
    // To be clean, break apart the retired worker list (though we leak them).
    while (first_retired_worker_) {
      ThreadData* worker = first_retired_worker_;
      CHECK_GT(worker->worker_thread_number_, 0);
      first_retired_worker_ = worker->next_retired_worker_;
      worker->next_retired_worker_ = NULL;
    }
  }

  // Put most global static back in pristine shape.
  worker_thread_data_creation_count_ = 0;
  cleanup_count_ = 0;
  tls_index_.Set(NULL);
  // Almost UNINITIALIZED: DORMANT_DURING_TESTS tells Initialize() that the
  // TLS slot already exists.
  base::subtle::Release_Store(&status_, DORMANT_DURING_TESTS);

  // To avoid any chance of racing in unit tests, which is the only place we
  // call this function, we may sometimes leak all the data structures we
  // recovered, as they may still be in use on threads from prior tests!
  if (leak) {
    ThreadData* thread_data = thread_data_list;
    while (thread_data) {
      // Mark as intentionally leaked so leak detectors stay quiet.
      ANNOTATE_LEAKING_OBJECT_PTR(thread_data);
      thread_data = thread_data->next();
    }
    return;
  }

  // When we want to cleanup (on a single thread), here is what we do.

  // Do actual recursive delete in all ThreadData instances.
  while (thread_data_list) {
    ThreadData* next_thread_data = thread_data_list;
    thread_data_list = thread_data_list->next();

    for (BirthMap::iterator it = next_thread_data->birth_map_.begin();
         next_thread_data->birth_map_.end() != it; ++it)
      delete it->second;  // Delete the Birth Records.
    delete next_thread_data;  // Includes all Death Records.
  }
}
848
initial.commitd7cae122008-07-26 21:49:38849//------------------------------------------------------------------------------
// A stopwatch begins life in the CREATED state; Start() must be called before
// any of the accessors are used.
TaskStopwatch::TaskStopwatch()
    : wallclock_duration_ms_(0),
      current_thread_data_(NULL),
      excluded_duration_ms_(0),
      parent_(NULL) {
#if DCHECK_IS_ON()
  state_ = CREATED;
  child_ = NULL;
#endif
}
vadimt12f0f7d2014-09-15 19:19:38860
TaskStopwatch::~TaskStopwatch() {
#if DCHECK_IS_ON()
  // Must not be destroyed while running, and any nested (child) stopwatch
  // must already have been stopped and unlinked.
  DCHECK(state_ != RUNNING);
  DCHECK(child_ == NULL);
#endif
}
867
// Begins timing: captures the start time and pushes this stopwatch onto the
// current thread's stopwatch stack, becoming the child of any stopwatch that
// is already running there.
void TaskStopwatch::Start() {
#if DCHECK_IS_ON()
  DCHECK(state_ == CREATED);
  state_ = RUNNING;
#endif

  start_time_ = ThreadData::Now();

  // May be NULL if tracking is not set up on this thread; accessors tolerate
  // that, and Stop() bails out early in that case.
  current_thread_data_ = ThreadData::Get();
  if (!current_thread_data_)
    return;

  parent_ = current_thread_data_->current_stopwatch_;
#if DCHECK_IS_ON()
  if (parent_) {
    DCHECK(parent_->state_ == RUNNING);
    DCHECK(parent_->child_ == NULL);
    parent_->child_ = this;
  }
#endif
  current_thread_data_->current_stopwatch_ = this;
}
890
// Ends timing: computes the wallclock duration, pops this stopwatch off the
// thread's stopwatch stack, and charges our elapsed time to the parent's
// excluded duration so nested work is not double-counted.
void TaskStopwatch::Stop() {
  const TrackedTime end_time = ThreadData::Now();
#if DCHECK_IS_ON()
  DCHECK(state_ == RUNNING);
  state_ = STOPPED;
  DCHECK(child_ == NULL);
#endif

  // Either time may be null when tracking was disabled (Now() returned a
  // default value); in that case the duration stays at zero.
  if (!start_time_.is_null() && !end_time.is_null()) {
    wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds();
  }

  if (!current_thread_data_)
    return;

  DCHECK(current_thread_data_->current_stopwatch_ == this);
  current_thread_data_->current_stopwatch_ = parent_;
  if (!parent_)
    return;

#if DCHECK_IS_ON()
  DCHECK(parent_->state_ == RUNNING);
  DCHECK(parent_->child_ == this);
  parent_->child_ = NULL;
#endif
  parent_->excluded_duration_ms_ += wallclock_duration_ms_;
  parent_ = NULL;
}
919
// Returns the time captured by Start(); only meaningful after Start() ran.
TrackedTime TaskStopwatch::StartTime() const {
#if DCHECK_IS_ON()
  DCHECK(state_ != CREATED);
#endif

  return start_time_;
}
927
// Net run time in ms: wallclock duration minus time attributed to nested
// (child) stopwatches. Only valid after Stop().
int32 TaskStopwatch::RunDurationMs() const {
#if DCHECK_IS_ON()
  DCHECK(state_ == STOPPED);
#endif

  return wallclock_duration_ms_ - excluded_duration_ms_;
}
935
// Returns the ThreadData captured at Start() time; may be NULL when tracking
// was not set up on the starting thread.
ThreadData* TaskStopwatch::GetThreadData() const {
#if DCHECK_IS_ON()
  DCHECK(state_ != CREATED);
#endif

  return current_thread_data_;
}
943
944//------------------------------------------------------------------------------
vadimte2de4732015-04-27 21:43:02945// DeathDataPhaseSnapshot
946
// Builds a phase-tagged snapshot of death statistics. |prev| links to the
// snapshot taken at the previous profiling phase (or NULL for the oldest),
// forming the chain that SnapshotExecutedTasks walks to compute deltas.
DeathDataPhaseSnapshot::DeathDataPhaseSnapshot(
    int profiling_phase,
    int count,
    int32 run_duration_sum,
    int32 run_duration_max,
    int32 run_duration_sample,
    int32 queue_duration_sum,
    int32 queue_duration_max,
    int32 queue_duration_sample,
    const DeathDataPhaseSnapshot* prev)
    : profiling_phase(profiling_phase),
      death_data(count,
                 run_duration_sum,
                 run_duration_max,
                 run_duration_sample,
                 queue_duration_sum,
                 queue_duration_max,
                 queue_duration_sample),
      prev(prev) {
}
967
968//------------------------------------------------------------------------------
969// TaskSnapshot
970
// Default-constructed (empty) snapshot.
TaskSnapshot::TaskSnapshot() {
}
973
// Bundles a birth location, its death statistics, and the name of the thread
// the task died on, for transport out of the profiler.
TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth,
                           const DeathDataSnapshot& death_data,
                           const std::string& death_thread_name)
    : birth(birth),
      death_data(death_data),
      death_thread_name(death_thread_name) {
}
981
TaskSnapshot::~TaskSnapshot() {
}
984
initial.commitd7cae122008-07-26 21:49:38985//------------------------------------------------------------------------------
vadimt379d7fe2015-04-01 00:09:35986// ProcessDataPhaseSnapshot
987
// Default-constructed (empty) per-phase process snapshot.
ProcessDataPhaseSnapshot::ProcessDataPhaseSnapshot() {
}
990
ProcessDataPhaseSnapshot::~ProcessDataPhaseSnapshot() {
}
993
994//------------------------------------------------------------------------------
995// ProcessDataPhaseSnapshot
[email protected]1cb05db2012-04-13 00:39:26996
// Records the current process id; NaCl has no usable process id, so the null
// sentinel is stored there instead.
ProcessDataSnapshot::ProcessDataSnapshot()
#if !defined(OS_NACL)
    : process_id(base::GetCurrentProcId()) {
#else
    : process_id(base::kNullProcessId) {
#endif
}
1004
ProcessDataSnapshot::~ProcessDataSnapshot() {
}
1007
initial.commitd7cae122008-07-26 21:49:381008} // namespace tracked_objects