// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/tracked_objects.h"

#include <math.h>

#include "base/format_macros.h"
#include "base/message_loop.h"
#include "base/stringprintf.h"
#include "base/third_party/valgrind/memcheck.h"
#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
#include "base/port.h"

using base::TimeDelta;

namespace tracked_objects {

namespace {

// Flag to compile out almost all of the task tracking code.
const bool kTrackAllTaskObjects = true;

// Flag to compile out parent-child link recording.
const bool kTrackParentChildLinks = false;

// When ThreadData is first initialized, should we start in an ACTIVE state to
// record all of the startup-time tasks, or should we start up DEACTIVATED, so
// that we only record after parsing the command line flag --enable-tracking?
// Note that the flag may force either state, so this really controls only the
// period of time up until that flag is parsed. If there is no flag seen, then
// this state may prevail for much or all of the process lifetime.
const ThreadData::Status kInitialStartupState =
    ThreadData::PROFILING_CHILDREN_ACTIVE;

}  // namespace

//------------------------------------------------------------------------------
// DeathData tallies durations when a death takes place.

DeathData::DeathData() {
  Clear();
}

DeathData::DeathData(int count) {
  Clear();
  count_ = count;
}

// TODO(jar): I need to see if this macro to optimize branching is worth using.
//
// This macro has no branching, so it is surely fast, and is equivalent to:
//   if (assign_it)
//     target = source;
// We use a macro rather than a template to force this to inline.
// Related code for calculating max is discussed on the web.
#define CONDITIONAL_ASSIGN(assign_it, target, source) \
    ((target) ^= ((target) ^ (source)) & -static_cast<DurationInt>(assign_it))
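// For illustration only (a sketch, not code that is currently used): with the
// macro above, the branchy max updates in RecordDeath() below could be written
// as
//   CONDITIONAL_ASSIGN(queue_duration_max_ < queue_duration,
//                      queue_duration_max_, queue_duration);
// which overwrites queue_duration_max_ only when the comparison is true.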

void DeathData::RecordDeath(const DurationInt queue_duration,
                            const DurationInt run_duration,
                            int32 random_number) {
  ++count_;
  queue_duration_sum_ += queue_duration;
  run_duration_sum_ += run_duration;

  if (queue_duration_max_ < queue_duration)
    queue_duration_max_ = queue_duration;
  if (run_duration_max_ < run_duration)
    run_duration_max_ = run_duration;

  // Take a uniformly distributed sample over all durations ever supplied.
  // The probability that we (instead) use this new sample is 1/count_. This
  // results in a completely uniform selection of the sample.
  // We ignore the fact that we correlated our selection of a sample of run
  // and queue times.
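  // In other words, this is reservoir sampling with a reservoir of size one:
  // assuming random_number is roughly uniformly distributed, the N-th death
  // passes the test below with probability 1/N, so each duration ever seen
  // has an equal chance of being the retained sample.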
  if (0 == (random_number % count_)) {
    queue_duration_sample_ = queue_duration;
    run_duration_sample_ = run_duration;
  }
}

int DeathData::count() const { return count_; }

DurationInt DeathData::run_duration_sum() const { return run_duration_sum_; }

DurationInt DeathData::run_duration_max() const { return run_duration_max_; }

DurationInt DeathData::run_duration_sample() const {
  return run_duration_sample_;
}

DurationInt DeathData::queue_duration_sum() const {
  return queue_duration_sum_;
}

DurationInt DeathData::queue_duration_max() const {
  return queue_duration_max_;
}

DurationInt DeathData::queue_duration_sample() const {
  return queue_duration_sample_;
}

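// Roughly, ToValue() serializes the accessors above into a flat dictionary of
// the following shape (the values here are made up):
//   {"count": 7, "run_ms": 254, "run_ms_max": 120, "run_ms_sample": 30,
//    "queue_ms": 18, "queue_ms_max": 9, "queue_ms_sample": 2}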
base::DictionaryValue* DeathData::ToValue() const {
  base::DictionaryValue* dictionary = new base::DictionaryValue;
  dictionary->Set("count", base::Value::CreateIntegerValue(count_));
  dictionary->Set("run_ms",
                  base::Value::CreateIntegerValue(run_duration_sum()));
  dictionary->Set("run_ms_max",
                  base::Value::CreateIntegerValue(run_duration_max()));
  dictionary->Set("run_ms_sample",
                  base::Value::CreateIntegerValue(run_duration_sample()));
  dictionary->Set("queue_ms",
                  base::Value::CreateIntegerValue(queue_duration_sum()));
  dictionary->Set("queue_ms_max",
                  base::Value::CreateIntegerValue(queue_duration_max()));
  dictionary->Set("queue_ms_sample",
                  base::Value::CreateIntegerValue(queue_duration_sample()));
  return dictionary;
}

void DeathData::ResetMax() {
  run_duration_max_ = 0;
  queue_duration_max_ = 0;
}

void DeathData::Clear() {
  count_ = 0;
  run_duration_sum_ = 0;
  run_duration_max_ = 0;
  run_duration_sample_ = 0;
  queue_duration_sum_ = 0;
  queue_duration_max_ = 0;
  queue_duration_sample_ = 0;
}

//------------------------------------------------------------------------------
BirthOnThread::BirthOnThread(const Location& location,
                             const ThreadData& current)
    : location_(location),
      birth_thread_(&current) {
}

const Location BirthOnThread::location() const { return location_; }
const ThreadData* BirthOnThread::birth_thread() const { return birth_thread_; }

void BirthOnThread::ToValue(const std::string& prefix,
                            base::DictionaryValue* dictionary) const {
  dictionary->Set(prefix + "_location", location_.ToValue());
  dictionary->Set(prefix + "_thread",
                  base::Value::CreateStringValue(birth_thread_->thread_name()));
}

//------------------------------------------------------------------------------
Births::Births(const Location& location, const ThreadData& current)
    : BirthOnThread(location, current),
      birth_count_(1) { }

int Births::birth_count() const { return birth_count_; }

void Births::RecordBirth() { ++birth_count_; }

void Births::ForgetBirth() { --birth_count_; }

void Births::Clear() { birth_count_ = 0; }

//------------------------------------------------------------------------------
// ThreadData maintains the central data for all births and deaths on a single
// thread.

// TODO(jar): We should pull all these static vars together, into a struct, and
// optimize layout so that we benefit from locality of reference during accesses
// to them.

// A TLS slot which points to the ThreadData instance for the current thread. We
// do a fake initialization here (zeroing out data), and then the real in-place
// construction happens when we call tls_index_.Initialize().
// static
base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER;

// static
int ThreadData::worker_thread_data_creation_count_ = 0;

// static
int ThreadData::cleanup_count_ = 0;

// static
int ThreadData::incarnation_counter_ = 0;

// static
ThreadData* ThreadData::all_thread_data_list_head_ = NULL;

// static
ThreadData* ThreadData::first_retired_worker_ = NULL;

// static
base::LazyInstance<base::Lock>::Leaky
    ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER;

// static
ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED;

ThreadData::ThreadData(const std::string& suggested_name)
    : next_(NULL),
      next_retired_worker_(NULL),
      worker_thread_number_(0),
      incarnation_count_for_pool_(-1) {
  DCHECK_GE(suggested_name.size(), 0u);
  thread_name_ = suggested_name;
  PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
}

ThreadData::ThreadData(int thread_number)
    : next_(NULL),
      next_retired_worker_(NULL),
      worker_thread_number_(thread_number),
      incarnation_count_for_pool_(-1) {
  CHECK_GT(thread_number, 0);
  base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number);
  PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
}

ThreadData::~ThreadData() {}

void ThreadData::PushToHeadOfList() {
  // Toss in a hint of randomness (atop the uninitialized value).
  (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_,
                                                 sizeof(random_number_));
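  // (The null-pointer subtraction below is just a cheap way to fold a value
  // derived from this object's address into the seed.)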
  random_number_ += static_cast<int32>(this - static_cast<ThreadData*>(0));
  random_number_ ^= (Now() - TrackedTime()).InMilliseconds();

  DCHECK(!next_);
  base::AutoLock lock(*list_lock_.Pointer());
  incarnation_count_for_pool_ = incarnation_counter_;
  next_ = all_thread_data_list_head_;
  all_thread_data_list_head_ = this;
}

// static
ThreadData* ThreadData::first() {
  base::AutoLock lock(*list_lock_.Pointer());
  return all_thread_data_list_head_;
}

ThreadData* ThreadData::next() const { return next_; }

// static
void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
  if (!Initialize())  // Always initialize if needed.
    return;
  ThreadData* current_thread_data =
      reinterpret_cast<ThreadData*>(tls_index_.Get());
  if (current_thread_data)
    return;  // Browser tests instigate this.
  current_thread_data = new ThreadData(suggested_name);
  tls_index_.Set(current_thread_data);
}

// static
ThreadData* ThreadData::Get() {
  if (!tls_index_.initialized())
    return NULL;  // For unittests only.
  ThreadData* registered = reinterpret_cast<ThreadData*>(tls_index_.Get());
  if (registered)
    return registered;

  // We must be a worker thread, since we didn't pre-register.
  ThreadData* worker_thread_data = NULL;
  int worker_thread_number = 0;
  {
    base::AutoLock lock(*list_lock_.Pointer());
    if (first_retired_worker_) {
      worker_thread_data = first_retired_worker_;
      first_retired_worker_ = first_retired_worker_->next_retired_worker_;
      worker_thread_data->next_retired_worker_ = NULL;
    } else {
      worker_thread_number = ++worker_thread_data_creation_count_;
    }
  }

  // If we can't find a previously used instance, then we have to create one.
  if (!worker_thread_data) {
    DCHECK_GT(worker_thread_number, 0);
    worker_thread_data = new ThreadData(worker_thread_number);
  }
  DCHECK_GT(worker_thread_data->worker_thread_number_, 0);

  tls_index_.Set(worker_thread_data);
  return worker_thread_data;
}

// static
void ThreadData::OnThreadTermination(void* thread_data) {
  DCHECK(thread_data);  // TLS should *never* call us with a NULL.
  // We must NOT do any allocations during this callback. There is a chance
  // that the allocator is no longer active on this thread.
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.
  reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup();
}

void ThreadData::OnThreadTerminationCleanup() {
  // The list_lock_ was created when we registered the callback, so it won't be
  // allocated here despite the lazy reference.
  base::AutoLock lock(*list_lock_.Pointer());
  if (incarnation_counter_ != incarnation_count_for_pool_)
    return;  // ThreadData was constructed in an earlier unit test.
  ++cleanup_count_;
  // Only worker threads need to be retired and reused.
  if (!worker_thread_number_) {
    return;
  }
  // We must NOT do any allocations during this callback.
  // Using the simple linked lists avoids all allocations.
  DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL));
  this->next_retired_worker_ = first_retired_worker_;
  first_retired_worker_ = this;
}

// static
base::DictionaryValue* ThreadData::ToValue(bool reset_max) {
  DataCollector collected_data;  // Gather data.
  // Request multiple calls to collected_data.Append() for all threads.
  SendAllMaps(reset_max, &collected_data);
  collected_data.AddListOfLivingObjects();  // Add births that are still alive.
  base::DictionaryValue* dictionary = new base::DictionaryValue();
  collected_data.ToValue(dictionary);
  return dictionary;
}

Births* ThreadData::TallyABirth(const Location& location) {
  BirthMap::iterator it = birth_map_.find(location);
  Births* child;
  if (it != birth_map_.end()) {
    child = it->second;
    child->RecordBirth();
  } else {
    child = new Births(location, *this);  // Leak this.
    // Lock since the map may get relocated now, and other threads sometimes
    // snapshot it (but they lock before copying it).
    base::AutoLock lock(map_lock_);
    birth_map_[location] = child;
  }

  if (kTrackParentChildLinks && status_ > PROFILING_ACTIVE &&
      !parent_stack_.empty()) {
    const Births* parent = parent_stack_.top();
    ParentChildPair pair(parent, child);
    if (parent_child_set_.find(pair) == parent_child_set_.end()) {
      // Lock since the map may get relocated now, and other threads sometimes
      // snapshot it (but they lock before copying it).
      base::AutoLock lock(map_lock_);
      parent_child_set_.insert(pair);
    }
  }

  return child;
}

void ThreadData::TallyADeath(const Births& birth,
                             DurationInt queue_duration,
                             DurationInt run_duration) {
  // Stir in some randomness, plus add a constant in case durations are zero.
  const DurationInt kSomePrimeNumber = 2147483647;
  random_number_ += queue_duration + run_duration + kSomePrimeNumber;
  // An address is going to have some randomness to it as well ;-).
  random_number_ ^= static_cast<int32>(&birth - reinterpret_cast<Births*>(0));

  DeathMap::iterator it = death_map_.find(&birth);
  DeathData* death_data;
  if (it != death_map_.end()) {
    death_data = &it->second;
  } else {
    base::AutoLock lock(map_lock_);  // Lock as the map may get relocated now.
    death_data = &death_map_[&birth];
  }  // Release lock ASAP.
  death_data->RecordDeath(queue_duration, run_duration, random_number_);

  if (!kTrackParentChildLinks)
    return;
  if (!parent_stack_.empty()) {  // We might get turned off.
    DCHECK_EQ(parent_stack_.top(), &birth);
    parent_stack_.pop();
  }
}

// static
Births* ThreadData::TallyABirthIfActive(const Location& location) {
  if (!kTrackAllTaskObjects)
    return NULL;  // Not compiled in.

  if (!TrackingStatus())
    return NULL;
  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return NULL;
  return current_thread_data->TallyABirth(location);
}
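
// A hypothetical sketch of how these tally entry points fit together (the real
// call sites live elsewhere and are not reproduced here):
//   const Births* birth = ThreadData::TallyABirthIfActive(FROM_HERE);
//   ...post the task, remembering its post time...
//   TrackedTime start = ThreadData::NowForStartOfRun(birth);
//   ...run the task...
//   TrackedTime end = ThreadData::NowForEndOfRun();
//   ThreadData::TallyRunOnWorkerThreadIfTracking(birth, post_time, start, end);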

// static
void ThreadData::TallyRunOnNamedThreadIfTracking(
    const base::TrackingInfo& completed_task,
    const TrackedTime& start_of_run,
    const TrackedTime& end_of_run) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  const Births* birth = completed_task.birth_tally;
  if (!birth)
    return;
  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return;

  // To avoid conflating our stats with the delay duration in a PostDelayedTask,
  // we identify such tasks, and replace their post_time with the time they
  // were scheduled (requested?) to emerge from the delayed task queue. This
  // means that queueing delay for such tasks will show how long they went
  // unserviced, after they *could* be serviced. This is the same stat as we
  // have for non-delayed tasks, and we consistently call it queueing delay.
  TrackedTime effective_post_time = completed_task.delayed_run_time.is_null()
      ? tracked_objects::TrackedTime(completed_task.time_posted)
      : tracked_objects::TrackedTime(completed_task.delayed_run_time);

  // Watch out for a race where status_ is changing, and hence one or both
  // of start_of_run or end_of_run is zero. In that case, we didn't bother to
  // get a time value since we "weren't tracking" and we were trying to be
  // efficient by not calling for a genuine time value. For simplicity, we'll
  // use a default zero duration when we can't calculate a true value.
  DurationInt queue_duration = 0;
  DurationInt run_duration = 0;
  if (!start_of_run.is_null()) {
    queue_duration = (start_of_run - effective_post_time).InMilliseconds();
    if (!end_of_run.is_null())
      run_duration = (end_of_run - start_of_run).InMilliseconds();
  }
  current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
}

// static
void ThreadData::TallyRunOnWorkerThreadIfTracking(
    const Births* birth,
    const TrackedTime& time_posted,
    const TrackedTime& start_of_run,
    const TrackedTime& end_of_run) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!birth)
    return;

  // TODO(jar): Support the option to coalesce all worker-thread activity under
  // one ThreadData instance that uses locks to protect *all* access. This will
  // reduce memory (making it provably bounded), but run incrementally slower
  // (since we'll use locks on TallyBirth and TallyDeath). The good news is
  // that the locks on TallyDeath will be *after* the worker thread has run, and
  // hence nothing will be waiting for the completion (... besides some other
  // thread that might like to run). Also, the worker threads' tasks are
  // generally longer, and hence the cost of the lock may perchance be amortized
  // over the long task's lifetime.
  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return;

  DurationInt queue_duration = 0;
  DurationInt run_duration = 0;
  if (!start_of_run.is_null()) {
    queue_duration = (start_of_run - time_posted).InMilliseconds();
    if (!end_of_run.is_null())
      run_duration = (end_of_run - start_of_run).InMilliseconds();
  }
  current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
}

// static
void ThreadData::TallyRunInAScopedRegionIfTracking(
    const Births* birth,
    const TrackedTime& start_of_run,
    const TrackedTime& end_of_run) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!birth)
    return;

  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return;

  DurationInt queue_duration = 0;
  DurationInt run_duration = 0;
  if (!start_of_run.is_null() && !end_of_run.is_null())
    run_duration = (end_of_run - start_of_run).InMilliseconds();
  current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
}

const std::string ThreadData::thread_name() const { return thread_name_; }

// This may be called from another thread.
void ThreadData::SnapshotMaps(bool reset_max,
                              BirthMap* birth_map,
                              DeathMap* death_map,
                              ParentChildSet* parent_child_set) {
  base::AutoLock lock(map_lock_);
  for (BirthMap::const_iterator it = birth_map_.begin();
       it != birth_map_.end(); ++it)
    (*birth_map)[it->first] = it->second;
  for (DeathMap::iterator it = death_map_.begin();
       it != death_map_.end(); ++it) {
    (*death_map)[it->first] = it->second;
    if (reset_max)
      it->second.ResetMax();
  }

  if (!kTrackParentChildLinks)
    return;

  for (ParentChildSet::iterator it = parent_child_set_.begin();
       it != parent_child_set_.end(); ++it)
    parent_child_set->insert(*it);
}

// static
void ThreadData::SendAllMaps(bool reset_max, class DataCollector* target) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.
  // Get an unchanging copy of a ThreadData list.
  ThreadData* my_list = ThreadData::first();

  // Gather data serially.
  // This hackish approach *can* get some slightly corrupt tallies, as we are
  // grabbing values without the protection of a lock, but it has the advantage
  // of working even with threads that don't have message loops. If a user
  // sees any strangeness, they can always just run their stats gathering a
  // second time.
  for (ThreadData* thread_data = my_list;
       thread_data;
       thread_data = thread_data->next()) {
    // Get copy of data.
    ThreadData::BirthMap birth_map;
    ThreadData::DeathMap death_map;
    ThreadData::ParentChildSet parent_child_set;
    thread_data->SnapshotMaps(reset_max, &birth_map, &death_map,
                              &parent_child_set);
    target->Append(*thread_data, birth_map, death_map, parent_child_set);
  }
}

// static
void ThreadData::ResetAllThreadData() {
  ThreadData* my_list = first();

  for (ThreadData* thread_data = my_list;
       thread_data;
       thread_data = thread_data->next())
    thread_data->Reset();
}

void ThreadData::Reset() {
  base::AutoLock lock(map_lock_);
  for (DeathMap::iterator it = death_map_.begin();
       it != death_map_.end(); ++it)
    it->second.Clear();
  for (BirthMap::iterator it = birth_map_.begin();
       it != birth_map_.end(); ++it)
    it->second->Clear();
}

bool ThreadData::Initialize() {
  if (!kTrackAllTaskObjects)
    return false;  // Not compiled in.
  if (status_ >= DEACTIVATED)
    return true;  // Someone else did the initialization.
  // Due to racy lazy initialization in tests, we'll need to recheck status_
  // after we acquire the lock.

  // Ensure that we don't double initialize tls. We are called when single
  // threaded in the product, but some tests may be racy and lazy about our
  // initialization.
  base::AutoLock lock(*list_lock_.Pointer());
  if (status_ >= DEACTIVATED)
    return true;  // Someone raced in here and beat us.

  // Perform the "real" TLS initialization now, and leave it intact through
  // process termination.
  if (!tls_index_.initialized()) {  // Testing may have initialized this.
    DCHECK_EQ(status_, UNINITIALIZED);
    tls_index_.Initialize(&ThreadData::OnThreadTermination);
    if (!tls_index_.initialized())
      return false;
  } else {
    // TLS was initialized for us earlier.
    DCHECK_EQ(status_, DORMANT_DURING_TESTS);
  }

  // Incarnation counter is only significant to testing, as it otherwise will
  // never again change in this process.
  ++incarnation_counter_;

  // The lock is not critical for setting status_, but it doesn't hurt. It also
  // ensures that if we have a racy initialization, we'll bail as soon as
  // we get the lock earlier in this method.
  status_ = kInitialStartupState;
  if (!kTrackParentChildLinks &&
      kInitialStartupState == PROFILING_CHILDREN_ACTIVE)
    status_ = PROFILING_ACTIVE;
  DCHECK(status_ != UNINITIALIZED);
  return true;
}

// static
bool ThreadData::InitializeAndSetTrackingStatus(Status status) {
  DCHECK_GE(status, DEACTIVATED);
  DCHECK_LE(status, PROFILING_CHILDREN_ACTIVE);

  if (!Initialize())  // No-op if already initialized.
    return false;  // Not compiled in.

  if (!kTrackParentChildLinks && status > DEACTIVATED)
    status = PROFILING_ACTIVE;
  status_ = status;
  return true;
}

// static
ThreadData::Status ThreadData::status() {
  return status_;
}

// static
bool ThreadData::TrackingStatus() {
  return status_ > DEACTIVATED;
}

// static
bool ThreadData::TrackingParentChildStatus() {
  return status_ >= PROFILING_CHILDREN_ACTIVE;
}

// static
TrackedTime ThreadData::NowForStartOfRun(const Births* parent) {
  if (kTrackParentChildLinks && parent && status_ > PROFILING_ACTIVE) {
    ThreadData* current_thread_data = Get();
    if (current_thread_data)
      current_thread_data->parent_stack_.push(parent);
  }
  return Now();
}

// static
TrackedTime ThreadData::NowForEndOfRun() {
  return Now();
}

// static
TrackedTime ThreadData::Now() {
  if (kTrackAllTaskObjects && TrackingStatus())
    return TrackedTime::Now();
  return TrackedTime();  // Super fast when disabled, or not compiled.
}

// static
void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) {
  base::AutoLock lock(*list_lock_.Pointer());
  if (worker_thread_data_creation_count_ == 0)
    return;  // We haven't really run much, and couldn't have leaked.
  // Verify that we've at least shut down/cleaned up the major named threads.
  // The caller should tell us how many thread shutdowns should have taken
  // place by now.
  return;  // TODO(jar): until this is working on XP, don't run the real test.
  CHECK_GT(cleanup_count_, major_threads_shutdown_count);
}

// static
void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
  // This is only called from test code, where we need to clean up so that
  // additional tests can be run.
  // We must be single threaded... but be careful anyway.
  if (!InitializeAndSetTrackingStatus(DEACTIVATED))
    return;
  ThreadData* thread_data_list;
  {
    base::AutoLock lock(*list_lock_.Pointer());
    thread_data_list = all_thread_data_list_head_;
    all_thread_data_list_head_ = NULL;
    ++incarnation_counter_;
    // To be clean, break apart the retired worker list (though we leak them).
    while (first_retired_worker_) {
      ThreadData* worker = first_retired_worker_;
      CHECK_GT(worker->worker_thread_number_, 0);
      first_retired_worker_ = worker->next_retired_worker_;
      worker->next_retired_worker_ = NULL;
    }
  }

  // Put most global statics back in pristine shape.
  worker_thread_data_creation_count_ = 0;
  cleanup_count_ = 0;
  tls_index_.Set(NULL);
  status_ = DORMANT_DURING_TESTS;  // Almost UNINITIALIZED.

  // To avoid any chance of racing in unit tests, which is the only place we
  // call this function, we may sometimes leak all the data structures we
  // recovered, as they may still be in use on threads from prior tests!
  if (leak)
    return;

  // When we want to clean up (on a single thread), here is what we do.

  // Do actual recursive delete in all ThreadData instances.
  while (thread_data_list) {
    ThreadData* next_thread_data = thread_data_list;
    thread_data_list = thread_data_list->next();

    for (BirthMap::iterator it = next_thread_data->birth_map_.begin();
         next_thread_data->birth_map_.end() != it; ++it)
      delete it->second;  // Delete the Birth Records.
    delete next_thread_data;  // Includes all Death Records.
  }
}

//------------------------------------------------------------------------------
// Individual 3-tuple of birth (place and thread) along with death thread, and
// the accumulated stats for instances (DeathData).

Snapshot::Snapshot(const BirthOnThread& birth_on_thread,
                   const ThreadData& death_thread,
                   const DeathData& death_data)
    : birth_(&birth_on_thread),
      death_thread_(&death_thread),
      death_data_(death_data) {
}

Snapshot::Snapshot(const BirthOnThread& birth_on_thread, int count)
    : birth_(&birth_on_thread),
      death_thread_(NULL),
      death_data_(DeathData(count)) {
}

const std::string Snapshot::DeathThreadName() const {
  if (death_thread_)
    return death_thread_->thread_name();
  return "Still_Alive";
}

base::DictionaryValue* Snapshot::ToValue() const {
  base::DictionaryValue* dictionary = new base::DictionaryValue;
  // TODO(jar): Switch the next two lines to:
  // birth_->ToValue("birth", dictionary);
  // ...but that will require fixing unit tests, and JS to take
  // "birth_location" rather than "location"
  dictionary->Set("birth_thread",
      base::Value::CreateStringValue(birth_->birth_thread()->thread_name()));
  dictionary->Set("location", birth_->location().ToValue());

  dictionary->Set("death_data", death_data_.ToValue());
  dictionary->Set("death_thread",
                  base::Value::CreateStringValue(DeathThreadName()));
  return dictionary;
}

//------------------------------------------------------------------------------
// DataCollector

DataCollector::DataCollector() {}

DataCollector::~DataCollector() {
}

void DataCollector::Append(const ThreadData& thread_data,
                           const ThreadData::BirthMap& birth_map,
                           const ThreadData::DeathMap& death_map,
                           const ThreadData::ParentChildSet& parent_child_set) {
  for (ThreadData::DeathMap::const_iterator it = death_map.begin();
       it != death_map.end(); ++it) {
    collection_.push_back(Snapshot(*it->first, thread_data, it->second));
    global_birth_count_[it->first] -= it->first->birth_count();
  }

  for (ThreadData::BirthMap::const_iterator it = birth_map.begin();
       it != birth_map.end(); ++it) {
    global_birth_count_[it->second] += it->second->birth_count();
  }

  if (!kTrackParentChildLinks)
    return;

  for (ThreadData::ParentChildSet::const_iterator it = parent_child_set.begin();
       it != parent_child_set.end(); ++it) {
    parent_child_set_.insert(*it);
  }
}

DataCollector::Collection* DataCollector::collection() {
  return &collection_;
}

void DataCollector::AddListOfLivingObjects() {
  for (BirthCount::iterator it = global_birth_count_.begin();
       it != global_birth_count_.end(); ++it) {
    if (it->second > 0)
      collection_.push_back(Snapshot(*it->first, it->second));
  }
}

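// A rough sketch of the dictionary that ToValue() below produces (each "list"
// entry comes from Snapshot::ToValue(), and each "descendants" entry from a
// pair of BirthOnThread::ToValue() calls):
//   {"list": [{"birth_thread": ..., "location": ..., "death_data": ...,
//              "death_thread": ...}, ...],
//    "descendants": [{"parent_location": ..., "parent_thread": ...,
//                     "child_location": ..., "child_thread": ...}, ...]}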
void DataCollector::ToValue(base::DictionaryValue* dictionary) const {
  base::ListValue* list = new base::ListValue;
  for (size_t i = 0; i < collection_.size(); ++i) {
    list->Append(collection_[i].ToValue());
  }
  dictionary->Set("list", list);

  base::ListValue* descendants = new base::ListValue;
  for (ThreadData::ParentChildSet::const_iterator it =
           parent_child_set_.begin();
       it != parent_child_set_.end();
       ++it) {
    base::DictionaryValue* parent_child = new base::DictionaryValue;
    it->first->ToValue("parent", parent_child);
    it->second->ToValue("child", parent_child);
    descendants->Append(parent_child);
  }
  dictionary->Set("descendants", descendants);
}

}  // namespace tracked_objects