[email protected] | 9fc4416 | 2012-01-23 22:56:41 | [diff] [blame] | 1 | // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
license.bot | bf09a50 | 2008-08-24 00:55:55 | [diff] [blame] | 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 4 | |
| 5 | #include "base/tracked_objects.h" |
| 6 | |
[email protected] | a5b94a9 | 2008-08-12 23:25:43 | [diff] [blame] | 7 | #include <math.h> |
| 8 | |
[email protected] | 34b2b00 | 2009-11-20 06:53:28 | [diff] [blame] | 9 | #include "base/format_macros.h" |
[email protected] | f539333 | 2009-06-03 15:01:29 | [diff] [blame] | 10 | #include "base/message_loop.h" |
[email protected] | f163393 | 2010-08-17 23:05:28 | [diff] [blame] | 11 | #include "base/stringprintf.h" |
[email protected] | a0447ff | 2011-12-04 21:14:05 | [diff] [blame] | 12 | #include "base/third_party/valgrind/memcheck.h" |
[email protected] | 34b9963 | 2011-01-01 01:01:06 | [diff] [blame] | 13 | #include "base/threading/thread_restrictions.h" |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 14 | #include "build/build_config.h" |
[email protected] | 445029fb | 2011-11-18 17:03:33 | [diff] [blame] | 15 | #include "base/port.h" |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 16 | |
[email protected] | e1acf6f | 2008-10-27 20:43:33 | [diff] [blame] | 17 | using base::TimeDelta; |
| 18 | |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 19 | namespace tracked_objects { |
| 20 | |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 21 | namespace { |
[email protected] | da9ccfb | 2012-01-28 00:34:40 | [diff] [blame] | 22 | |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 23 | // Flag to compile out almost all of the task tracking code. |
[email protected] | da9ccfb | 2012-01-28 00:34:40 | [diff] [blame] | 24 | const bool kTrackAllTaskObjects = true; |
[email protected] | 3f095c0a | 2011-10-31 15:32:08 | [diff] [blame] | 25 | |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 26 | // Flag to compile out parent-child link recording. |
[email protected] | da9ccfb | 2012-01-28 00:34:40 | [diff] [blame] | 27 | const bool kTrackParentChildLinks = false; |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 28 | |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 29 | // When ThreadData is first initialized, should we start in an ACTIVE state to |
| 30 | // record all of the startup-time tasks, or should we start up DEACTIVATED, so |
| 31 | // that we only record after parsing the command line flag --enable-tracking. |
| 32 | // Note that the flag may force either state, so this really controls only the |
| 33 | // period of time up until that flag is parsed. If there is no flag seen, then |
| 34 | // this state may prevail for much or all of the process lifetime. |
[email protected] | da9ccfb | 2012-01-28 00:34:40 | [diff] [blame] | 35 | const ThreadData::Status kInitialStartupState = |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 36 | ThreadData::PROFILING_CHILDREN_ACTIVE; |
[email protected] | da9ccfb | 2012-01-28 00:34:40 | [diff] [blame] | 37 | |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 38 | } // namespace |
[email protected] | 84b5795 | 2011-10-15 23:52:45 | [diff] [blame] | 39 | |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 40 | //------------------------------------------------------------------------------ |
[email protected] | 63f5b0e | 2011-11-04 00:23:27 | [diff] [blame] | 41 | // DeathData tallies durations when a death takes place. |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 42 | |
// Constructs a fully zeroed tally.
DeathData::DeathData() {
  Clear();
}

// Constructs a tally holding |count| deaths, with all duration statistics
// (sums, maxima, samples) zeroed.
DeathData::DeathData(int count) {
  Clear();
  count_ = count;
}
| 51 | |
// TODO(jar): I need to see if this macro to optimize branching is worth using.
//
// This macro has no branching, so it is surely fast, and is equivalent to:
// if (assign_it)
// target = source;
// We use a macro rather than a template to force this to inline.
// Related code for calculating max is discussed on the web.
// NOTE(review): this macro is not used anywhere in the visible portion of this
// file; RecordDeath() below tracks maxima with ordinary branches instead.
#define CONDITIONAL_ASSIGN(assign_it, target, source) \
  ((target) ^= ((target) ^ (source)) & -static_cast<DurationInt>(assign_it))
| 61 | |
| 62 | void DeathData::RecordDeath(const DurationInt queue_duration, |
| 63 | const DurationInt run_duration, |
| 64 | int32 random_number) { |
[email protected] | 7ceb4448 | 2011-12-09 03:41:04 | [diff] [blame] | 65 | ++count_; |
[email protected] | b6b2b89 | 2011-12-04 07:19:10 | [diff] [blame] | 66 | queue_duration_sum_ += queue_duration; |
| 67 | run_duration_sum_ += run_duration; |
[email protected] | 7ceb4448 | 2011-12-09 03:41:04 | [diff] [blame] | 68 | |
| 69 | if (queue_duration_max_ < queue_duration) |
| 70 | queue_duration_max_ = queue_duration; |
| 71 | if (run_duration_max_ < run_duration) |
| 72 | run_duration_max_ = run_duration; |
[email protected] | b6b2b89 | 2011-12-04 07:19:10 | [diff] [blame] | 73 | |
| 74 | // Take a uniformly distributed sample over all durations ever supplied. |
| 75 | // The probability that we (instead) use this new sample is 1/count_. This |
| 76 | // results in a completely uniform selection of the sample. |
| 77 | // We ignore the fact that we correlated our selection of a sample of run |
| 78 | // and queue times. |
[email protected] | 7ceb4448 | 2011-12-09 03:41:04 | [diff] [blame] | 79 | if (0 == (random_number % count_)) { |
| 80 | queue_duration_sample_ = queue_duration; |
| 81 | run_duration_sample_ = run_duration; |
| 82 | } |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 83 | } |
| 84 | |
// Trivial accessors for the tallied statistics.  All duration values are in
// milliseconds (DurationInt).
int DeathData::count() const { return count_; }

DurationInt DeathData::run_duration_sum() const { return run_duration_sum_; }

DurationInt DeathData::run_duration_max() const { return run_duration_max_; }

DurationInt DeathData::run_duration_sample() const {
  return run_duration_sample_;
}

DurationInt DeathData::queue_duration_sum() const {
  return queue_duration_sum_;
}

DurationInt DeathData::queue_duration_max() const {
  return queue_duration_max_;
}

DurationInt DeathData::queue_duration_sample() const {
  return queue_duration_sample_;
}
| 106 | |
| 107 | |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 108 | base::DictionaryValue* DeathData::ToValue() const { |
| 109 | base::DictionaryValue* dictionary = new base::DictionaryValue; |
| 110 | dictionary->Set("count", base::Value::CreateIntegerValue(count_)); |
| 111 | dictionary->Set("run_ms", |
[email protected] | b6b2b89 | 2011-12-04 07:19:10 | [diff] [blame] | 112 | base::Value::CreateIntegerValue(run_duration_sum())); |
[email protected] | 63f5b0e | 2011-11-04 00:23:27 | [diff] [blame] | 113 | dictionary->Set("run_ms_max", |
[email protected] | b6b2b89 | 2011-12-04 07:19:10 | [diff] [blame] | 114 | base::Value::CreateIntegerValue(run_duration_max())); |
| 115 | dictionary->Set("run_ms_sample", |
| 116 | base::Value::CreateIntegerValue(run_duration_sample())); |
| 117 | dictionary->Set("queue_ms", |
| 118 | base::Value::CreateIntegerValue(queue_duration_sum())); |
[email protected] | 63f5b0e | 2011-11-04 00:23:27 | [diff] [blame] | 119 | dictionary->Set("queue_ms_max", |
[email protected] | b6b2b89 | 2011-12-04 07:19:10 | [diff] [blame] | 120 | base::Value::CreateIntegerValue(queue_duration_max())); |
| 121 | dictionary->Set("queue_ms_sample", |
| 122 | base::Value::CreateIntegerValue(queue_duration_sample())); |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 123 | return dictionary; |
| 124 | } |
| 125 | |
// Zeroes only the recorded maxima, so that subsequent snapshots report maxima
// observed since this reset.  Counts, sums, and samples are preserved.
void DeathData::ResetMax() {
  run_duration_max_ = 0;
  queue_duration_max_ = 0;
}

// Zeroes every tallied statistic, returning this instance to a
// freshly-constructed state.
void DeathData::Clear() {
  count_ = 0;
  run_duration_sum_ = 0;
  run_duration_max_ = 0;
  run_duration_sample_ = 0;
  queue_duration_sum_ = 0;
  queue_duration_max_ = 0;
  queue_duration_sample_ = 0;
}
| 140 | |
| 141 | //------------------------------------------------------------------------------ |
// BirthOnThread pairs a code Location with the ThreadData of the thread on
// which the birth (construction/posting) took place.
BirthOnThread::BirthOnThread(const Location& location,
                             const ThreadData& current)
    : location_(location),
      birth_thread_(&current) {
}

const Location BirthOnThread::location() const { return location_; }
const ThreadData* BirthOnThread::birth_thread() const { return birth_thread_; }

// Writes this birth's location and thread name into |dictionary| under the
// keys |prefix| + "_location" and |prefix| + "_thread".
void BirthOnThread::ToValue(const std::string& prefix,
                            base::DictionaryValue* dictionary) const {
  dictionary->Set(prefix + "_location", location_.ToValue());
  dictionary->Set(prefix + "_thread",
                  base::Value::CreateStringValue(birth_thread_->thread_name()));
}
| 157 | |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 158 | //------------------------------------------------------------------------------ |
// Births counts the number of births at one location on one thread, atop the
// identifying data held by BirthOnThread.
Births::Births(const Location& location, const ThreadData& current)
    : BirthOnThread(location, current),
      birth_count_(1) { }  // Construction itself tallies the first birth.

int Births::birth_count() const { return birth_count_; }

void Births::RecordBirth() { ++birth_count_; }

// Backs out one previously recorded birth.
void Births::ForgetBirth() { --birth_count_; }

void Births::Clear() { birth_count_ = 0; }
| 170 | |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 171 | //------------------------------------------------------------------------------ |
[email protected] | b6b2b89 | 2011-12-04 07:19:10 | [diff] [blame] | 172 | // ThreadData maintains the central data for all births and deaths on a single |
| 173 | // thread. |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 174 | |
// TODO(jar): We should pull all these static vars together, into a struct, and
// optimize layout so that we benefit from locality of reference during accesses
// to them.

// A TLS slot which points to the ThreadData instance for the current thread. We
// do a fake initialization here (zeroing out data), and then the real in-place
// construction happens when we call tls_index_.Initialize().
// static
base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER;

// Count of ThreadData instances created for unregistered (worker) threads;
// also supplies each such instance's thread number (see Get()).
// static
int ThreadData::worker_thread_data_creation_count_ = 0;

// Count of thread-termination cleanups performed (see
// OnThreadTerminationCleanup()).
// static
int ThreadData::cleanup_count_ = 0;

// Bumped per test incarnation; compared against incarnation_count_for_pool_
// to ignore ThreadData constructed in an earlier unit test.
// static
int ThreadData::incarnation_counter_ = 0;

// Head of the singly linked list of all ThreadData instances.
// static
ThreadData* ThreadData::all_thread_data_list_head_ = NULL;

// Head of the free list of retired worker-thread ThreadData instances,
// available for reuse when a new worker thread appears.
// static
ThreadData* ThreadData::first_retired_worker_ = NULL;

// Guards the two lists above (leaky: intentionally never destroyed).
// static
base::LazyInstance<base::Lock>::Leaky
    ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER;

// static
ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED;
| 206 | |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 207 | ThreadData::ThreadData(const std::string& suggested_name) |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 208 | : next_(NULL), |
[email protected] | 26cdeb96 | 2011-11-20 04:17:07 | [diff] [blame] | 209 | next_retired_worker_(NULL), |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 210 | worker_thread_number_(0), |
| 211 | incarnation_count_for_pool_(-1) { |
[email protected] | 84b5795 | 2011-10-15 23:52:45 | [diff] [blame] | 212 | DCHECK_GE(suggested_name.size(), 0u); |
| 213 | thread_name_ = suggested_name; |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 214 | PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. |
[email protected] | 84b5795 | 2011-10-15 23:52:45 | [diff] [blame] | 215 | } |
| 216 | |
// Constructor for worker-thread instances, which are named by their creation
// ordinal ("WorkerThread-<n>") rather than by a caller-supplied name.
ThreadData::ThreadData(int thread_number)
    : next_(NULL),
      next_retired_worker_(NULL),
      worker_thread_number_(thread_number),
      incarnation_count_for_pool_(-1) {
  CHECK_GT(thread_number, 0);  // 0 is reserved for named (non-worker) threads.
  base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number);
  PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
}

ThreadData::~ThreadData() {}
| 228 | |
// Links this instance onto the global all_thread_data_list_head_ list, and
// seeds random_number_ with a little entropy.
void ThreadData::PushToHeadOfList() {
  // Toss in a hint of randomness (atop the uninitialized value).
  // NOTE: reading random_number_ while still uninitialized is deliberate; the
  // valgrind annotation suppresses the expected uninitialized-read report.
  (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_,
                                                 sizeof(random_number_));
  // Mix in this object's address and the current time as entropy sources.
  random_number_ += static_cast<int32>(this - static_cast<ThreadData*>(0));
  random_number_ ^= (Now() - TrackedTime()).InMilliseconds();

  DCHECK(!next_);
  base::AutoLock lock(*list_lock_.Pointer());
  incarnation_count_for_pool_ = incarnation_counter_;
  next_ = all_thread_data_list_head_;
  all_thread_data_list_head_ = this;
}
| 242 | |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 243 | // static |
// Returns the head of the global list of all ThreadData instances (the list
// itself is only mutated under list_lock_).
// static
ThreadData* ThreadData::first() {
  base::AutoLock lock(*list_lock_.Pointer());
  return all_thread_data_list_head_;
}

ThreadData* ThreadData::next() const { return next_; }
| 250 | |
| 251 | // static |
// Registers a ThreadData, named |suggested_name|, in TLS for the calling
// thread.  A no-op if the thread already has one registered.
// static
void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
  if (!Initialize())  // Always initialize if needed.
    return;
  ThreadData* current_thread_data =
      reinterpret_cast<ThreadData*>(tls_index_.Get());
  if (current_thread_data)
    return;  // Browser tests instigate this.
  current_thread_data = new ThreadData(suggested_name);
  tls_index_.Set(current_thread_data);
}
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 262 | |
[email protected] | 84b5795 | 2011-10-15 23:52:45 | [diff] [blame] | 263 | // static |
// Returns the calling thread's ThreadData, creating (or recycling a retired
// worker's) instance and registering it in TLS if this thread has none yet.
// static
ThreadData* ThreadData::Get() {
  if (!tls_index_.initialized())
    return NULL;  // For unittests only.
  ThreadData* registered = reinterpret_cast<ThreadData*>(tls_index_.Get());
  if (registered)
    return registered;

  // We must be a worker thread, since we didn't pre-register.
  ThreadData* worker_thread_data = NULL;
  int worker_thread_number = 0;
  {
    base::AutoLock lock(*list_lock_.Pointer());
    // Prefer recycling a retired worker's instance over creating a new one.
    if (first_retired_worker_) {
      worker_thread_data = first_retired_worker_;
      first_retired_worker_ = first_retired_worker_->next_retired_worker_;
      worker_thread_data->next_retired_worker_ = NULL;
    } else {
      // Reserve a thread number now; the allocation happens outside the lock.
      worker_thread_number = ++worker_thread_data_creation_count_;
    }
  }

  // If we can't find a previously used instance, then we have to create one.
  if (!worker_thread_data) {
    DCHECK_GT(worker_thread_number, 0);
    worker_thread_data = new ThreadData(worker_thread_number);
  }
  DCHECK_GT(worker_thread_data->worker_thread_number_, 0);

  tls_index_.Set(worker_thread_data);
  return worker_thread_data;
}
| 295 | |
| 296 | // static |
// TLS destructor callback, invoked with the terminating thread's registered
// ThreadData pointer so the instance can be retired.
// static
void ThreadData::OnThreadTermination(void* thread_data) {
  DCHECK(thread_data);  // TLS should *never* call us with a NULL.
  // We must NOT do any allocations during this callback. There is a chance
  // that the allocator is no longer active on this thread.
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.
  reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup();
}
| 305 | |
// Retires this (worker-thread) instance onto the first_retired_worker_ free
// list so a future worker thread can reuse it.  Named-thread instances are
// counted but not retired.
void ThreadData::OnThreadTerminationCleanup() {
  // The list_lock_ was created when we registered the callback, so it won't be
  // allocated here despite the lazy reference.
  base::AutoLock lock(*list_lock_.Pointer());
  if (incarnation_counter_ != incarnation_count_for_pool_)
    return;  // ThreadData was constructed in an earlier unit test.
  ++cleanup_count_;
  // Only worker threads need to be retired and reused.
  if (!worker_thread_number_) {
    return;
  }
  // We must NOT do any allocations during this callback.
  // Using the simple linked lists avoids all allocations.
  DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL));
  this->next_retired_worker_ = first_retired_worker_;
  first_retired_worker_ = this;
}
| 323 | |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 324 | // static |
// Aggregates tracking data from all threads into one dictionary; the caller
// owns the result.  |reset_max| is forwarded to the per-thread collection
// (presumably clearing recorded maxima as they are gathered -- see
// SendAllMaps, defined elsewhere).
// static
base::DictionaryValue* ThreadData::ToValue(bool reset_max) {
  DataCollector collected_data;  // Gather data.
  // Request multiple calls to collected_data.Append() for all threads.
  SendAllMaps(reset_max, &collected_data);
  collected_data.AddListOfLivingObjects();  // Add births that are still alive.
  base::DictionaryValue* dictionary = new base::DictionaryValue();
  collected_data.ToValue(dictionary);
  return dictionary;
}
| 334 | |
// Finds (or lazily creates) the Births tally for |location| on this thread
// and records one more birth.  Note the find() runs without the lock; only
// mutation takes map_lock_ (this thread is the sole writer of its own maps).
Births* ThreadData::TallyABirth(const Location& location) {
  BirthMap::iterator it = birth_map_.find(location);
  Births* child;
  if (it != birth_map_.end()) {
    child = it->second;
    child->RecordBirth();
  } else {
    child = new Births(location, *this);  // Leak this.
    // Lock since the map may get relocated now, and other threads sometimes
    // snapshot it (but they lock before copying it).
    base::AutoLock lock(map_lock_);
    birth_map_[location] = child;
  }

  // Optionally record a parent-child link between the task on top of
  // parent_stack_ and this newly born task (see also TallyADeath, which pops
  // that stack).
  if (kTrackParentChildLinks && status_ > PROFILING_ACTIVE &&
      !parent_stack_.empty()) {
    const Births* parent = parent_stack_.top();
    ParentChildPair pair(parent, child);
    if (parent_child_set_.find(pair) == parent_child_set_.end()) {
      // Lock since the map may get relocated now, and other threads sometimes
      // snapshot it (but they lock before copying it).
      base::AutoLock lock(map_lock_);
      parent_child_set_.insert(pair);
    }
  }

  return child;
}
| 363 | |
// Records the completion ("death") of a task born at |birth|, folding the
// queue/run durations (milliseconds) into that birthplace's DeathData.
void ThreadData::TallyADeath(const Births& birth,
                             DurationInt queue_duration,
                             DurationInt run_duration) {
  // Stir in some randomness, plus add constant in case durations are zero.
  // NOTE(review): kSomePrimeNumber is 2^31 - 1, so this addition can wrap the
  // int32; that appears intentional (it feeds the sampling RNG) -- confirm.
  const DurationInt kSomePrimeNumber = 2147483647;
  random_number_ += queue_duration + run_duration + kSomePrimeNumber;
  // An address is going to have some randomness to it as well ;-).
  random_number_ ^= static_cast<int32>(&birth - reinterpret_cast<Births*>(0));

  // Find (or lazily create) the DeathData for this birthplace; as in
  // TallyABirth, only mutation of the map takes the lock.
  DeathMap::iterator it = death_map_.find(&birth);
  DeathData* death_data;
  if (it != death_map_.end()) {
    death_data = &it->second;
  } else {
    base::AutoLock lock(map_lock_);  // Lock as the map may get relocated now.
    death_data = &death_map_[&birth];
  }  // Release lock ASAP.
  death_data->RecordDeath(queue_duration, run_duration, random_number_);

  if (!kTrackParentChildLinks)
    return;
  // Pop the parent-stack entry for this task (the matching push is not
  // visible in this chunk; the DCHECK asserts the top entry is |birth|).
  if (!parent_stack_.empty()) {  // We might get turned off.
    DCHECK_EQ(parent_stack_.top(), &birth);
    parent_stack_.pop();
  }
}
| 390 | |
| 391 | // static |
[email protected] | 180c85e | 2011-07-26 18:25:16 | [diff] [blame] | 392 | Births* ThreadData::TallyABirthIfActive(const Location& location) { |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 393 | if (!kTrackAllTaskObjects) |
| 394 | return NULL; // Not compiled in. |
| 395 | |
[email protected] | 702a12d | 2012-02-10 19:43:42 | [diff] [blame] | 396 | if (!TrackingStatus()) |
[email protected] | 84b5795 | 2011-10-15 23:52:45 | [diff] [blame] | 397 | return NULL; |
| 398 | ThreadData* current_thread_data = Get(); |
| 399 | if (!current_thread_data) |
| 400 | return NULL; |
| 401 | return current_thread_data->TallyABirth(location); |
[email protected] | 180c85e | 2011-07-26 18:25:16 | [diff] [blame] | 402 | } |
| 403 | |
| 404 | // static |
// Tallies the completion of |completed_task| (which ran on a named thread),
// computing its queueing and running durations from the supplied timestamps.
// static
void ThreadData::TallyRunOnNamedThreadIfTracking(
    const base::TrackingInfo& completed_task,
    const TrackedTime& start_of_run,
    const TrackedTime& end_of_run) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  const Births* birth = completed_task.birth_tally;
  if (!birth)
    return;
  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return;

  // To avoid conflating our stats with the delay duration in a PostDelayedTask,
  // we identify such tasks, and replace their post_time with the time they
  // were scheduled (requested?) to emerge from the delayed task queue. This
  // means that queueing delay for such tasks will show how long they went
  // unserviced, after they *could* be serviced. This is the same stat as we
  // have for non-delayed tasks, and we consistently call it queueing delay.
  TrackedTime effective_post_time = completed_task.delayed_run_time.is_null()
      ? tracked_objects::TrackedTime(completed_task.time_posted)
      : tracked_objects::TrackedTime(completed_task.delayed_run_time);

  // Watch out for a race where status_ is changing, and hence one or both
  // of start_of_run or end_of_run is zero. In that case, we didn't bother to
  // get a time value since we "weren't tracking" and we were trying to be
  // efficient by not calling for a genuine time value. For simplicity, we'll
  // use a default zero duration when we can't calculate a true value.
  DurationInt queue_duration = 0;
  DurationInt run_duration = 0;
  if (!start_of_run.is_null()) {
    queue_duration = (start_of_run - effective_post_time).InMilliseconds();
    if (!end_of_run.is_null())
      run_duration = (end_of_run - start_of_run).InMilliseconds();
  }
  current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
}
| 446 | |
| 447 | // static |
| 448 | void ThreadData::TallyRunOnWorkerThreadIfTracking( |
| 449 | const Births* birth, |
| 450 | const TrackedTime& time_posted, |
| 451 | const TrackedTime& start_of_run, |
| 452 | const TrackedTime& end_of_run) { |
| 453 | if (!kTrackAllTaskObjects) |
| 454 | return; // Not compiled in. |
| 455 | |
| 456 | // Even if we have been DEACTIVATED, we will process any pending births so |
| 457 | // that our data structures (which counted the outstanding births) remain |
| 458 | // consistent. |
| 459 | if (!birth) |
| 460 | return; |
| 461 | |
| 462 | // TODO(jar): Support the option to coalesce all worker-thread activity under |
| 463 | // one ThreadData instance that uses locks to protect *all* access. This will |
| 464 | // reduce memory (making it provably bounded), but run incrementally slower |
| 465 | // (since we'll use locks on TallyBirth and TallyDeath). The good news is |
| 466 | // that the locks on TallyDeath will be *after* the worker thread has run, and |
| 467 | // hence nothing will be waiting for the completion (... besides some other |
| 468 | // thread that might like to run). Also, the worker threads tasks are |
| 469 | // generally longer, and hence the cost of the lock may perchance be amortized |
| 470 | // over the long task's lifetime. |
| 471 | ThreadData* current_thread_data = Get(); |
| 472 | if (!current_thread_data) |
| 473 | return; |
| 474 | |
[email protected] | c25db18 | 2011-11-11 22:40:27 | [diff] [blame] | 475 | DurationInt queue_duration = 0; |
| 476 | DurationInt run_duration = 0; |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 477 | if (!start_of_run.is_null()) { |
[email protected] | c25db18 | 2011-11-11 22:40:27 | [diff] [blame] | 478 | queue_duration = (start_of_run - time_posted).InMilliseconds(); |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 479 | if (!end_of_run.is_null()) |
[email protected] | c25db18 | 2011-11-11 22:40:27 | [diff] [blame] | 480 | run_duration = (end_of_run - start_of_run).InMilliseconds(); |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 481 | } |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 482 | current_thread_data->TallyADeath(*birth, queue_duration, run_duration); |
[email protected] | 180c85e | 2011-07-26 18:25:16 | [diff] [blame] | 483 | } |
| 484 | |
| 485 | // static |
[email protected] | dbe5d207 | 2011-11-08 17:09:21 | [diff] [blame] | 486 | void ThreadData::TallyRunInAScopedRegionIfTracking( |
| 487 | const Births* birth, |
| 488 | const TrackedTime& start_of_run, |
| 489 | const TrackedTime& end_of_run) { |
| 490 | if (!kTrackAllTaskObjects) |
| 491 | return; // Not compiled in. |
| 492 | |
| 493 | // Even if we have been DEACTIVATED, we will process any pending births so |
| 494 | // that our data structures (which counted the outstanding births) remain |
| 495 | // consistent. |
| 496 | if (!birth) |
| 497 | return; |
| 498 | |
| 499 | ThreadData* current_thread_data = Get(); |
| 500 | if (!current_thread_data) |
| 501 | return; |
| 502 | |
[email protected] | c25db18 | 2011-11-11 22:40:27 | [diff] [blame] | 503 | DurationInt queue_duration = 0; |
[email protected] | fd0a645 | 2011-11-15 23:59:36 | [diff] [blame] | 504 | DurationInt run_duration = 0; |
| 505 | if (!start_of_run.is_null() && !end_of_run.is_null()) |
| 506 | run_duration = (end_of_run - start_of_run).InMilliseconds(); |
[email protected] | dbe5d207 | 2011-11-08 17:09:21 | [diff] [blame] | 507 | current_thread_data->TallyADeath(*birth, queue_duration, run_duration); |
| 508 | } |
| 509 | |
[email protected] | b6b2b89 | 2011-12-04 07:19:10 | [diff] [blame] | 510 | const std::string ThreadData::thread_name() const { return thread_name_; } |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 511 | |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 512 | // This may be called from another thread. |
[email protected] | b6b2b89 | 2011-12-04 07:19:10 | [diff] [blame] | 513 | void ThreadData::SnapshotMaps(bool reset_max, |
| 514 | BirthMap* birth_map, |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 515 | DeathMap* death_map, |
| 516 | ParentChildSet* parent_child_set) { |
[email protected] | 9a88c90 | 2011-11-24 00:00:31 | [diff] [blame] | 517 | base::AutoLock lock(map_lock_); |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 518 | for (BirthMap::const_iterator it = birth_map_.begin(); |
| 519 | it != birth_map_.end(); ++it) |
[email protected] | b6b2b89 | 2011-12-04 07:19:10 | [diff] [blame] | 520 | (*birth_map)[it->first] = it->second; |
| 521 | for (DeathMap::iterator it = death_map_.begin(); |
| 522 | it != death_map_.end(); ++it) { |
| 523 | (*death_map)[it->first] = it->second; |
| 524 | if (reset_max) |
| 525 | it->second.ResetMax(); |
| 526 | } |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 527 | |
| 528 | if (!kTrackParentChildLinks) |
| 529 | return; |
| 530 | |
| 531 | for (ParentChildSet::iterator it = parent_child_set_.begin(); |
| 532 | it != parent_child_set_.end(); ++it) |
| 533 | parent_child_set->insert(*it); |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 534 | } |
| 535 | |
[email protected] | b6b2b89 | 2011-12-04 07:19:10 | [diff] [blame] | 536 | // static |
| 537 | void ThreadData::SendAllMaps(bool reset_max, class DataCollector* target) { |
| 538 | if (!kTrackAllTaskObjects) |
| 539 | return; // Not compiled in. |
| 540 | // Get an unchanging copy of a ThreadData list. |
| 541 | ThreadData* my_list = ThreadData::first(); |
| 542 | |
| 543 | // Gather data serially. |
| 544 | // This hackish approach *can* get some slighly corrupt tallies, as we are |
| 545 | // grabbing values without the protection of a lock, but it has the advantage |
| 546 | // of working even with threads that don't have message loops. If a user |
| 547 | // sees any strangeness, they can always just run their stats gathering a |
| 548 | // second time. |
| 549 | for (ThreadData* thread_data = my_list; |
| 550 | thread_data; |
| 551 | thread_data = thread_data->next()) { |
| 552 | // Get copy of data. |
| 553 | ThreadData::BirthMap birth_map; |
| 554 | ThreadData::DeathMap death_map; |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 555 | ThreadData::ParentChildSet parent_child_set; |
| 556 | thread_data->SnapshotMaps(reset_max, &birth_map, &death_map, |
| 557 | &parent_child_set); |
| 558 | target->Append(*thread_data, birth_map, death_map, parent_child_set); |
[email protected] | b6b2b89 | 2011-12-04 07:19:10 | [diff] [blame] | 559 | } |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 560 | } |
| 561 | |
[email protected] | 75b7920 | 2009-12-30 07:31:45 | [diff] [blame] | 562 | // static |
| 563 | void ThreadData::ResetAllThreadData() { |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 564 | ThreadData* my_list = first(); |
[email protected] | 75b7920 | 2009-12-30 07:31:45 | [diff] [blame] | 565 | |
| 566 | for (ThreadData* thread_data = my_list; |
| 567 | thread_data; |
| 568 | thread_data = thread_data->next()) |
| 569 | thread_data->Reset(); |
| 570 | } |
| 571 | |
| 572 | void ThreadData::Reset() { |
[email protected] | 9a88c90 | 2011-11-24 00:00:31 | [diff] [blame] | 573 | base::AutoLock lock(map_lock_); |
[email protected] | 75b7920 | 2009-12-30 07:31:45 | [diff] [blame] | 574 | for (DeathMap::iterator it = death_map_.begin(); |
| 575 | it != death_map_.end(); ++it) |
| 576 | it->second.Clear(); |
| 577 | for (BirthMap::iterator it = birth_map_.begin(); |
| 578 | it != birth_map_.end(); ++it) |
| 579 | it->second->Clear(); |
| 580 | } |
| 581 | |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 582 | bool ThreadData::Initialize() { |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 583 | if (!kTrackAllTaskObjects) |
| 584 | return false; // Not compiled in. |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 585 | if (status_ >= DEACTIVATED) |
[email protected] | 94b555ee | 2011-11-15 21:50:36 | [diff] [blame] | 586 | return true; // Someone else did the initialization. |
| 587 | // Due to racy lazy initialization in tests, we'll need to recheck status_ |
| 588 | // after we acquire the lock. |
| 589 | |
| 590 | // Ensure that we don't double initialize tls. We are called when single |
| 591 | // threaded in the product, but some tests may be racy and lazy about our |
| 592 | // initialization. |
| 593 | base::AutoLock lock(*list_lock_.Pointer()); |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 594 | if (status_ >= DEACTIVATED) |
[email protected] | 94b555ee | 2011-11-15 21:50:36 | [diff] [blame] | 595 | return true; // Someone raced in here and beat us. |
| 596 | |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 597 | // Perform the "real" TLS initialization now, and leave it intact through |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 598 | // process termination. |
[email protected] | 94b555ee | 2011-11-15 21:50:36 | [diff] [blame] | 599 | if (!tls_index_.initialized()) { // Testing may have initialized this. |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 600 | DCHECK_EQ(status_, UNINITIALIZED); |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 601 | tls_index_.Initialize(&ThreadData::OnThreadTermination); |
[email protected] | 94b555ee | 2011-11-15 21:50:36 | [diff] [blame] | 602 | if (!tls_index_.initialized()) |
| 603 | return false; |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 604 | } else { |
| 605 | // TLS was initialzed for us earlier. |
| 606 | DCHECK_EQ(status_, DORMANT_DURING_TESTS); |
[email protected] | 94b555ee | 2011-11-15 21:50:36 | [diff] [blame] | 607 | } |
[email protected] | 3f095c0a | 2011-10-31 15:32:08 | [diff] [blame] | 608 | |
[email protected] | 94b555ee | 2011-11-15 21:50:36 | [diff] [blame] | 609 | // Incarnation counter is only significant to testing, as it otherwise will |
| 610 | // never again change in this process. |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 611 | ++incarnation_counter_; |
[email protected] | 94b555ee | 2011-11-15 21:50:36 | [diff] [blame] | 612 | |
| 613 | // The lock is not critical for setting status_, but it doesn't hurt. It also |
| 614 | // ensures that if we have a racy initialization, that we'll bail as soon as |
| 615 | // we get the lock earlier in this method. |
| 616 | status_ = kInitialStartupState; |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 617 | if (!kTrackParentChildLinks && |
| 618 | kInitialStartupState == PROFILING_CHILDREN_ACTIVE) |
| 619 | status_ = PROFILING_ACTIVE; |
[email protected] | 94b555ee | 2011-11-15 21:50:36 | [diff] [blame] | 620 | DCHECK(status_ != UNINITIALIZED); |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 621 | return true; |
| 622 | } |
| 623 | |
| 624 | // static |
[email protected] | 702a12d | 2012-02-10 19:43:42 | [diff] [blame] | 625 | bool ThreadData::InitializeAndSetTrackingStatus(Status status) { |
| 626 | DCHECK_GE(status, DEACTIVATED); |
| 627 | DCHECK_LE(status, PROFILING_CHILDREN_ACTIVE); |
| 628 | |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 629 | if (!Initialize()) // No-op if already initialized. |
| 630 | return false; // Not compiled in. |
| 631 | |
[email protected] | 702a12d | 2012-02-10 19:43:42 | [diff] [blame] | 632 | if (!kTrackParentChildLinks && status > DEACTIVATED) |
| 633 | status = PROFILING_ACTIVE; |
| 634 | status_ = status; |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 635 | return true; |
| 636 | } |
| 637 | |
// static
// Returns the current global tracking state (read without taking a lock).
ThreadData::Status ThreadData::status() {
  return status_;
}
| 642 | |
// static
// True when tracking is active in any form (any state above DEACTIVATED).
bool ThreadData::TrackingStatus() {
  return status_ > DEACTIVATED;
}
| 647 | |
// static
// True when parent-child link profiling (the highest tracking level) is on.
bool ThreadData::TrackingParentChildStatus() {
  return status_ >= PROFILING_CHILDREN_ACTIVE;
}
| 652 | |
| 653 | // static |
| 654 | TrackedTime ThreadData::NowForStartOfRun(const Births* parent) { |
| 655 | if (kTrackParentChildLinks && parent && status_ > PROFILING_ACTIVE) { |
| 656 | ThreadData* current_thread_data = Get(); |
| 657 | if (current_thread_data) |
| 658 | current_thread_data->parent_stack_.push(parent); |
| 659 | } |
[email protected] | dda9768 | 2011-11-14 05:24:07 | [diff] [blame] | 660 | return Now(); |
| 661 | } |
| 662 | |
// static
// Timestamp taken when a task finishes running (null when tracking is off,
// per Now()).
TrackedTime ThreadData::NowForEndOfRun() {
  return Now();
}
| 667 | |
| 668 | // static |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 669 | TrackedTime ThreadData::Now() { |
[email protected] | 702a12d | 2012-02-10 19:43:42 | [diff] [blame] | 670 | if (kTrackAllTaskObjects && TrackingStatus()) |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 671 | return TrackedTime::Now(); |
| 672 | return TrackedTime(); // Super fast when disabled, or not compiled. |
[email protected] | 84b5795 | 2011-10-15 23:52:45 | [diff] [blame] | 673 | } |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 674 | |
// static
// Test-only sanity check that enough named-thread cleanups ran before any
// worker-thread data could have leaked. NOTE: the real check below is
// deliberately disabled (early return) pending the XP fix in the TODO.
void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) {
  base::AutoLock lock(*list_lock_.Pointer());
  if (worker_thread_data_creation_count_ == 0)
    return;  // We haven't really run much, and couldn't have leaked.
  // Verify that we've at least shutdown/cleaned up the major named threads.
  // The caller should tell us how many thread shutdowns should have taken
  // place by now.
  return;  // TODO(jar): until this is working on XP, don't run the real test.
  CHECK_GT(cleanup_count_, major_threads_shutdown_count);
}
| 686 | |
// static
// Test-only teardown: detaches all global tracking state and (unless |leak|
// is set) deletes every ThreadData instance and its heap-allocated Births
// records, returning the globals to a nearly pristine state so another test
// incarnation can initialize from scratch.
void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
  // This is only called from test code, where we need to cleanup so that
  // additional tests can be run.
  // We must be single threaded... but be careful anyway.
  if (!InitializeAndSetTrackingStatus(DEACTIVATED))
    return;
  ThreadData* thread_data_list;
  {
    // Detach the global list under the lock; deletion happens lock-free
    // below once nothing else can reach the instances.
    base::AutoLock lock(*list_lock_.Pointer());
    thread_data_list = all_thread_data_list_head_;
    all_thread_data_list_head_ = NULL;
    ++incarnation_counter_;
    // To be clean, break apart the retired worker list (though we leak them).
    while (first_retired_worker_) {
      ThreadData* worker = first_retired_worker_;
      CHECK_GT(worker->worker_thread_number_, 0);
      first_retired_worker_ = worker->next_retired_worker_;
      worker->next_retired_worker_ = NULL;
    }
  }

  // Put most global statics back in pristine shape.
  worker_thread_data_creation_count_ = 0;
  cleanup_count_ = 0;
  tls_index_.Set(NULL);
  status_ = DORMANT_DURING_TESTS;  // Almost UNINITIALIZED.

  // To avoid any chance of racing in unit tests, which is the only place we
  // call this function, we may sometimes leak all the data structures we
  // recovered, as they may still be in use on threads from prior tests!
  if (leak)
    return;

  // When we want to cleanup (on a single thread), here is what we do.

  // Do actual recursive delete in all ThreadData instances.
  while (thread_data_list) {
    ThreadData* next_thread_data = thread_data_list;
    thread_data_list = thread_data_list->next();

    // Births records are heap-allocated per birth-map entry; free them
    // explicitly before deleting the owning ThreadData.
    for (BirthMap::iterator it = next_thread_data->birth_map_.begin();
         next_thread_data->birth_map_.end() != it; ++it)
      delete it->second;  // Delete the Birth Records.
    delete next_thread_data;  // Includes all Death Records.
  }
}
| 734 | |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 735 | //------------------------------------------------------------------------------ |
| 736 | // Individual 3-tuple of birth (place and thread) along with death thread, and |
| 737 | // the accumulated stats for instances (DeathData). |
| 738 | |
// Snapshot of a fully-tallied tuple: where/on which thread the instances
// were born, the thread they died on, and their accumulated death stats.
Snapshot::Snapshot(const BirthOnThread& birth_on_thread,
                   const ThreadData& death_thread,
                   const DeathData& death_data)
    : birth_(&birth_on_thread),
      death_thread_(&death_thread),
      death_data_(death_data) {
}
| 746 | |
// Snapshot for instances that are still alive: |count| births, no death
// thread (death_thread_ stays NULL, reported via DeathThreadName()).
Snapshot::Snapshot(const BirthOnThread& birth_on_thread, int count)
    : birth_(&birth_on_thread),
      death_thread_(NULL),
      death_data_(DeathData(count)) {
}
| 752 | |
| 753 | const std::string Snapshot::DeathThreadName() const { |
| 754 | if (death_thread_) |
[email protected] | 84b5795 | 2011-10-15 23:52:45 | [diff] [blame] | 755 | return death_thread_->thread_name(); |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 756 | return "Still_Alive"; |
| 757 | } |
| 758 | |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 759 | base::DictionaryValue* Snapshot::ToValue() const { |
| 760 | base::DictionaryValue* dictionary = new base::DictionaryValue; |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 761 | // TODO(jar): Switch the next two lines to: |
| 762 | // birth_->ToValue("birth", dictionary); |
| 763 | // ...but that will require fixing unit tests, and JS to take |
| 764 | // "birth_location" rather than "location" |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 765 | dictionary->Set("birth_thread", |
| 766 | base::Value::CreateStringValue(birth_->birth_thread()->thread_name())); |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 767 | dictionary->Set("location", birth_->location().ToValue()); |
| 768 | |
| 769 | dictionary->Set("death_data", death_data_.ToValue()); |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 770 | dictionary->Set("death_thread", |
| 771 | base::Value::CreateStringValue(DeathThreadName())); |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 772 | return dictionary; |
| 773 | } |
| 774 | |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 775 | //------------------------------------------------------------------------------ |
| 776 | // DataCollector |
| 777 | |
[email protected] | b6b2b89 | 2011-12-04 07:19:10 | [diff] [blame] | 778 | DataCollector::DataCollector() {} |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 779 | |
[email protected] | d4799a3 | 2010-09-28 22:54:58 | [diff] [blame] | 780 | DataCollector::~DataCollector() { |
| 781 | } |
| 782 | |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 783 | void DataCollector::Append(const ThreadData& thread_data, |
| 784 | const ThreadData::BirthMap& birth_map, |
| 785 | const ThreadData::DeathMap& death_map, |
| 786 | const ThreadData::ParentChildSet& parent_child_set) { |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 787 | for (ThreadData::DeathMap::const_iterator it = death_map.begin(); |
| 788 | it != death_map.end(); ++it) { |
| 789 | collection_.push_back(Snapshot(*it->first, thread_data, it->second)); |
| 790 | global_birth_count_[it->first] -= it->first->birth_count(); |
| 791 | } |
| 792 | |
| 793 | for (ThreadData::BirthMap::const_iterator it = birth_map.begin(); |
| 794 | it != birth_map.end(); ++it) { |
| 795 | global_birth_count_[it->second] += it->second->birth_count(); |
| 796 | } |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 797 | |
| 798 | if (!kTrackParentChildLinks) |
| 799 | return; |
| 800 | |
| 801 | for (ThreadData::ParentChildSet::const_iterator it = parent_child_set.begin(); |
| 802 | it != parent_child_set.end(); ++it) { |
| 803 | parent_child_set_.insert(*it); |
| 804 | } |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 805 | } |
| 806 | |
// Exposes the accumulated snapshots; the collector retains ownership.
DataCollector::Collection* DataCollector::collection() {
  return &collection_;
}
| 810 | |
| 811 | void DataCollector::AddListOfLivingObjects() { |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 812 | for (BirthCount::iterator it = global_birth_count_.begin(); |
| 813 | it != global_birth_count_.end(); ++it) { |
| 814 | if (it->second > 0) |
| 815 | collection_.push_back(Snapshot(*it->first, it->second)); |
| 816 | } |
| 817 | } |
| 818 | |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 819 | void DataCollector::ToValue(base::DictionaryValue* dictionary) const { |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 820 | base::ListValue* list = new base::ListValue; |
| 821 | for (size_t i = 0; i < collection_.size(); ++i) { |
| 822 | list->Append(collection_[i].ToValue()); |
| 823 | } |
[email protected] | 8aa1e6e | 2011-12-14 01:36:48 | [diff] [blame] | 824 | dictionary->Set("list", list); |
| 825 | |
| 826 | base::ListValue* descendants = new base::ListValue; |
| 827 | for (ThreadData::ParentChildSet::const_iterator it = |
| 828 | parent_child_set_.begin(); |
| 829 | it != parent_child_set_.end(); |
| 830 | ++it) { |
| 831 | base::DictionaryValue* parent_child = new base::DictionaryValue; |
| 832 | it->first->ToValue("parent", parent_child); |
| 833 | it->second->ToValue("child", parent_child); |
| 834 | descendants->Append(parent_child); |
| 835 | } |
| 836 | dictionary->Set("descendants", descendants); |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 837 | } |
| 838 | |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 839 | } // namespace tracked_objects |