// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/tracked_objects.h"

#include <ctype.h>
#include <limits.h>
#include <stdlib.h>

#include "base/atomicops.h"
#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/debug/leak_annotations.h"
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_conversions.h"
#include "base/numerics/safe_math.h"
#include "base/process/process_handle.h"
#include "base/third_party/valgrind/memcheck.h"
#include "base/threading/worker_pool.h"
#include "base/tracking_info.h"
#include "build/build_config.h"

using base::TimeDelta;

namespace base {
class TimeDelta;
}

namespace tracked_objects {

namespace {

constexpr char kWorkerThreadSanitizedName[] = "WorkerThread-*";

// When ThreadData is first initialized, should we start in an ACTIVE state to
// record all of the startup-time tasks, or should we start up DEACTIVATED, so
// that we only record after parsing the command line flag --enable-tracking.
// Note that the flag may force either state, so this really controls only the
// period of time up until that flag is parsed. If there is no flag seen, then
// this state may prevail for much or all of the process lifetime.
const ThreadData::Status kInitialStartupState = ThreadData::PROFILING_ACTIVE;

// Possible states of the profiler timing enabledness.
enum {
  UNDEFINED_TIMING,
  ENABLED_TIMING,
  DISABLED_TIMING,
};

// State of the profiler timing enabledness.
base::subtle::Atomic32 g_profiler_timing_enabled = UNDEFINED_TIMING;

// Returns whether profiler timing is enabled. The default is true, but this
// may be overridden by a command-line flag. Some platforms may
// programmatically set this command-line flag to the "off" value if it's not
// specified.
// This in turn can be overridden by explicitly calling
// ThreadData::EnableProfilerTiming, say, based on a field trial.
inline bool IsProfilerTimingEnabled() {
  // Reading |g_profiler_timing_enabled| is done without a barrier because
  // multiple initialization is not an issue, while the barrier can be
  // relatively costly given that this method is sometimes called in a tight
  // loop.
  base::subtle::Atomic32 current_timing_enabled =
      base::subtle::NoBarrier_Load(&g_profiler_timing_enabled);
  if (current_timing_enabled == UNDEFINED_TIMING) {
    if (!base::CommandLine::InitializedForCurrentProcess())
      return true;
    current_timing_enabled =
        (base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
             switches::kProfilerTiming) ==
         switches::kProfilerTimingDisabledValue)
            ? DISABLED_TIMING
            : ENABLED_TIMING;
    base::subtle::NoBarrier_Store(&g_profiler_timing_enabled,
                                  current_timing_enabled);
  }
  return current_timing_enabled == ENABLED_TIMING;
}
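
// For illustration only: the switch is consulted at most once, because the
// answer is cached in |g_profiler_timing_enabled|, and an explicit
// EnableProfilerTiming() call overwrites whatever was cached. A hypothetical
// (test-style) sequence, not part of the production flow:
//
//   base::CommandLine::ForCurrentProcess()->AppendSwitchASCII(
//       switches::kProfilerTiming, switches::kProfilerTimingDisabledValue);
//   IsProfilerTimingEnabled();           // Caches DISABLED_TIMING -> false.
//   ThreadData::EnableProfilerTiming();  // Stores ENABLED_TIMING.
//   IsProfilerTimingEnabled();           // Now returns true.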

// Sanitizes a thread name by replacing its trailing sequence of digits with
// "*".
// Examples:
// 1. "BrowserBlockingWorker1/23857" => "BrowserBlockingWorker1/*"
// 2. "Chrome_IOThread" => "Chrome_IOThread"
std::string SanitizeThreadName(const std::string& thread_name) {
  size_t i = thread_name.length();

  while (i > 0 && isdigit(thread_name[i - 1]))
    --i;

  if (i == thread_name.length())
    return thread_name;

  return thread_name.substr(0, i) + '*';
}
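
// For illustration only (hypothetical inputs): SanitizeThreadName("Thread42")
// returns "Thread*", and a name such as "WorkerThread-3" would collapse to
// "WorkerThread-*", matching kWorkerThreadSanitizedName defined above. Names
// with no trailing digits pass through unchanged.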

}  // namespace

//------------------------------------------------------------------------------
// DeathData tallies durations when a death takes place.

DeathData::DeathData()
    : count_(0),
      sample_probability_count_(0),
      run_duration_sum_(0),
      queue_duration_sum_(0),
      run_duration_max_(0),
      queue_duration_max_(0),
      alloc_ops_(0),
      free_ops_(0),
      allocated_bytes_(0),
      freed_bytes_(0),
      alloc_overhead_bytes_(0),
      max_allocated_bytes_(0),
      run_duration_sample_(0),
      queue_duration_sample_(0),
      last_phase_snapshot_(nullptr) {}

DeathData::DeathData(const DeathData& other)
    : count_(other.count_),
      sample_probability_count_(other.sample_probability_count_),
      run_duration_sum_(other.run_duration_sum_),
      queue_duration_sum_(other.queue_duration_sum_),
      run_duration_max_(other.run_duration_max_),
      queue_duration_max_(other.queue_duration_max_),
      alloc_ops_(other.alloc_ops_),
      free_ops_(other.free_ops_),
      allocated_bytes_(other.allocated_bytes_),
      freed_bytes_(other.freed_bytes_),
      alloc_overhead_bytes_(other.alloc_overhead_bytes_),
      max_allocated_bytes_(other.max_allocated_bytes_),
      run_duration_sample_(other.run_duration_sample_),
      queue_duration_sample_(other.queue_duration_sample_),
      last_phase_snapshot_(nullptr) {
  // This constructor will be used by std::map when adding new DeathData values
  // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't
  // need to worry about ownership transfer.
  DCHECK(other.last_phase_snapshot_ == nullptr);
}

DeathData::~DeathData() {
  while (last_phase_snapshot_) {
    const DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_;
    last_phase_snapshot_ = snapshot->prev;
    delete snapshot;
  }
}

// TODO(jar): I need to see if this macro to optimize branching is worth using.
//
// This macro has no branching, so it is surely fast, and is equivalent to:
//   if (assign_it)
//     target = source;
// We use a macro rather than a template to force this to inline.
// Related code for calculating max is discussed on the web.
#define CONDITIONAL_ASSIGN(assign_it, target, source) \
  ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it))
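
// A worked example of the masked-XOR trick above (illustrative values):
//   target == 5 (0b0101), source == 9 (0b1001)
//   assign_it == 1: the mask is -1 (all bits set), so
//     target ^= (5 ^ 9) & mask  ==>  target ^= 12  ==>  target == 9 (assigned).
//   assign_it == 0: the mask is 0, so
//     target ^= (5 ^ 9) & 0     ==>  target ^= 0   ==>  target == 5 (unchanged).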

void DeathData::RecordDurations(const int32_t queue_duration,
                                const int32_t run_duration,
                                const uint32_t random_number) {
  // We'll just clamp at INT_MAX, but we should note this in the UI as such.
  if (count_ < INT_MAX)
    base::subtle::NoBarrier_Store(&count_, count_ + 1);

  int sample_probability_count =
      base::subtle::NoBarrier_Load(&sample_probability_count_);
  if (sample_probability_count < INT_MAX)
    ++sample_probability_count;
  base::subtle::NoBarrier_Store(&sample_probability_count_,
                                sample_probability_count);

  base::subtle::NoBarrier_Store(&queue_duration_sum_,
                                queue_duration_sum_ + queue_duration);
  base::subtle::NoBarrier_Store(&run_duration_sum_,
                                run_duration_sum_ + run_duration);

  if (queue_duration_max() < queue_duration)
    base::subtle::NoBarrier_Store(&queue_duration_max_, queue_duration);
  if (run_duration_max() < run_duration)
    base::subtle::NoBarrier_Store(&run_duration_max_, run_duration);

  // Take a uniformly distributed sample over all durations ever supplied during
  // the current profiling phase.
  // The probability that we (instead) use this new sample is
  // 1/sample_probability_count_. This results in a completely uniform selection
  // of the sample (at least when we don't clamp sample_probability_count_...
  // but that should be inconsequentially likely). We ignore the fact that we
  // correlated our selection of a sample to the run and queue times (i.e., we
  // used them to generate random_number).
  CHECK_GT(sample_probability_count, 0);
  if (0 == (random_number % sample_probability_count)) {
    base::subtle::NoBarrier_Store(&queue_duration_sample_, queue_duration);
    base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration);
  }
}
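
// The sampling above is reservoir sampling with a reservoir of size one; a
// sketch of the argument, assuming random_number is uniformly distributed:
//   - The Nth recorded death replaces the stored sample with probability 1/N
//     (random_number % N == 0).
//   - By induction, after N deaths each of the N (queue, run) duration pairs
//     is the stored sample with probability 1/N, i.e. uniformly.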

void DeathData::RecordAllocations(const uint32_t alloc_ops,
                                  const uint32_t free_ops,
                                  const uint32_t allocated_bytes,
                                  const uint32_t freed_bytes,
                                  const uint32_t alloc_overhead_bytes,
                                  const uint32_t max_allocated_bytes) {
  // Use saturating arithmetic.
  SaturatingMemberAdd(alloc_ops, &alloc_ops_);
  SaturatingMemberAdd(free_ops, &free_ops_);
  SaturatingMemberAdd(allocated_bytes, &allocated_bytes_);
  SaturatingMemberAdd(freed_bytes, &freed_bytes_);
  SaturatingMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_);

  int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes);
  if (max > max_allocated_bytes_)
    base::subtle::NoBarrier_Store(&max_allocated_bytes_, max);
}

void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
  // Snapshot and store the current state.
  last_phase_snapshot_ =
      new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_);

  // Not touching fields for which a delta can be computed by comparing with a
  // snapshot from the previous phase. Resetting other fields. Sample values
  // will be reset upon next death recording because sample_probability_count_
  // is set to 0.
  // We avoid resetting to 0 in favor of deltas whenever possible. The reason
  // is that for incrementable fields, resetting to 0 from the snapshot thread
  // potentially in parallel with incrementing in the death thread may result in
  // significant data corruption that has a potential to grow with time. Not
  // resetting incrementable fields and using deltas will cause any
  // off-by-little corruptions to be likely fixed at the next snapshot.
  // The max values are not incrementable, and cannot be deduced using deltas
  // for a given phase. Hence, we have to reset them to 0. But the potential
  // damage is limited to getting the previous phase's max to apply for the next
  // phase, and the error doesn't have a potential to keep growing with new
  // resets.
  // sample_probability_count_ is incrementable, but must be reset to 0 at the
  // phase end, so that we start a new uniformly randomized sample selection
  // after the reset. These fields are updated using atomics. However, race
  // conditions are possible since these are updated individually and not
  // together atomically, resulting in the values being mutually inconsistent.
  // The damage is limited to selecting a wrong sample, which is not something
  // that can cause accumulating or cascading effects.
  // If there were no inconsistencies caused by race conditions, we never send a
  // sample for the previous phase in the next phase's snapshot because
  // ThreadData::SnapshotExecutedTasks doesn't send deltas with 0 count.
  base::subtle::NoBarrier_Store(&sample_probability_count_, 0);
  base::subtle::NoBarrier_Store(&run_duration_max_, 0);
  base::subtle::NoBarrier_Store(&queue_duration_max_, 0);
}

void DeathData::SaturatingMemberAdd(const uint32_t addend,
                                    base::subtle::Atomic32* sum) {
  // Bail out quickly if there is no work to do or the sum is already saturated.
  if (addend == 0U || *sum == INT_MAX)
    return;

  base::CheckedNumeric<int32_t> new_sum = *sum;
  new_sum += addend;
  base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(INT_MAX));
}
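
// Illustrative behavior of SaturatingMemberAdd (hypothetical values): with
// *sum == INT_MAX - 10 and addend == 100, the checked addition overflows, so
// ValueOrDefault(INT_MAX) pins the stored value at INT_MAX; every later call
// then returns early via the |*sum == INT_MAX| check above.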

//------------------------------------------------------------------------------
DeathDataSnapshot::DeathDataSnapshot()
    : count(-1),
      run_duration_sum(-1),
      run_duration_max(-1),
      run_duration_sample(-1),
      queue_duration_sum(-1),
      queue_duration_max(-1),
      queue_duration_sample(-1),
      alloc_ops(-1),
      free_ops(-1),
      allocated_bytes(-1),
      freed_bytes(-1),
      alloc_overhead_bytes(-1),
      max_allocated_bytes(-1) {}

DeathDataSnapshot::DeathDataSnapshot(int count,
                                     int32_t run_duration_sum,
                                     int32_t run_duration_max,
                                     int32_t run_duration_sample,
                                     int32_t queue_duration_sum,
                                     int32_t queue_duration_max,
                                     int32_t queue_duration_sample,
                                     int32_t alloc_ops,
                                     int32_t free_ops,
                                     int32_t allocated_bytes,
                                     int32_t freed_bytes,
                                     int32_t alloc_overhead_bytes,
                                     int32_t max_allocated_bytes)
    : count(count),
      run_duration_sum(run_duration_sum),
      run_duration_max(run_duration_max),
      run_duration_sample(run_duration_sample),
      queue_duration_sum(queue_duration_sum),
      queue_duration_max(queue_duration_max),
      queue_duration_sample(queue_duration_sample),
      alloc_ops(alloc_ops),
      free_ops(free_ops),
      allocated_bytes(allocated_bytes),
      freed_bytes(freed_bytes),
      alloc_overhead_bytes(alloc_overhead_bytes),
      max_allocated_bytes(max_allocated_bytes) {}

DeathDataSnapshot::DeathDataSnapshot(const DeathData& death_data)
    : count(death_data.count()),
      run_duration_sum(death_data.run_duration_sum()),
      run_duration_max(death_data.run_duration_max()),
      run_duration_sample(death_data.run_duration_sample()),
      queue_duration_sum(death_data.queue_duration_sum()),
      queue_duration_max(death_data.queue_duration_max()),
      queue_duration_sample(death_data.queue_duration_sample()),
      alloc_ops(death_data.alloc_ops()),
      free_ops(death_data.free_ops()),
      allocated_bytes(death_data.allocated_bytes()),
      freed_bytes(death_data.freed_bytes()),
      alloc_overhead_bytes(death_data.alloc_overhead_bytes()),
      max_allocated_bytes(death_data.max_allocated_bytes()) {}

DeathDataSnapshot::DeathDataSnapshot(const DeathDataSnapshot& death_data) =
    default;

DeathDataSnapshot::~DeathDataSnapshot() {
}

DeathDataSnapshot DeathDataSnapshot::Delta(
    const DeathDataSnapshot& older) const {
  return DeathDataSnapshot(
      count - older.count, run_duration_sum - older.run_duration_sum,
      run_duration_max, run_duration_sample,
      queue_duration_sum - older.queue_duration_sum, queue_duration_max,
      queue_duration_sample, alloc_ops - older.alloc_ops,
      free_ops - older.free_ops, allocated_bytes - older.allocated_bytes,
      freed_bytes - older.freed_bytes,
      alloc_overhead_bytes - older.alloc_overhead_bytes, max_allocated_bytes);
}
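
// Delta() subtracts only the cumulative fields; maxes and samples are already
// per-phase because DeathData::OnProfilingPhaseCompleted resets them. A small
// example with made-up numbers: if the older snapshot has count 10 and
// run_duration_sum 100, while this snapshot has count 25, run_duration_sum 260,
// and run_duration_max 40, the delta reports count 15, run_duration_sum 160,
// and run_duration_max 40 (taken as-is, not differenced).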

//------------------------------------------------------------------------------
BirthOnThread::BirthOnThread(const Location& location,
                             const ThreadData& current)
    : location_(location),
      birth_thread_(&current) {
}

//------------------------------------------------------------------------------
BirthOnThreadSnapshot::BirthOnThreadSnapshot() {
}

BirthOnThreadSnapshot::BirthOnThreadSnapshot(const BirthOnThread& birth)
    : location(birth.location()),
      sanitized_thread_name(birth.birth_thread()->sanitized_thread_name()) {}

BirthOnThreadSnapshot::~BirthOnThreadSnapshot() {
}

//------------------------------------------------------------------------------
Births::Births(const Location& location, const ThreadData& current)
    : BirthOnThread(location, current),
      birth_count_(1) { }

int Births::birth_count() const { return birth_count_; }

void Births::RecordBirth() { ++birth_count_; }

//------------------------------------------------------------------------------
// ThreadData maintains the central data for all births and deaths on a single
// thread.

// TODO(jar): We should pull all these static vars together, into a struct, and
// optimize layout so that we benefit from locality of reference during accesses
// to them.

// static
ThreadData::NowFunction* ThreadData::now_function_for_testing_ = NULL;

// A TLS slot which points to the ThreadData instance for the current thread.
// We do a fake initialization here (zeroing out data), and then the real
// in-place construction happens when we call tls_index_.Initialize().
// static
base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER;

// static
int ThreadData::cleanup_count_ = 0;

// static
int ThreadData::incarnation_counter_ = 0;

// static
ThreadData* ThreadData::all_thread_data_list_head_ = NULL;

// static
ThreadData* ThreadData::first_retired_thread_data_ = NULL;

// static
base::LazyInstance<base::Lock>::Leaky
    ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER;

// static
base::subtle::Atomic32 ThreadData::status_ = ThreadData::UNINITIALIZED;

ThreadData::ThreadData(const std::string& sanitized_thread_name)
    : next_(NULL),
      next_retired_thread_data_(NULL),
      sanitized_thread_name_(sanitized_thread_name),
      incarnation_count_for_pool_(-1),
      current_stopwatch_(NULL) {
  DCHECK(sanitized_thread_name_.empty() ||
         !isdigit(sanitized_thread_name_.back()));
  PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
}

ThreadData::~ThreadData() {
}

void ThreadData::PushToHeadOfList() {
  // Toss in a hint of randomness (atop the uninitialized value).
  (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_,
                                                 sizeof(random_number_));
  MSAN_UNPOISON(&random_number_, sizeof(random_number_));
  random_number_ += static_cast<uint32_t>(this - static_cast<ThreadData*>(0));
  random_number_ ^= (Now() - TrackedTime()).InMilliseconds();

  DCHECK(!next_);
  base::AutoLock lock(*list_lock_.Pointer());
  incarnation_count_for_pool_ = incarnation_counter_;
  next_ = all_thread_data_list_head_;
  all_thread_data_list_head_ = this;
}

// static
ThreadData* ThreadData::first() {
  base::AutoLock lock(*list_lock_.Pointer());
  return all_thread_data_list_head_;
}

ThreadData* ThreadData::next() const { return next_; }

// static
void ThreadData::InitializeThreadContext(const std::string& thread_name) {
  if (base::WorkerPool::RunsTasksOnCurrentThread())
    return;
  DCHECK_NE(thread_name, kWorkerThreadSanitizedName);
  EnsureTlsInitialization();
  ThreadData* current_thread_data =
      reinterpret_cast<ThreadData*>(tls_index_.Get());
  if (current_thread_data)
    return;  // Browser tests instigate this.
  current_thread_data =
      GetRetiredOrCreateThreadData(SanitizeThreadName(thread_name));
  tls_index_.Set(current_thread_data);
}

// static
ThreadData* ThreadData::Get() {
  if (!tls_index_.initialized())
    return NULL;  // For unittests only.
  ThreadData* registered = reinterpret_cast<ThreadData*>(tls_index_.Get());
  if (registered)
    return registered;

  // We must be a worker thread, since we didn't pre-register.
  ThreadData* worker_thread_data =
      GetRetiredOrCreateThreadData(kWorkerThreadSanitizedName);
  tls_index_.Set(worker_thread_data);
  return worker_thread_data;
}

// static
void ThreadData::OnThreadTermination(void* thread_data) {
  DCHECK(thread_data);  // TLS should *never* call us with a NULL.
  // We must NOT do any allocations during this callback. There is a chance
  // that the allocator is no longer active on this thread.
  reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup();
}

void ThreadData::OnThreadTerminationCleanup() {
  // We must NOT do any allocations during this callback. There is a chance that
  // the allocator is no longer active on this thread.

  // The list_lock_ was created when we registered the callback, so it won't be
  // allocated here despite the lazy reference.
  base::AutoLock lock(*list_lock_.Pointer());
  if (incarnation_counter_ != incarnation_count_for_pool_)
    return;  // ThreadData was constructed in an earlier unit test.
  ++cleanup_count_;

  // Add this ThreadData to a retired list so that it can be reused by a thread
  // with the same sanitized name in the future.
  // |next_retired_thread_data_| is expected to be nullptr for a ThreadData
  // associated with an active thread.
  DCHECK(!next_retired_thread_data_);
  next_retired_thread_data_ = first_retired_thread_data_;
  first_retired_thread_data_ = this;
}

// static
void ThreadData::Snapshot(int current_profiling_phase,
                          ProcessDataSnapshot* process_data_snapshot) {
  // Get an unchanging copy of a ThreadData list.
  ThreadData* my_list = ThreadData::first();

  // Gather data serially.
  // This hackish approach *can* get some slightly corrupt tallies, as we are
  // grabbing values without the protection of a lock, but it has the advantage
  // of working even with threads that don't have message loops. If a user
  // sees any strangeness, they can always just run their stats gathering a
  // second time.
  BirthCountMap birth_counts;
  for (ThreadData* thread_data = my_list; thread_data;
       thread_data = thread_data->next()) {
    thread_data->SnapshotExecutedTasks(current_profiling_phase,
                                       &process_data_snapshot->phased_snapshots,
                                       &birth_counts);
  }

  // Add births that are still active -- i.e. objects that have tallied a birth,
  // but have not yet tallied a matching death, and hence must be either
  // running, queued up, or being held in limbo for future posting.
  auto* current_phase_tasks =
      &process_data_snapshot->phased_snapshots[current_profiling_phase].tasks;
  for (const auto& birth_count : birth_counts) {
    if (birth_count.second > 0) {
      current_phase_tasks->push_back(
          TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first),
                       DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0,
                                         0, 0, 0, 0, 0, 0),
                       "Still_Alive"));
    }
  }
}
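
// For illustration only (made-up numbers): if, after all threads have been
// visited, |birth_counts| still holds a positive value of 2 for some location,
// the loop above appends a TaskSnapshot for that location with count 2, zeroed
// durations, and the pseudo death-thread name "Still_Alive" to the current
// phase's task list.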

// static
void ThreadData::OnProfilingPhaseCompleted(int profiling_phase) {
  // Get an unchanging copy of a ThreadData list.
  ThreadData* my_list = ThreadData::first();

  // Add snapshots for all instances of death data in all threads serially.
  // This hackish approach *can* get some slightly corrupt tallies, as we are
  // grabbing values without the protection of a lock, but it has the advantage
  // of working even with threads that don't have message loops. Any corruption
  // shouldn't cause "cascading damage" to anything else (in later phases).
  for (ThreadData* thread_data = my_list; thread_data;
       thread_data = thread_data->next()) {
    thread_data->OnProfilingPhaseCompletedOnThread(profiling_phase);
  }
}

Births* ThreadData::TallyABirth(const Location& location) {
  BirthMap::iterator it = birth_map_.find(location);
  Births* child;
  if (it != birth_map_.end()) {
    child = it->second;
    child->RecordBirth();
  } else {
    child = new Births(location, *this);  // Leak this.
    // Lock since the map may get relocated now, and other threads sometimes
    // snapshot it (but they lock before copying it).
    base::AutoLock lock(map_lock_);
    birth_map_[location] = child;
  }

  return child;
}

void ThreadData::TallyADeath(const Births& births,
                             int32_t queue_duration,
                             const TaskStopwatch& stopwatch) {
  int32_t run_duration = stopwatch.RunDurationMs();

  // Stir in some randomness, plus add a constant in case durations are zero.
  const uint32_t kSomePrimeNumber = 2147483647;
  random_number_ += queue_duration + run_duration + kSomePrimeNumber;
  // An address is going to have some randomness to it as well ;-).
  random_number_ ^=
      static_cast<uint32_t>(&births - reinterpret_cast<Births*>(0));

  DeathMap::iterator it = death_map_.find(&births);
  DeathData* death_data;
  if (it != death_map_.end()) {
    death_data = &it->second;
  } else {
    base::AutoLock lock(map_lock_);  // Lock as the map may get relocated now.
    death_data = &death_map_[&births];
  }  // Release lock ASAP.
  death_data->RecordDurations(queue_duration, run_duration, random_number_);

#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
  if (stopwatch.heap_tracking_enabled()) {
    base::debug::ThreadHeapUsage heap_usage = stopwatch.heap_usage().usage();
    // Saturate the 64 bit counts on conversion to 32 bit storage.
    death_data->RecordAllocations(
        base::saturated_cast<int32_t>(heap_usage.alloc_ops),
        base::saturated_cast<int32_t>(heap_usage.free_ops),
        base::saturated_cast<int32_t>(heap_usage.alloc_bytes),
        base::saturated_cast<int32_t>(heap_usage.free_bytes),
        base::saturated_cast<int32_t>(heap_usage.alloc_overhead_bytes),
        base::saturated_cast<int32_t>(heap_usage.max_allocated_bytes));
  }
#endif
}

// static
Births* ThreadData::TallyABirthIfActive(const Location& location) {
  if (!TrackingStatus())
    return NULL;
  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return NULL;
  return current_thread_data->TallyABirth(location);
}

// static
void ThreadData::TallyRunOnNamedThreadIfTracking(
    const base::TrackingInfo& completed_task,
    const TaskStopwatch& stopwatch) {
  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  const Births* births = completed_task.birth_tally;
  if (!births)
    return;
  ThreadData* current_thread_data = stopwatch.GetThreadData();
  if (!current_thread_data)
    return;

  // Watch out for a race where status_ is changing, and hence one or both
  // of start_of_run or end_of_run is zero. In that case, we didn't bother to
  // get a time value since we "weren't tracking" and we were trying to be
  // efficient by not calling for a genuine time value. For simplicity, we'll
  // use a default zero duration when we can't calculate a true value.
  TrackedTime start_of_run = stopwatch.StartTime();
  int32_t queue_duration = 0;
  if (!start_of_run.is_null()) {
    queue_duration = (start_of_run - completed_task.EffectiveTimePosted())
        .InMilliseconds();
  }
  current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
}

// static
void ThreadData::TallyRunOnWorkerThreadIfTracking(
    const Births* births,
    const TrackedTime& time_posted,
    const TaskStopwatch& stopwatch) {
  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!births)
    return;

  // TODO(jar): Support the option to coalesce all worker-thread activity under
  // one ThreadData instance that uses locks to protect *all* access. This will
  // reduce memory (making it provably bounded), but run incrementally slower
  // (since we'll use locks on TallyABirth and TallyADeath). The good news is
  // that the locks on TallyADeath will be *after* the worker thread has run,
  // and hence nothing will be waiting for the completion (... besides some
  // other thread that might like to run). Also, the worker threads' tasks are
  // generally longer, and hence the cost of the lock may perchance be amortized
  // over the long task's lifetime.
  ThreadData* current_thread_data = stopwatch.GetThreadData();
  if (!current_thread_data)
    return;

  TrackedTime start_of_run = stopwatch.StartTime();
  int32_t queue_duration = 0;
  if (!start_of_run.is_null()) {
    queue_duration = (start_of_run - time_posted).InMilliseconds();
  }
  current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
}

// static
void ThreadData::TallyRunInAScopedRegionIfTracking(
    const Births* births,
    const TaskStopwatch& stopwatch) {
  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!births)
    return;

  ThreadData* current_thread_data = stopwatch.GetThreadData();
  if (!current_thread_data)
    return;

  int32_t queue_duration = 0;
  current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
}

void ThreadData::SnapshotExecutedTasks(
    int current_profiling_phase,
    PhasedProcessDataSnapshotMap* phased_snapshots,
    BirthCountMap* birth_counts) {
  // Get a copy of the data, so that it will not change during the iterations
  // and processing.
  BirthMap birth_map;
  DeathsSnapshot deaths;
  SnapshotMaps(current_profiling_phase, &birth_map, &deaths);

  for (const auto& birth : birth_map) {
    (*birth_counts)[birth.second] += birth.second->birth_count();
  }

  for (const auto& death : deaths) {
    (*birth_counts)[death.first] -= death.first->birth_count();

    // For the current death data, walk through all its snapshots, starting from
    // the current one, then from the previous profiling phase etc., and for
    // each snapshot calculate the delta between the snapshot and the previous
    // phase, if any. Store the deltas in the result.
    for (const DeathDataPhaseSnapshot* phase = &death.second; phase;
         phase = phase->prev) {
      const DeathDataSnapshot& death_data =
          phase->prev ? phase->death_data.Delta(phase->prev->death_data)
                      : phase->death_data;

      if (death_data.count > 0) {
        (*phased_snapshots)[phase->profiling_phase].tasks.push_back(
            TaskSnapshot(BirthOnThreadSnapshot(*death.first), death_data,
                         sanitized_thread_name()));
      }
    }
  }
}

// This may be called from another thread.
void ThreadData::SnapshotMaps(int profiling_phase,
                              BirthMap* birth_map,
                              DeathsSnapshot* deaths) {
  base::AutoLock lock(map_lock_);

  for (const auto& birth : birth_map_)
    (*birth_map)[birth.first] = birth.second;

  for (const auto& death : death_map_) {
    deaths->push_back(std::make_pair(
        death.first,
        DeathDataPhaseSnapshot(profiling_phase, death.second,
                               death.second.last_phase_snapshot())));
  }
}

void ThreadData::OnProfilingPhaseCompletedOnThread(int profiling_phase) {
  base::AutoLock lock(map_lock_);

  for (auto& death : death_map_) {
    death.second.OnProfilingPhaseCompleted(profiling_phase);
  }
}

void ThreadData::EnsureTlsInitialization() {
  if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
    return;  // Someone else did the initialization.
  // Due to racy lazy initialization in tests, we'll need to recheck status_
  // after we acquire the lock.

  // Ensure that we don't double initialize TLS. We are called when single
  // threaded in the product, but some tests may be racy and lazy about our
  // initialization.
  base::AutoLock lock(*list_lock_.Pointer());
  if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
    return;  // Someone raced in here and beat us.

  // Perform the "real" TLS initialization now, and leave it intact through
  // process termination.
  if (!tls_index_.initialized()) {  // Testing may have initialized this.
    DCHECK_EQ(base::subtle::NoBarrier_Load(&status_), UNINITIALIZED);
    tls_index_.Initialize(&ThreadData::OnThreadTermination);
    DCHECK(tls_index_.initialized());
  } else {
    // TLS was initialized for us earlier.
    DCHECK_EQ(base::subtle::NoBarrier_Load(&status_), DORMANT_DURING_TESTS);
  }

  // Incarnation counter is only significant to testing, as it otherwise will
  // never again change in this process.
  ++incarnation_counter_;

  // The lock is not critical for setting status_, but it doesn't hurt. It also
  // ensures that if we have a racy initialization, that we'll bail as soon as
  // we get the lock earlier in this method.
  base::subtle::Release_Store(&status_, kInitialStartupState);
  DCHECK(base::subtle::NoBarrier_Load(&status_) != UNINITIALIZED);

#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
  // Make sure heap tracking is enabled ASAP if the default state is active.
  if (kInitialStartupState == PROFILING_ACTIVE &&
      !base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled()) {
    base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
  }
#endif  // BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
}
| 795 | |
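// EnsureTlsInitialization() above is a double-checked initialization: a
// lock-free Acquire_Load fast path, a re-check of the same atomic while
// holding list_lock_, the one-time TLS setup, and finally a Release_Store that
// publishes the new status. A minimal sketch of that pattern follows, using
// hypothetical names (g_sketch_status, g_sketch_lock, SketchOneTimeSetup) that
// are not part of this file, and assuming base/atomicops.h,
// base/lazy_instance.h and base/synchronization/lock.h are available:
//
//   base::subtle::Atomic32 g_sketch_status = 0;  // 0 == uninitialized.
//   base::LazyInstance<base::Lock>::Leaky g_sketch_lock =
//       LAZY_INSTANCE_INITIALIZER;
//
//   void SketchEnsureInitialized() {
//     if (base::subtle::Acquire_Load(&g_sketch_status))
//       return;  // Fast path: someone else already initialized.
//     base::AutoLock lock(*g_sketch_lock.Pointer());
//     if (base::subtle::Acquire_Load(&g_sketch_status))
//       return;  // Another thread raced in and finished first.
//     SketchOneTimeSetup();  // e.g. initialize a TLS slot.
//     base::subtle::Release_Store(&g_sketch_status, 1);  // Publish.
//   }
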
| 796 | // static |
asvitkine | d0abaa3 | 2015-05-07 16:27:17 | [diff] [blame] | 797 | void ThreadData::InitializeAndSetTrackingStatus(Status status) { |
[email protected] | 702a12d | 2012-02-10 19:43:42 | [diff] [blame] | 798 | DCHECK_GE(status, DEACTIVATED); |
vadimt | cf8983e | 2015-05-01 19:13:01 | [diff] [blame] | 799 | DCHECK_LE(status, PROFILING_ACTIVE); |
[email protected] | 702a12d | 2012-02-10 19:43:42 | [diff] [blame] | 800 | |
zhenyu.shan | a55ed00 | 2016-06-07 21:05:34 | [diff] [blame] | 801 | EnsureTlsInitialization(); // No-op if already initialized. |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 802 | |
siggi | de38d0c | 2016-12-02 20:04:21 | [diff] [blame] | 803 | if (status > DEACTIVATED) { |
[email protected] | 702a12d | 2012-02-10 19:43:42 | [diff] [blame] | 804 | status = PROFILING_ACTIVE; |
siggi | de38d0c | 2016-12-02 20:04:21 | [diff] [blame] | 805 | |
| 806 | #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER) |
| 807 | if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled()) |
| 808 | base::debug::ThreadHeapUsageTracker::EnableHeapTracking(); |
| 809 | #endif // BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER) |
| 810 | } |
amistry | 42d1688 | 2015-07-17 03:58:06 | [diff] [blame] | 811 | base::subtle::Release_Store(&status_, status); |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 812 | } |
| 813 | |
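// Hypothetical usage sketch (illustration only, not actual startup code): a
// process that wants task tracking from early in startup could call
//
//   tracked_objects::ThreadData::InitializeAndSetTrackingStatus(
//       tracked_objects::ThreadData::PROFILING_ACTIVE);
//
// whereas passing DEACTIVATED keeps the bookkeeping initialized but records
// nothing until tracking is enabled later.
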
| 814 | // static |
[email protected] | 702a12d | 2012-02-10 19:43:42 | [diff] [blame] | 815 | ThreadData::Status ThreadData::status() { |
amistry | 42d1688 | 2015-07-17 03:58:06 | [diff] [blame] | 816 | return static_cast<ThreadData::Status>(base::subtle::Acquire_Load(&status_)); |
[email protected] | 702a12d | 2012-02-10 19:43:42 | [diff] [blame] | 817 | } |
| 818 | |
| 819 | // static |
| 820 | bool ThreadData::TrackingStatus() { |
amistry | 42d1688 | 2015-07-17 03:58:06 | [diff] [blame] | 821 | return base::subtle::Acquire_Load(&status_) > DEACTIVATED; |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 822 | } |
| 823 | |
| 824 | // static |
vadimt | a156831 | 2014-11-06 22:27:43 | [diff] [blame] | 825 | void ThreadData::EnableProfilerTiming() { |
| 826 | base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, ENABLED_TIMING); |
| 827 | } |
| 828 | |
| 829 | // static |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 830 | TrackedTime ThreadData::Now() { |
primiano | bc5681f5 | 2016-02-03 18:53:11 | [diff] [blame] | 831 | if (now_function_for_testing_) |
| 832 | return TrackedTime::FromMilliseconds((*now_function_for_testing_)()); |
vadimt | 031d00f | 2015-04-09 03:14:55 | [diff] [blame] | 833 | if (IsProfilerTimingEnabled() && TrackingStatus()) |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 834 | return TrackedTime::Now(); |
| 835 | return TrackedTime(); // Super fast when disabled, or not compiled in.
[email protected] | 84b5795 | 2011-10-15 23:52:45 | [diff] [blame] | 836 | } |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 837 | |
| 838 | // static |
[email protected] | 9a88c90 | 2011-11-24 00:00:31 | [diff] [blame] | 839 | void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) { |
| 840 | base::AutoLock lock(*list_lock_.Pointer()); |
[email protected] | 30de3a3 | 2014-03-14 18:25:48 | [diff] [blame] | 841 | |
| 842 | // TODO(jar): until this is working on XP, don't run the real test. |
| 843 | #if 0 |
[email protected] | 9a88c90 | 2011-11-24 00:00:31 | [diff] [blame] | 844 | // Verify that we've at least shut down/cleaned up the major named threads.
| 845 | // The caller should tell us how many thread shutdowns should have taken place
| 846 | // by now.
[email protected] | 9a88c90 | 2011-11-24 00:00:31 | [diff] [blame] | 847 | CHECK_GT(cleanup_count_, major_threads_shutdown_count); |
[email protected] | 30de3a3 | 2014-03-14 18:25:48 | [diff] [blame] | 848 | #endif |
[email protected] | 9a88c90 | 2011-11-24 00:00:31 | [diff] [blame] | 849 | } |
| 850 | |
| 851 | // static |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 852 | void ThreadData::ShutdownSingleThreadedCleanup(bool leak) { |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 853 | // This is only called from test code, where we need to clean up so that
| 854 | // additional tests can be run.
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 855 | // We must be single threaded... but be careful anyway. |
asvitkine | d0abaa3 | 2015-05-07 16:27:17 | [diff] [blame] | 856 | InitializeAndSetTrackingStatus(DEACTIVATED); |
| 857 | |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 858 | ThreadData* thread_data_list; |
| 859 | { |
[email protected] | 77169a6 | 2011-11-14 20:36:46 | [diff] [blame] | 860 | base::AutoLock lock(*list_lock_.Pointer()); |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 861 | thread_data_list = all_thread_data_list_head_; |
| 862 | all_thread_data_list_head_ = NULL; |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 863 | ++incarnation_counter_; |
[email protected] | 26cdeb96 | 2011-11-20 04:17:07 | [diff] [blame] | 864 | // To be clean, break apart the retired worker list (though we leak its entries).
fdoray | f607a84 | 2016-12-06 21:44:48 | [diff] [blame] | 865 | while (first_retired_thread_data_) { |
| 866 | ThreadData* thread_data = first_retired_thread_data_; |
| 867 | first_retired_thread_data_ = thread_data->next_retired_thread_data_; |
| 868 | thread_data->next_retired_thread_data_ = nullptr; |
[email protected] | 26cdeb96 | 2011-11-20 04:17:07 | [diff] [blame] | 869 | } |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 870 | } |
| 871 | |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 872 | // Put most global statics back in pristine shape.
[email protected] | 9a88c90 | 2011-11-24 00:00:31 | [diff] [blame] | 873 | cleanup_count_ = 0; |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 874 | tls_index_.Set(NULL); |
amistry | 42d1688 | 2015-07-17 03:58:06 | [diff] [blame] | 875 | // Almost UNINITIALIZED. |
| 876 | base::subtle::Release_Store(&status_, DORMANT_DURING_TESTS); |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 877 | |
| 878 | // To avoid any chance of racing in unit tests, which is the only place we |
| 879 | // call this function, we may sometimes leak all the data structures we |
| 880 | // recovered, as they may still be in use on threads from prior tests! |
[email protected] | bf709abd | 2013-06-10 11:32:20 | [diff] [blame] | 881 | if (leak) { |
| 882 | ThreadData* thread_data = thread_data_list; |
| 883 | while (thread_data) { |
| 884 | ANNOTATE_LEAKING_OBJECT_PTR(thread_data); |
| 885 | thread_data = thread_data->next(); |
| 886 | } |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 887 | return; |
[email protected] | bf709abd | 2013-06-10 11:32:20 | [diff] [blame] | 888 | } |
[email protected] | b2a9bbd | 2011-10-31 22:36:21 | [diff] [blame] | 889 | |
| 890 | // When we want to clean up (on a single thread), here is what we do.
| 891 | |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 892 | // Do actual recursive delete in all ThreadData instances. |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 893 | while (thread_data_list) { |
| 894 | ThreadData* next_thread_data = thread_data_list; |
| 895 | thread_data_list = thread_data_list->next(); |
| 896 | |
| 897 | for (BirthMap::iterator it = next_thread_data->birth_map_.begin(); |
| 898 | next_thread_data->birth_map_.end() != it; ++it) |
| 899 | delete it->second; // Delete the Birth Records. |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 900 | delete next_thread_data; // Includes all Death Records. |
| 901 | } |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 902 | } |
| 903 | |
fdoray | f607a84 | 2016-12-06 21:44:48 | [diff] [blame] | 904 | // static |
| 905 | ThreadData* ThreadData::GetRetiredOrCreateThreadData( |
| 906 | const std::string& sanitized_thread_name) { |
| 907 | SCOPED_UMA_HISTOGRAM_TIMER("TrackedObjects.GetRetiredOrCreateThreadData"); |
| 908 | |
| 909 | { |
| 910 | base::AutoLock lock(*list_lock_.Pointer()); |
| 911 | ThreadData** pcursor = &first_retired_thread_data_; |
| 912 | ThreadData* cursor = first_retired_thread_data_; |
| 913 | |
| 914 | // Assuming that there aren't more than a few tens of retired ThreadData |
| 915 | // instances, this lookup should be quick compared to the thread creation |
| 916 | // time. Retired ThreadData instances cannot be stored in a map because |
| 917 | // insertions are done from OnThreadTerminationCleanup() where allocations |
| 918 | // are not allowed. |
| 919 | // |
| 920 | // Note: Test processes may have more than a few tens of retired ThreadData |
| 921 | // instances. |
| 922 | while (cursor) { |
| 923 | if (cursor->sanitized_thread_name() == sanitized_thread_name) { |
| 924 | DCHECK_EQ(*pcursor, cursor); |
| 925 | *pcursor = cursor->next_retired_thread_data_; |
| 926 | cursor->next_retired_thread_data_ = nullptr; |
| 927 | return cursor; |
| 928 | } |
| 929 | pcursor = &cursor->next_retired_thread_data_; |
| 930 | cursor = cursor->next_retired_thread_data_; |
| 931 | } |
| 932 | } |
| 933 | |
| 934 | return new ThreadData(sanitized_thread_name); |
| 935 | } |
| 936 | |
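// The loop above uses the pointer-to-pointer idiom to splice a match out of a
// singly linked list without special-casing the head. A self-contained sketch
// of that idiom, with hypothetical types and names (SketchNode, SketchUnlink)
// that are not part of this file:
//
//   struct SketchNode {
//     int key;
//     SketchNode* next;
//   };
//
//   // Detaches and returns the first node whose key matches, or nullptr.
//   SketchNode* SketchUnlink(SketchNode** head, int key) {
//     for (SketchNode** pcursor = head; *pcursor;
//          pcursor = &(*pcursor)->next) {
//       if ((*pcursor)->key == key) {
//         SketchNode* found = *pcursor;
//         *pcursor = found->next;  // Splice the node out of the list.
//         found->next = nullptr;
//         return found;
//       }
//     }
//     return nullptr;
//   }
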
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 937 | //------------------------------------------------------------------------------ |
vadimt | 12f0f7d | 2014-09-15 19:19:38 | [diff] [blame] | 938 | TaskStopwatch::TaskStopwatch() |
vadimt | 2017553 | 2014-10-28 20:14:20 | [diff] [blame] | 939 | : wallclock_duration_ms_(0), |
| 940 | current_thread_data_(NULL), |
vadimt | 12f0f7d | 2014-09-15 19:19:38 | [diff] [blame] | 941 | excluded_duration_ms_(0), |
| 942 | parent_(NULL) { |
danakj | e649f57 | 2015-01-08 23:35:58 | [diff] [blame] | 943 | #if DCHECK_IS_ON() |
vadimt | 2017553 | 2014-10-28 20:14:20 | [diff] [blame] | 944 | state_ = CREATED; |
vadimt | 12f0f7d | 2014-09-15 19:19:38 | [diff] [blame] | 945 | child_ = NULL; |
| 946 | #endif |
siggi | de38d0c | 2016-12-02 20:04:21 | [diff] [blame] | 947 | #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER) |
| 948 | heap_tracking_enabled_ = |
| 949 | base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled(); |
| 950 | #endif |
vadimt | 2017553 | 2014-10-28 20:14:20 | [diff] [blame] | 951 | } |
vadimt | 12f0f7d | 2014-09-15 19:19:38 | [diff] [blame] | 952 | |
vadimt | 2017553 | 2014-10-28 20:14:20 | [diff] [blame] | 953 | TaskStopwatch::~TaskStopwatch() { |
danakj | e649f57 | 2015-01-08 23:35:58 | [diff] [blame] | 954 | #if DCHECK_IS_ON() |
vadimt | 2017553 | 2014-10-28 20:14:20 | [diff] [blame] | 955 | DCHECK(state_ != RUNNING); |
| 956 | DCHECK(child_ == NULL); |
| 957 | #endif |
| 958 | } |
| 959 | |
| 960 | void TaskStopwatch::Start() { |
danakj | e649f57 | 2015-01-08 23:35:58 | [diff] [blame] | 961 | #if DCHECK_IS_ON() |
vadimt | 2017553 | 2014-10-28 20:14:20 | [diff] [blame] | 962 | DCHECK(state_ == CREATED); |
| 963 | state_ = RUNNING; |
| 964 | #endif |
| 965 | |
| 966 | start_time_ = ThreadData::Now(); |
siggi | de38d0c | 2016-12-02 20:04:21 | [diff] [blame] | 967 | #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER) |
| 968 | if (heap_tracking_enabled_) |
| 969 | heap_usage_.Start(); |
| 970 | #endif |
vadimt | 2017553 | 2014-10-28 20:14:20 | [diff] [blame] | 971 | |
| 972 | current_thread_data_ = ThreadData::Get(); |
vadimt | 12f0f7d | 2014-09-15 19:19:38 | [diff] [blame] | 973 | if (!current_thread_data_) |
| 974 | return; |
| 975 | |
| 976 | parent_ = current_thread_data_->current_stopwatch_; |
danakj | e649f57 | 2015-01-08 23:35:58 | [diff] [blame] | 977 | #if DCHECK_IS_ON() |
vadimt | 12f0f7d | 2014-09-15 19:19:38 | [diff] [blame] | 978 | if (parent_) { |
| 979 | DCHECK(parent_->state_ == RUNNING); |
| 980 | DCHECK(parent_->child_ == NULL); |
| 981 | parent_->child_ = this; |
| 982 | } |
| 983 | #endif |
| 984 | current_thread_data_->current_stopwatch_ = this; |
| 985 | } |
| 986 | |
vadimt | 12f0f7d | 2014-09-15 19:19:38 | [diff] [blame] | 987 | void TaskStopwatch::Stop() { |
| 988 | const TrackedTime end_time = ThreadData::Now(); |
danakj | e649f57 | 2015-01-08 23:35:58 | [diff] [blame] | 989 | #if DCHECK_IS_ON() |
vadimt | 12f0f7d | 2014-09-15 19:19:38 | [diff] [blame] | 990 | DCHECK(state_ == RUNNING); |
| 991 | state_ = STOPPED; |
| 992 | DCHECK(child_ == NULL); |
| 993 | #endif |
siggi | de38d0c | 2016-12-02 20:04:21 | [diff] [blame] | 994 | #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER) |
| 995 | if (heap_tracking_enabled_) |
| 996 | heap_usage_.Stop(true); |
| 997 | #endif |
vadimt | 12f0f7d | 2014-09-15 19:19:38 | [diff] [blame] | 998 | |
| 999 | if (!start_time_.is_null() && !end_time.is_null()) { |
| 1000 | wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds(); |
| 1001 | } |
| 1002 | |
| 1003 | if (!current_thread_data_) |
| 1004 | return; |
| 1005 | |
| 1006 | DCHECK(current_thread_data_->current_stopwatch_ == this); |
| 1007 | current_thread_data_->current_stopwatch_ = parent_; |
| 1008 | if (!parent_) |
| 1009 | return; |
| 1010 | |
danakj | e649f57 | 2015-01-08 23:35:58 | [diff] [blame] | 1011 | #if DCHECK_IS_ON() |
vadimt | 12f0f7d | 2014-09-15 19:19:38 | [diff] [blame] | 1012 | DCHECK(parent_->state_ == RUNNING); |
| 1013 | DCHECK(parent_->child_ == this); |
| 1014 | parent_->child_ = NULL; |
| 1015 | #endif |
vadimt | 2017553 | 2014-10-28 20:14:20 | [diff] [blame] | 1016 | parent_->excluded_duration_ms_ += wallclock_duration_ms_; |
vadimt | 12f0f7d | 2014-09-15 19:19:38 | [diff] [blame] | 1017 | parent_ = NULL; |
| 1018 | } |
| 1019 | |
| 1020 | TrackedTime TaskStopwatch::StartTime() const { |
danakj | e649f57 | 2015-01-08 23:35:58 | [diff] [blame] | 1021 | #if DCHECK_IS_ON() |
vadimt | 2017553 | 2014-10-28 20:14:20 | [diff] [blame] | 1022 | DCHECK(state_ != CREATED); |
| 1023 | #endif |
| 1024 | |
vadimt | 12f0f7d | 2014-09-15 19:19:38 | [diff] [blame] | 1025 | return start_time_; |
| 1026 | } |
| 1027 | |
avi | 9b6f4293 | 2015-12-26 22:15:14 | [diff] [blame] | 1028 | int32_t TaskStopwatch::RunDurationMs() const { |
danakj | e649f57 | 2015-01-08 23:35:58 | [diff] [blame] | 1029 | #if DCHECK_IS_ON() |
vadimt | 12f0f7d | 2014-09-15 19:19:38 | [diff] [blame] | 1030 | DCHECK(state_ == STOPPED); |
| 1031 | #endif |
| 1032 | |
| 1033 | return wallclock_duration_ms_ - excluded_duration_ms_; |
| 1034 | } |
| 1035 | |
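// Hypothetical usage sketch (DoSomeWork/DoNestedWork are placeholders): when
// stopwatches nest, Stop() adds the child's wall-clock time to the parent's
// excluded_duration_ms_, so RunDurationMs() reports each task's exclusive run
// time.
//
//   TaskStopwatch outer;
//   outer.Start();
//   DoSomeWork();        // Attributed to the outer task.
//   {
//     TaskStopwatch inner;
//     inner.Start();
//     DoNestedWork();    // Attributed to the inner task only.
//     inner.Stop();      // Adds inner's wallclock to outer's excluded time.
//   }
//   outer.Stop();
//   // outer.RunDurationMs() == outer wallclock minus inner wallclock.
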
| 1036 | ThreadData* TaskStopwatch::GetThreadData() const { |
danakj | e649f57 | 2015-01-08 23:35:58 | [diff] [blame] | 1037 | #if DCHECK_IS_ON() |
vadimt | 2017553 | 2014-10-28 20:14:20 | [diff] [blame] | 1038 | DCHECK(state_ != CREATED); |
| 1039 | #endif |
| 1040 | |
vadimt | 12f0f7d | 2014-09-15 19:19:38 | [diff] [blame] | 1041 | return current_thread_data_; |
| 1042 | } |
| 1043 | |
| 1044 | //------------------------------------------------------------------------------ |
vadimt | e2de473 | 2015-04-27 21:43:02 | [diff] [blame] | 1045 | // DeathDataPhaseSnapshot |
| 1046 | |
| 1047 | DeathDataPhaseSnapshot::DeathDataPhaseSnapshot( |
| 1048 | int profiling_phase, |
siggi | de38d0c | 2016-12-02 20:04:21 | [diff] [blame] | 1049 | const DeathData& death, |
vadimt | e2de473 | 2015-04-27 21:43:02 | [diff] [blame] | 1050 | const DeathDataPhaseSnapshot* prev) |
siggi | de38d0c | 2016-12-02 20:04:21 | [diff] [blame] | 1051 | : profiling_phase(profiling_phase), death_data(death), prev(prev) {} |
vadimt | e2de473 | 2015-04-27 21:43:02 | [diff] [blame] | 1052 | |
| 1053 | //------------------------------------------------------------------------------ |
| 1054 | // TaskSnapshot |
| 1055 | |
[email protected] | 1cb05db | 2012-04-13 00:39:26 | [diff] [blame] | 1056 | TaskSnapshot::TaskSnapshot() { |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 1057 | } |
| 1058 | |
vadimt | e2de473 | 2015-04-27 21:43:02 | [diff] [blame] | 1059 | TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth, |
| 1060 | const DeathDataSnapshot& death_data, |
fdoray | f607a84 | 2016-12-06 21:44:48 | [diff] [blame] | 1061 | const std::string& death_sanitized_thread_name) |
[email protected] | 1cb05db | 2012-04-13 00:39:26 | [diff] [blame] | 1062 | : birth(birth), |
| 1063 | death_data(death_data), |
fdoray | f607a84 | 2016-12-06 21:44:48 | [diff] [blame] | 1064 | death_sanitized_thread_name(death_sanitized_thread_name) {} |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 1065 | |
[email protected] | 1cb05db | 2012-04-13 00:39:26 | [diff] [blame] | 1066 | TaskSnapshot::~TaskSnapshot() { |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 1067 | } |
| 1068 | |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 1069 | //------------------------------------------------------------------------------ |
vadimt | 379d7fe | 2015-04-01 00:09:35 | [diff] [blame] | 1070 | // ProcessDataPhaseSnapshot |
| 1071 | |
| 1072 | ProcessDataPhaseSnapshot::ProcessDataPhaseSnapshot() { |
| 1073 | } |
| 1074 | |
vmpstr | 7c787706 | 2016-02-18 22:12:24 | [diff] [blame] | 1075 | ProcessDataPhaseSnapshot::ProcessDataPhaseSnapshot( |
| 1076 | const ProcessDataPhaseSnapshot& other) = default; |
| 1077 | |
vadimt | 379d7fe | 2015-04-01 00:09:35 | [diff] [blame] | 1078 | ProcessDataPhaseSnapshot::~ProcessDataPhaseSnapshot() { |
| 1079 | } |
| 1080 | |
| 1081 | //------------------------------------------------------------------------------ |
| 1082 | // ProcessDataPhaseSnapshot |
[email protected] | 1cb05db | 2012-04-13 00:39:26 | [diff] [blame] | 1083 | |
| 1084 | ProcessDataSnapshot::ProcessDataSnapshot() |
[email protected] | fe5d406 | 2012-04-23 21:18:19 | [diff] [blame] | 1085 | #if !defined(OS_NACL) |
[email protected] | 1cb05db | 2012-04-13 00:39:26 | [diff] [blame] | 1086 | : process_id(base::GetCurrentProcId()) { |
[email protected] | fe5d406 | 2012-04-23 21:18:19 | [diff] [blame] | 1087 | #else |
vadimt | 379d7fe | 2015-04-01 00:09:35 | [diff] [blame] | 1088 | : process_id(base::kNullProcessId) { |
[email protected] | fe5d406 | 2012-04-23 21:18:19 | [diff] [blame] | 1089 | #endif |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 1090 | } |
| 1091 | |
vmpstr | e65942b | 2016-02-25 00:50:31 | [diff] [blame] | 1092 | ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) = |
| 1093 | default; |
| 1094 | |
[email protected] | 1cb05db | 2012-04-13 00:39:26 | [diff] [blame] | 1095 | ProcessDataSnapshot::~ProcessDataSnapshot() { |
[email protected] | 84baeca | 2011-10-24 18:55:16 | [diff] [blame] | 1096 | } |
| 1097 | |
initial.commit | d7cae12 | 2008-07-26 21:49:38 | [diff] [blame] | 1098 | } // namespace tracked_objects |