Reapply "Update Benchmark (#83488)" (#83916)
This reverts commit aec6a04b8e99b42eca431fc0b56947937d3a14c2.
(google/benchmark still at hash 1576991177ba97a4b2ff6c45950f1fa6e9aa678c, as it was in #83488. Also reapplied the same extra local diffs.)
Verified locally.
diff --git a/third-party/benchmark/src/CMakeLists.txt b/third-party/benchmark/src/CMakeLists.txt
index e814a4e..943594b 100644
--- a/third-party/benchmark/src/CMakeLists.txt
+++ b/third-party/benchmark/src/CMakeLists.txt
@@ -25,12 +25,25 @@
SOVERSION ${GENERIC_LIB_SOVERSION}
)
target_include_directories(benchmark PUBLIC
- $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>)
+ $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
+)
+
+set_property(
+ SOURCE benchmark.cc
+ APPEND
+ PROPERTY COMPILE_DEFINITIONS
+ BENCHMARK_VERSION="${VERSION}"
+)
# libpfm, if available
-if (HAVE_LIBPFM)
- target_link_libraries(benchmark PRIVATE pfm)
- add_definitions(-DHAVE_LIBPFM)
+if (PFM_FOUND)
+ target_link_libraries(benchmark PRIVATE PFM::libpfm)
+ target_compile_definitions(benchmark PRIVATE -DHAVE_LIBPFM)
+endif()
+
+# pthread affinity, if available
+if(HAVE_PTHREAD_AFFINITY)
+ target_compile_definitions(benchmark PRIVATE -DBENCHMARK_HAS_PTHREAD_AFFINITY)
endif()
# Link threads.
@@ -53,6 +66,10 @@
target_link_libraries(benchmark PRIVATE kstat)
endif()
+if (NOT BUILD_SHARED_LIBS)
+ target_compile_definitions(benchmark PUBLIC -DBENCHMARK_STATIC_DEFINE)
+endif()
+
# Benchmark main library
add_library(benchmark_main "benchmark_main.cc")
add_library(benchmark::benchmark_main ALIAS benchmark_main)
@@ -60,10 +77,10 @@
OUTPUT_NAME "benchmark_main"
VERSION ${GENERIC_LIB_VERSION}
SOVERSION ${GENERIC_LIB_SOVERSION}
+ DEFINE_SYMBOL benchmark_EXPORTS
)
target_link_libraries(benchmark_main PUBLIC benchmark::benchmark)
-
set(generated_dir "${PROJECT_BINARY_DIR}")
set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake")
@@ -107,6 +124,7 @@
install(
DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark"
+ "${PROJECT_BINARY_DIR}/include/benchmark"
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
FILES_MATCHING PATTERN "*.*h")
diff --git a/third-party/benchmark/src/benchmark.cc b/third-party/benchmark/src/benchmark.cc
index 4731511..495944d 100644
--- a/third-party/benchmark/src/benchmark.cc
+++ b/third-party/benchmark/src/benchmark.cc
@@ -19,7 +19,7 @@
#include "internal_macros.h"
#ifndef BENCHMARK_OS_WINDOWS
-#ifndef BENCHMARK_OS_FUCHSIA
+#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT)
#include <sys/resource.h>
#endif
#include <sys/time.h>
@@ -65,12 +65,28 @@
// linked into the binary are run.
BM_DEFINE_string(benchmark_filter, "");
-// Minimum number of seconds we should run benchmark before results are
-// considered significant. For cpu-time based tests, this is the lower bound
+// Specification of how long to run the benchmark.
+//
+// It can be either an exact number of iterations (specified as `<integer>x`),
+// or a minimum number of seconds (specified as `<float>s`). If the latter
+// format (ie., min seconds) is used, the system may run the benchmark longer
+// until the results are considered significant.
+//
+// For backward compatibility, the `s` suffix may be omitted, in which case,
+// the specified number is interpreted as the number of seconds.
+//
+// For cpu-time based tests, this is the lower bound
// on the total cpu time used by all threads that make up the test. For
// real-time based tests, this is the lower bound on the elapsed time of the
// benchmark execution, regardless of number of threads.
-BM_DEFINE_double(benchmark_min_time, 0.5);
+BM_DEFINE_string(benchmark_min_time, kDefaultMinTimeStr);
+
+// Minimum number of seconds a benchmark should be run before results should be
+// taken into account. This e.g can be necessary for benchmarks of code which
+// needs to fill some form of cache before performance is of interest.
+// Note: results gathered within this period are discarded and not used for
+// reported result.
+BM_DEFINE_double(benchmark_min_warmup_time, 0.0);
// The number of runs of each benchmark. If greater than 1, the mean and
// standard deviation of the runs will be reported.
@@ -121,6 +137,10 @@
// pairs. Kept internal as it's only used for parsing from env/command line.
BM_DEFINE_kvpairs(benchmark_context, {});
+// Set the default time unit to use for reports
+// Valid values are 'ns', 'us', 'ms' or 's'
+BM_DEFINE_string(benchmark_time_unit, "");
+
// The level of verbose logging to output
BM_DEFINE_int32(v, 0);
@@ -128,23 +148,28 @@
std::map<std::string, std::string>* global_context = nullptr;
+BENCHMARK_EXPORT std::map<std::string, std::string>*& GetGlobalContext() {
+ return global_context;
+}
+
// FIXME: wouldn't LTO mess this up?
void UseCharPointer(char const volatile*) {}
} // namespace internal
-State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
- int thread_i, int n_threads, internal::ThreadTimer* timer,
- internal::ThreadManager* manager,
+State::State(std::string name, IterationCount max_iters,
+ const std::vector<int64_t>& ranges, int thread_i, int n_threads,
+ internal::ThreadTimer* timer, internal::ThreadManager* manager,
internal::PerfCountersMeasurement* perf_counters_measurement)
: total_iterations_(0),
batch_leftover_(0),
max_iterations(max_iters),
started_(false),
finished_(false),
- error_occurred_(false),
+ skipped_(internal::NotSkipped),
range_(ranges),
complexity_n_(0),
+ name_(std::move(name)),
thread_index_(thread_i),
threads_(n_threads),
timer_(timer),
@@ -154,6 +179,17 @@
BM_CHECK_LT(thread_index_, threads_)
<< "thread_index must be less than threads";
+ // Add counters with correct flag now. If added with `counters[name]` in
+ // `PauseTiming`, a new `Counter` will be inserted the first time, which
+ // won't have the flag. Inserting them now also reduces the allocations
+ // during the benchmark.
+ if (perf_counters_measurement_) {
+ for (const std::string& counter_name :
+ perf_counters_measurement_->names()) {
+ counters[counter_name] = Counter(0.0, Counter::kAvgIterations);
+ }
+ }
+
// Note: The use of offsetof below is technically undefined until C++17
// because State is not a standard layout type. However, all compilers
// currently provide well-defined behavior as an extension (which is
@@ -170,11 +206,18 @@
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winvalid-offsetof"
#endif
+#if defined(__NVCC__)
+#pragma nv_diagnostic push
+#pragma nv_diag_suppress 1427
+#endif
+#if defined(__NVCOMPILER)
+#pragma diagnostic push
+#pragma diag_suppress offset_in_non_POD_nonstandard
+#endif
// Offset tests to ensure commonly accessed data is on the first cache line.
const int cache_line_size = 64;
- static_assert(offsetof(State, error_occurred_) <=
- (cache_line_size - sizeof(error_occurred_)),
- "");
+ static_assert(
+ offsetof(State, skipped_) <= (cache_line_size - sizeof(skipped_)), "");
#if defined(__INTEL_COMPILER)
#pragma warning pop
#elif defined(__GNUC__)
@@ -182,39 +225,61 @@
#elif defined(__clang__)
#pragma clang diagnostic pop
#endif
+#if defined(__NVCC__)
+#pragma nv_diagnostic pop
+#endif
+#if defined(__NVCOMPILER)
+#pragma diagnostic pop
+#endif
}
void State::PauseTiming() {
// Add in time accumulated so far
- BM_CHECK(started_ && !finished_ && !error_occurred_);
+ BM_CHECK(started_ && !finished_ && !skipped());
timer_->StopTimer();
if (perf_counters_measurement_) {
- auto measurements = perf_counters_measurement_->StopAndGetMeasurements();
+ std::vector<std::pair<std::string, double>> measurements;
+ if (!perf_counters_measurement_->Stop(measurements)) {
+ BM_CHECK(false) << "Perf counters read the value failed.";
+ }
for (const auto& name_and_measurement : measurements) {
- auto name = name_and_measurement.first;
- auto measurement = name_and_measurement.second;
- BM_CHECK_EQ(counters[name], 0.0);
- counters[name] = Counter(measurement, Counter::kAvgIterations);
+ const std::string& name = name_and_measurement.first;
+ const double measurement = name_and_measurement.second;
+ // Counter was inserted with `kAvgIterations` flag by the constructor.
+ assert(counters.find(name) != counters.end());
+ counters[name].value += measurement;
}
}
}
void State::ResumeTiming() {
- BM_CHECK(started_ && !finished_ && !error_occurred_);
+ BM_CHECK(started_ && !finished_ && !skipped());
timer_->StartTimer();
if (perf_counters_measurement_) {
perf_counters_measurement_->Start();
}
}
-void State::SkipWithError(const char* msg) {
- BM_CHECK(msg);
- error_occurred_ = true;
+void State::SkipWithMessage(const std::string& msg) {
+ skipped_ = internal::SkippedWithMessage;
{
MutexLock l(manager_->GetBenchmarkMutex());
- if (manager_->results.has_error_ == false) {
- manager_->results.error_message_ = msg;
- manager_->results.has_error_ = true;
+ if (internal::NotSkipped == manager_->results.skipped_) {
+ manager_->results.skip_message_ = msg;
+ manager_->results.skipped_ = skipped_;
+ }
+ }
+ total_iterations_ = 0;
+ if (timer_->running()) timer_->StopTimer();
+}
+
+void State::SkipWithError(const std::string& msg) {
+ skipped_ = internal::SkippedWithError;
+ {
+ MutexLock l(manager_->GetBenchmarkMutex());
+ if (internal::NotSkipped == manager_->results.skipped_) {
+ manager_->results.skip_message_ = msg;
+ manager_->results.skipped_ = skipped_;
}
}
total_iterations_ = 0;
@@ -225,7 +290,7 @@
timer_->SetIterationTime(seconds);
}
-void State::SetLabel(const char* label) {
+void State::SetLabel(const std::string& label) {
MutexLock l(manager_->GetBenchmarkMutex());
manager_->results.report_label_ = label;
}
@@ -233,14 +298,14 @@
void State::StartKeepRunning() {
BM_CHECK(!started_ && !finished_);
started_ = true;
- total_iterations_ = error_occurred_ ? 0 : max_iterations;
+ total_iterations_ = skipped() ? 0 : max_iterations;
manager_->StartStopBarrier();
- if (!error_occurred_) ResumeTiming();
+ if (!skipped()) ResumeTiming();
}
void State::FinishKeepRunning() {
- BM_CHECK(started_ && (!finished_ || error_occurred_));
- if (!error_occurred_) {
+ BM_CHECK(started_ && (!finished_ || skipped()));
+ if (!skipped()) {
PauseTiming();
}
// Total iterations has now wrapped around past 0. Fix this.
@@ -318,14 +383,26 @@
size_t num_repetitions_total = 0;
+ // This perfcounters object needs to be created before the runners vector
+ // below so it outlasts their lifetime.
+ PerfCountersMeasurement perfcounters(
+ StrSplit(FLAGS_benchmark_perf_counters, ','));
+
+ // Vector of benchmarks to run
std::vector<internal::BenchmarkRunner> runners;
runners.reserve(benchmarks.size());
+
+ // Count the number of benchmarks with threads to warn the user in case
+ // performance counters are used.
+ int benchmarks_with_threads = 0;
+
+ // Loop through all benchmarks
for (const BenchmarkInstance& benchmark : benchmarks) {
BenchmarkReporter::PerFamilyRunReports* reports_for_family = nullptr;
if (benchmark.complexity() != oNone)
reports_for_family = &per_family_reports[benchmark.family_index()];
-
- runners.emplace_back(benchmark, reports_for_family);
+ benchmarks_with_threads += (benchmark.threads() > 1);
+ runners.emplace_back(benchmark, &perfcounters, reports_for_family);
int num_repeats_of_this_instance = runners.back().GetNumRepeats();
num_repetitions_total += num_repeats_of_this_instance;
if (reports_for_family)
@@ -333,6 +410,17 @@
}
assert(runners.size() == benchmarks.size() && "Unexpected runner count.");
+ // The use of performance counters with threads would be unintuitive for
+ // the average user so we need to warn them about this case
+ if ((benchmarks_with_threads > 0) && (perfcounters.num_counters() > 0)) {
+ GetErrorLogInstance()
+ << "***WARNING*** There are " << benchmarks_with_threads
+ << " benchmarks with threads and " << perfcounters.num_counters()
+ << " performance counters were requested. Beware counters will "
+ "reflect the combined usage across all "
+ "threads.\n";
+ }
+
std::vector<size_t> repetition_indices;
repetition_indices.reserve(num_repetitions_total);
for (size_t runner_index = 0, num_runners = runners.size();
@@ -356,6 +444,12 @@
if (runner.HasRepeatsRemaining()) continue;
// FIXME: report each repetition separately, not all of them in bulk.
+ display_reporter->ReportRunsConfig(
+ runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters());
+ if (file_reporter)
+ file_reporter->ReportRunsConfig(
+ runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters());
+
RunResults run_results = runner.GetResults();
// Maybe calculate complexity report
@@ -389,14 +483,15 @@
typedef std::unique_ptr<BenchmarkReporter> PtrType;
if (name == "console") {
return PtrType(new ConsoleReporter(output_opts));
- } else if (name == "json") {
- return PtrType(new JSONReporter);
- } else if (name == "csv") {
- return PtrType(new CSVReporter);
- } else {
- std::cerr << "Unexpected format: '" << name << "'\n";
- std::exit(1);
}
+ if (name == "json") {
+ return PtrType(new JSONReporter());
+ }
+ if (name == "csv") {
+ return PtrType(new CSVReporter());
+ }
+ std::cerr << "Unexpected format: '" << name << "'\n";
+ std::exit(1);
}
BENCHMARK_RESTORE_DEPRECATED_WARNING
@@ -433,6 +528,14 @@
} // end namespace internal
+BenchmarkReporter* CreateDefaultDisplayReporter() {
+ static auto default_display_reporter =
+ internal::CreateReporter(FLAGS_benchmark_format,
+ internal::GetOutputOptions())
+ .release();
+ return default_display_reporter;
+}
+
size_t RunSpecifiedBenchmarks() {
return RunSpecifiedBenchmarks(nullptr, nullptr, FLAGS_benchmark_filter);
}
@@ -468,8 +571,7 @@
std::unique_ptr<BenchmarkReporter> default_display_reporter;
std::unique_ptr<BenchmarkReporter> default_file_reporter;
if (!display_reporter) {
- default_display_reporter = internal::CreateReporter(
- FLAGS_benchmark_format, internal::GetOutputOptions());
+ default_display_reporter.reset(CreateDefaultDisplayReporter());
display_reporter = default_display_reporter.get();
}
auto& Out = display_reporter->GetOutputStream();
@@ -480,17 +582,23 @@
Err << "A custom file reporter was provided but "
"--benchmark_out=<file> was not specified."
<< std::endl;
+ Out.flush();
+ Err.flush();
std::exit(1);
}
if (!fname.empty()) {
output_file.open(fname);
if (!output_file.is_open()) {
Err << "invalid file name: '" << fname << "'" << std::endl;
+ Out.flush();
+ Err.flush();
std::exit(1);
}
if (!file_reporter) {
default_file_reporter = internal::CreateReporter(
- FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
+ FLAGS_benchmark_out_format, FLAGS_benchmark_counters_tabular
+ ? ConsoleReporter::OO_Tabular
+ : ConsoleReporter::OO_None);
file_reporter = default_file_reporter.get();
}
file_reporter->SetOutputStream(&output_file);
@@ -498,10 +606,16 @@
}
std::vector<internal::BenchmarkInstance> benchmarks;
- if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;
+ if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) {
+ Out.flush();
+ Err.flush();
+ return 0;
+ }
if (benchmarks.empty()) {
Err << "Failed to match any benchmarks against regex: " << spec << "\n";
+ Out.flush();
+ Err.flush();
return 0;
}
@@ -512,11 +626,28 @@
internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
}
+ Out.flush();
+ Err.flush();
return benchmarks.size();
}
+namespace {
+// stores the time unit benchmarks use by default
+TimeUnit default_time_unit = kNanosecond;
+} // namespace
+
+TimeUnit GetDefaultTimeUnit() { return default_time_unit; }
+
+void SetDefaultTimeUnit(TimeUnit unit) { default_time_unit = unit; }
+
std::string GetBenchmarkFilter() { return FLAGS_benchmark_filter; }
+void SetBenchmarkFilter(std::string value) {
+ FLAGS_benchmark_filter = std::move(value);
+}
+
+int32_t GetBenchmarkVerbosity() { return FLAGS_v; }
+
void RegisterMemoryManager(MemoryManager* manager) {
internal::memory_manager = manager;
}
@@ -533,27 +664,31 @@
namespace internal {
+void (*HelperPrintf)();
+
void PrintUsageAndExit() {
- fprintf(stdout,
- "benchmark"
- " [--benchmark_list_tests={true|false}]\n"
- " [--benchmark_filter=<regex>]\n"
- " [--benchmark_min_time=<min_time>]\n"
- " [--benchmark_repetitions=<num_repetitions>]\n"
- " [--benchmark_enable_random_interleaving={true|false}]\n"
- " [--benchmark_report_aggregates_only={true|false}]\n"
- " [--benchmark_display_aggregates_only={true|false}]\n"
- " [--benchmark_format=<console|json|csv>]\n"
- " [--benchmark_out=<filename>]\n"
- " [--benchmark_out_format=<json|console|csv>]\n"
- " [--benchmark_color={auto|true|false}]\n"
- " [--benchmark_counters_tabular={true|false}]\n"
- " [--benchmark_perf_counters=<counter>,...]\n"
- " [--benchmark_context=<key>=<value>,...]\n"
- " [--v=<verbosity>]\n");
+ HelperPrintf();
exit(0);
}
+void SetDefaultTimeUnitFromFlag(const std::string& time_unit_flag) {
+ if (time_unit_flag == "s") {
+ return SetDefaultTimeUnit(kSecond);
+ }
+ if (time_unit_flag == "ms") {
+ return SetDefaultTimeUnit(kMillisecond);
+ }
+ if (time_unit_flag == "us") {
+ return SetDefaultTimeUnit(kMicrosecond);
+ }
+ if (time_unit_flag == "ns") {
+ return SetDefaultTimeUnit(kNanosecond);
+ }
+ if (!time_unit_flag.empty()) {
+ PrintUsageAndExit();
+ }
+}
+
void ParseCommandLineFlags(int* argc, char** argv) {
using namespace benchmark;
BenchmarkReporter::Context::executable_name =
@@ -562,8 +697,10 @@
if (ParseBoolFlag(argv[i], "benchmark_list_tests",
&FLAGS_benchmark_list_tests) ||
ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
- ParseDoubleFlag(argv[i], "benchmark_min_time",
+ ParseStringFlag(argv[i], "benchmark_min_time",
&FLAGS_benchmark_min_time) ||
+ ParseDoubleFlag(argv[i], "benchmark_min_warmup_time",
+ &FLAGS_benchmark_min_warmup_time) ||
ParseInt32Flag(argv[i], "benchmark_repetitions",
&FLAGS_benchmark_repetitions) ||
ParseBoolFlag(argv[i], "benchmark_enable_random_interleaving",
@@ -583,6 +720,8 @@
&FLAGS_benchmark_perf_counters) ||
ParseKeyValueFlag(argv[i], "benchmark_context",
&FLAGS_benchmark_context) ||
+ ParseStringFlag(argv[i], "benchmark_time_unit",
+ &FLAGS_benchmark_time_unit) ||
ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];
@@ -598,6 +737,7 @@
PrintUsageAndExit();
}
}
+ SetDefaultTimeUnitFromFlag(FLAGS_benchmark_time_unit);
if (FLAGS_benchmark_color.empty()) {
PrintUsageAndExit();
}
@@ -613,7 +753,34 @@
} // end namespace internal
-void Initialize(int* argc, char** argv) {
+std::string GetBenchmarkVersion() { return {BENCHMARK_VERSION}; }
+
+void PrintDefaultHelp() {
+ fprintf(stdout,
+ "benchmark"
+ " [--benchmark_list_tests={true|false}]\n"
+ " [--benchmark_filter=<regex>]\n"
+ " [--benchmark_min_time=`<integer>x` OR `<float>s` ]\n"
+ " [--benchmark_min_warmup_time=<min_warmup_time>]\n"
+ " [--benchmark_repetitions=<num_repetitions>]\n"
+ " [--benchmark_enable_random_interleaving={true|false}]\n"
+ " [--benchmark_report_aggregates_only={true|false}]\n"
+ " [--benchmark_display_aggregates_only={true|false}]\n"
+ " [--benchmark_format=<console|json|csv>]\n"
+ " [--benchmark_out=<filename>]\n"
+ " [--benchmark_out_format=<json|console|csv>]\n"
+ " [--benchmark_color={auto|true|false}]\n"
+ " [--benchmark_counters_tabular={true|false}]\n"
+#if defined HAVE_LIBPFM
+ " [--benchmark_perf_counters=<counter>,...]\n"
+#endif
+ " [--benchmark_context=<key>=<value>,...]\n"
+ " [--benchmark_time_unit={ns|us|ms|s}]\n"
+ " [--v=<verbosity>]\n");
+}
+
+void Initialize(int* argc, char** argv, void (*HelperPrintf)()) {
+ internal::HelperPrintf = HelperPrintf;
internal::ParseCommandLineFlags(argc, argv);
internal::LogLevel() = FLAGS_v;
}
diff --git a/third-party/benchmark/src/benchmark_api_internal.cc b/third-party/benchmark/src/benchmark_api_internal.cc
index 4de36e3..286f986 100644
--- a/third-party/benchmark/src/benchmark_api_internal.cc
+++ b/third-party/benchmark/src/benchmark_api_internal.cc
@@ -16,7 +16,7 @@
per_family_instance_index_(per_family_instance_idx),
aggregation_report_mode_(benchmark_.aggregation_report_mode_),
args_(args),
- time_unit_(benchmark_.time_unit_),
+ time_unit_(benchmark_.GetTimeUnit()),
measure_process_cpu_time_(benchmark_.measure_process_cpu_time_),
use_real_time_(benchmark_.use_real_time_),
use_manual_time_(benchmark_.use_manual_time_),
@@ -25,6 +25,7 @@
statistics_(benchmark_.statistics_),
repetitions_(benchmark_.repetitions_),
min_time_(benchmark_.min_time_),
+ min_warmup_time_(benchmark_.min_warmup_time_),
iterations_(benchmark_.iterations_),
threads_(thread_count) {
name_.function_name = benchmark_.name_;
@@ -50,6 +51,11 @@
name_.min_time = StrFormat("min_time:%0.3f", benchmark_.min_time_);
}
+ if (!IsZero(benchmark->min_warmup_time_)) {
+ name_.min_warmup_time =
+ StrFormat("min_warmup_time:%0.3f", benchmark_.min_warmup_time_);
+ }
+
if (benchmark_.iterations_ != 0) {
name_.iterations = StrFormat(
"iterations:%lu", static_cast<unsigned long>(benchmark_.iterations_));
@@ -87,24 +93,24 @@
IterationCount iters, int thread_id, internal::ThreadTimer* timer,
internal::ThreadManager* manager,
internal::PerfCountersMeasurement* perf_counters_measurement) const {
- State st(iters, args_, thread_id, threads_, timer, manager,
- perf_counters_measurement);
+ State st(name_.function_name, iters, args_, thread_id, threads_, timer,
+ manager, perf_counters_measurement);
benchmark_.Run(st);
return st;
}
void BenchmarkInstance::Setup() const {
if (setup_) {
- State st(/*iters*/ 1, args_, /*thread_id*/ 0, threads_, nullptr, nullptr,
- nullptr);
+ State st(name_.function_name, /*iters*/ 1, args_, /*thread_id*/ 0, threads_,
+ nullptr, nullptr, nullptr);
setup_(st);
}
}
void BenchmarkInstance::Teardown() const {
if (teardown_) {
- State st(/*iters*/ 1, args_, /*thread_id*/ 0, threads_, nullptr, nullptr,
- nullptr);
+ State st(name_.function_name, /*iters*/ 1, args_, /*thread_id*/ 0, threads_,
+ nullptr, nullptr, nullptr);
teardown_(st);
}
}
diff --git a/third-party/benchmark/src/benchmark_api_internal.h b/third-party/benchmark/src/benchmark_api_internal.h
index 94c2b29..94f5165 100644
--- a/third-party/benchmark/src/benchmark_api_internal.h
+++ b/third-party/benchmark/src/benchmark_api_internal.h
@@ -36,6 +36,7 @@
const std::vector<Statistics>& statistics() const { return statistics_; }
int repetitions() const { return repetitions_; }
double min_time() const { return min_time_; }
+ double min_warmup_time() const { return min_warmup_time_; }
IterationCount iterations() const { return iterations_; }
int threads() const { return threads_; }
void Setup() const;
@@ -62,6 +63,7 @@
const std::vector<Statistics>& statistics_;
int repetitions_;
double min_time_;
+ double min_warmup_time_;
IterationCount iterations_;
int threads_; // Number of concurrent threads to us
@@ -76,6 +78,7 @@
bool IsZero(double n);
+BENCHMARK_EXPORT
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false);
} // end namespace internal
diff --git a/third-party/benchmark/src/benchmark_main.cc b/third-party/benchmark/src/benchmark_main.cc
index b3b2478..cd61cd2 100644
--- a/third-party/benchmark/src/benchmark_main.cc
+++ b/third-party/benchmark/src/benchmark_main.cc
@@ -14,4 +14,5 @@
#include "benchmark/benchmark.h"
+BENCHMARK_EXPORT int main(int, char**);
BENCHMARK_MAIN();
diff --git a/third-party/benchmark/src/benchmark_name.cc b/third-party/benchmark/src/benchmark_name.cc
index 2a17ebc..01676bb 100644
--- a/third-party/benchmark/src/benchmark_name.cc
+++ b/third-party/benchmark/src/benchmark_name.cc
@@ -51,8 +51,9 @@
}
} // namespace
+BENCHMARK_EXPORT
std::string BenchmarkName::str() const {
- return join('/', function_name, args, min_time, iterations, repetitions,
- time_type, threads);
+ return join('/', function_name, args, min_time, min_warmup_time, iterations,
+ repetitions, time_type, threads);
}
} // namespace benchmark
diff --git a/third-party/benchmark/src/benchmark_register.cc b/third-party/benchmark/src/benchmark_register.cc
index 61a0c26..e447c9a 100644
--- a/third-party/benchmark/src/benchmark_register.cc
+++ b/third-party/benchmark/src/benchmark_register.cc
@@ -15,7 +15,7 @@
#include "benchmark_register.h"
#ifndef BENCHMARK_OS_WINDOWS
-#ifndef BENCHMARK_OS_FUCHSIA
+#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT)
#include <sys/resource.h>
#endif
#include <sys/time.h>
@@ -53,10 +53,13 @@
namespace {
// For non-dense Range, intermediate values are powers of kRangeMultiplier.
-static const int kRangeMultiplier = 8;
+static constexpr int kRangeMultiplier = 8;
+
// The size of a benchmark family determines is the number of inputs to repeat
// the benchmark on. If this is "large" then warn the user during configuration.
-static const size_t kMaxFamilySize = 100;
+static constexpr size_t kMaxFamilySize = 100;
+
+static constexpr char kDisabledPrefix[] = "DISABLED_";
} // end namespace
namespace internal {
@@ -116,10 +119,10 @@
// Make regular expression out of command-line flag
std::string error_msg;
Regex re;
- bool isNegativeFilter = false;
+ bool is_negative_filter = false;
if (spec[0] == '-') {
spec.replace(0, 1, "");
- isNegativeFilter = true;
+ is_negative_filter = true;
}
if (!re.Init(spec, &error_msg)) {
Err << "Could not compile benchmark re: " << error_msg << std::endl;
@@ -154,7 +157,8 @@
<< " will be repeated at least " << family_size << " times.\n";
}
// reserve in the special case the regex ".", since we know the final
- // family size.
+ // family size. this doesn't take into account any disabled benchmarks
+ // so worst case we reserve more than we need.
if (spec == ".") benchmarks->reserve(benchmarks->size() + family_size);
for (auto const& args : family->args_) {
@@ -164,8 +168,9 @@
num_threads);
const auto full_name = instance.name().str();
- if ((re.Match(full_name) && !isNegativeFilter) ||
- (!re.Match(full_name) && isNegativeFilter)) {
+ if (full_name.rfind(kDisabledPrefix, 0) != 0 &&
+ ((re.Match(full_name) && !is_negative_filter) ||
+ (!re.Match(full_name) && is_negative_filter))) {
benchmarks->push_back(std::move(instance));
++per_family_instance_index;
@@ -199,12 +204,14 @@
// Benchmark
//=============================================================================//
-Benchmark::Benchmark(const char* name)
+Benchmark::Benchmark(const std::string& name)
: name_(name),
aggregation_report_mode_(ARM_Unspecified),
- time_unit_(kNanosecond),
+ time_unit_(GetDefaultTimeUnit()),
+ use_default_time_unit_(true),
range_multiplier_(kRangeMultiplier),
min_time_(0),
+ min_warmup_time_(0),
iterations_(0),
repetitions_(0),
measure_process_cpu_time_(false),
@@ -223,7 +230,7 @@
Benchmark::~Benchmark() {}
Benchmark* Benchmark::Name(const std::string& name) {
- SetName(name.c_str());
+ SetName(name);
return this;
}
@@ -235,6 +242,7 @@
Benchmark* Benchmark::Unit(TimeUnit unit) {
time_unit_ = unit;
+ use_default_time_unit_ = false;
return this;
}
@@ -348,9 +356,17 @@
return this;
}
+Benchmark* Benchmark::MinWarmUpTime(double t) {
+ BM_CHECK(t >= 0.0);
+ BM_CHECK(iterations_ == 0);
+ min_warmup_time_ = t;
+ return this;
+}
+
Benchmark* Benchmark::Iterations(IterationCount n) {
BM_CHECK(n > 0);
BM_CHECK(IsZero(min_time_));
+ BM_CHECK(IsZero(min_warmup_time_));
iterations_ = n;
return this;
}
@@ -452,7 +468,9 @@
return this;
}
-void Benchmark::SetName(const char* name) { name_ = name; }
+void Benchmark::SetName(const std::string& name) { name_ = name; }
+
+const char* Benchmark::GetName() const { return name_.c_str(); }
int Benchmark::ArgsCnt() const {
if (args_.empty()) {
@@ -462,6 +480,16 @@
return static_cast<int>(args_.front().size());
}
+const char* Benchmark::GetArgName(int arg) const {
+ BM_CHECK_GE(arg, 0);
+ BM_CHECK_LT(arg, static_cast<int>(arg_names_.size()));
+ return arg_names_[arg].c_str();
+}
+
+TimeUnit Benchmark::GetTimeUnit() const {
+ return use_default_time_unit_ ? GetDefaultTimeUnit() : time_unit_;
+}
+
//=============================================================================//
// FunctionBenchmark
//=============================================================================//
diff --git a/third-party/benchmark/src/benchmark_register.h b/third-party/benchmark/src/benchmark_register.h
index d3f4974..53367c7 100644
--- a/third-party/benchmark/src/benchmark_register.h
+++ b/third-party/benchmark/src/benchmark_register.h
@@ -1,6 +1,7 @@
#ifndef BENCHMARK_REGISTER_H
#define BENCHMARK_REGISTER_H
+#include <algorithm>
#include <limits>
#include <vector>
@@ -23,7 +24,7 @@
static const T kmax = std::numeric_limits<T>::max();
// Space out the values in multiples of "mult"
- for (T i = static_cast<T>(1); i <= hi; i *= mult) {
+ for (T i = static_cast<T>(1); i <= hi; i *= static_cast<T>(mult)) {
if (i >= lo) {
dst->push_back(i);
}
@@ -32,7 +33,7 @@
if (i > kmax / mult) break;
}
- return dst->begin() + start_offset;
+ return dst->begin() + static_cast<int>(start_offset);
}
template <typename T>
diff --git a/third-party/benchmark/src/benchmark_runner.cc b/third-party/benchmark/src/benchmark_runner.cc
index eac807b..dcddb43 100644
--- a/third-party/benchmark/src/benchmark_runner.cc
+++ b/third-party/benchmark/src/benchmark_runner.cc
@@ -19,7 +19,7 @@
#include "internal_macros.h"
#ifndef BENCHMARK_OS_WINDOWS
-#ifndef BENCHMARK_OS_FUCHSIA
+#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT)
#include <sys/resource.h>
#endif
#include <sys/time.h>
@@ -28,11 +28,14 @@
#include <algorithm>
#include <atomic>
+#include <climits>
+#include <cmath>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
+#include <limits>
#include <memory>
#include <string>
#include <thread>
@@ -61,7 +64,9 @@
namespace {
-static constexpr IterationCount kMaxIterations = 1000000000;
+static constexpr IterationCount kMaxIterations = 1000000000000;
+const double kDefaultMinTime =
+ std::strtod(::benchmark::kDefaultMinTimeStr, /*p_end*/ nullptr);
BenchmarkReporter::Run CreateRunReport(
const benchmark::internal::BenchmarkInstance& b,
@@ -75,8 +80,8 @@
report.run_name = b.name();
report.family_index = b.family_index();
report.per_family_instance_index = b.per_family_instance_index();
- report.error_occurred = results.has_error_;
- report.error_message = results.error_message_;
+ report.skipped = results.skipped_;
+ report.skip_message = results.skip_message_;
report.report_label = results.report_label_;
// This is the total iterations across all threads.
report.iterations = results.iterations;
@@ -85,12 +90,13 @@
report.repetition_index = repetition_index;
report.repetitions = repeats;
- if (!report.error_occurred) {
+ if (!report.skipped) {
if (b.use_manual_time()) {
report.real_accumulated_time = results.manual_time_used;
} else {
report.real_accumulated_time = results.real_time_used;
}
+ report.use_real_time_for_initial_big_o = b.use_manual_time();
report.cpu_accumulated_time = results.cpu_time_used;
report.complexity_n = results.complexity_n;
report.complexity = b.complexity();
@@ -103,7 +109,7 @@
report.memory_result = memory_result;
report.allocs_per_iter =
memory_iterations ? static_cast<double>(memory_result->num_allocs) /
- memory_iterations
+ static_cast<double>(memory_iterations)
: 0;
}
@@ -122,9 +128,10 @@
b->measure_process_cpu_time()
? internal::ThreadTimer::CreateProcessCpuTime()
: internal::ThreadTimer::Create());
+
State st =
b->Run(iters, thread_id, &timer, manager, perf_counters_measurement);
- BM_CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
+ BM_CHECK(st.skipped() || st.iterations() >= st.max_iterations)
<< "Benchmark returned before State::KeepRunning() returned false!";
{
MutexLock l(manager->GetBenchmarkMutex());
@@ -139,24 +146,100 @@
manager->NotifyThreadComplete();
}
+double ComputeMinTime(const benchmark::internal::BenchmarkInstance& b,
+ const BenchTimeType& iters_or_time) {
+ if (!IsZero(b.min_time())) return b.min_time();
+ // If the flag was used to specify number of iters, then return the default
+ // min_time.
+ if (iters_or_time.tag == BenchTimeType::ITERS) return kDefaultMinTime;
+
+ return iters_or_time.time;
+}
+
+IterationCount ComputeIters(const benchmark::internal::BenchmarkInstance& b,
+ const BenchTimeType& iters_or_time) {
+ if (b.iterations() != 0) return b.iterations();
+
+ // We've already concluded that this flag is currently used to pass
+ // iters but do a check here again anyway.
+ BM_CHECK(iters_or_time.tag == BenchTimeType::ITERS);
+ return iters_or_time.iters;
+}
+
} // end namespace
+BenchTimeType ParseBenchMinTime(const std::string& value) {
+ BenchTimeType ret;
+
+ if (value.empty()) {
+ ret.tag = BenchTimeType::TIME;
+ ret.time = 0.0;
+ return ret;
+ }
+
+ if (value.back() == 'x') {
+ char* p_end;
+ // Reset errno before it's changed by strtol.
+ errno = 0;
+ IterationCount num_iters = std::strtol(value.c_str(), &p_end, 10);
+
+ // After a valid parse, p_end should have been set to
+ // point to the 'x' suffix.
+ BM_CHECK(errno == 0 && p_end != nullptr && *p_end == 'x')
+ << "Malformed iters value passed to --benchmark_min_time: `" << value
+ << "`. Expected --benchmark_min_time=<integer>x.";
+
+ ret.tag = BenchTimeType::ITERS;
+ ret.iters = num_iters;
+ return ret;
+ }
+
+ bool has_suffix = value.back() == 's';
+ if (!has_suffix) {
+ BM_VLOG(0) << "Value passed to --benchmark_min_time should have a suffix. "
+ "Eg., `30s` for 30-seconds.";
+ }
+
+ char* p_end;
+ // Reset errno before it's changed by strtod.
+ errno = 0;
+ double min_time = std::strtod(value.c_str(), &p_end);
+
+ // After a successful parse, p_end should point to the suffix 's',
+ // or the end of the string if the suffix was omitted.
+ BM_CHECK(errno == 0 && p_end != nullptr &&
+ ((has_suffix && *p_end == 's') || *p_end == '\0'))
+ << "Malformed seconds value passed to --benchmark_min_time: `" << value
+ << "`. Expected --benchmark_min_time=<float>x.";
+
+ ret.tag = BenchTimeType::TIME;
+ ret.time = min_time;
+
+ return ret;
+}
+
BenchmarkRunner::BenchmarkRunner(
const benchmark::internal::BenchmarkInstance& b_,
+ PerfCountersMeasurement* pcm_,
BenchmarkReporter::PerFamilyRunReports* reports_for_family_)
: b(b_),
reports_for_family(reports_for_family_),
- min_time(!IsZero(b.min_time()) ? b.min_time() : FLAGS_benchmark_min_time),
+ parsed_benchtime_flag(ParseBenchMinTime(FLAGS_benchmark_min_time)),
+ min_time(ComputeMinTime(b_, parsed_benchtime_flag)),
+ min_warmup_time((!IsZero(b.min_time()) && b.min_warmup_time() > 0.0)
+ ? b.min_warmup_time()
+ : FLAGS_benchmark_min_warmup_time),
+ warmup_done(!(min_warmup_time > 0.0)),
repeats(b.repetitions() != 0 ? b.repetitions()
: FLAGS_benchmark_repetitions),
- has_explicit_iteration_count(b.iterations() != 0),
+ has_explicit_iteration_count(b.iterations() != 0 ||
+ parsed_benchtime_flag.tag ==
+ BenchTimeType::ITERS),
pool(b.threads() - 1),
- iters(has_explicit_iteration_count ? b.iterations() : 1),
- perf_counters_measurement(
- PerfCounters::Create(StrSplit(FLAGS_benchmark_perf_counters, ','))),
- perf_counters_measurement_ptr(perf_counters_measurement.IsValid()
- ? &perf_counters_measurement
- : nullptr) {
+ iters(has_explicit_iteration_count
+ ? ComputeIters(b_, parsed_benchtime_flag)
+ : 1),
+ perf_counters_measurement_ptr(pcm_) {
run_results.display_report_aggregates_only =
(FLAGS_benchmark_report_aggregates_only ||
FLAGS_benchmark_display_aggregates_only);
@@ -169,7 +252,7 @@
run_results.file_report_aggregates_only =
(b.aggregation_report_mode() & internal::ARM_FileReportAggregatesOnly);
BM_CHECK(FLAGS_benchmark_perf_counters.empty() ||
- perf_counters_measurement.IsValid())
+ (perf_counters_measurement_ptr->num_counters() == 0))
<< "Perf counters were requested but could not be set up.";
}
}
@@ -232,20 +315,20 @@
const IterationResults& i) const {
// See how much iterations should be increased by.
// Note: Avoid division by zero with max(seconds, 1ns).
- double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
+ double multiplier = GetMinTimeToApply() * 1.4 / std::max(i.seconds, 1e-9);
// If our last run was at least 10% of FLAGS_benchmark_min_time then we
// use the multiplier directly.
// Otherwise we use at most 10 times expansion.
// NOTE: When the last run was at least 10% of the min time the max
// expansion should be 14x.
- bool is_significant = (i.seconds / min_time) > 0.1;
+ const bool is_significant = (i.seconds / GetMinTimeToApply()) > 0.1;
multiplier = is_significant ? multiplier : 10.0;
// So what seems to be the sufficiently-large iteration count? Round up.
const IterationCount max_next_iters = static_cast<IterationCount>(
- std::lround(std::max(multiplier * static_cast<double>(i.iters),
- static_cast<double>(i.iters) + 1.0)));
- // But we do have *some* sanity limits though..
+ std::llround(std::max(multiplier * static_cast<double>(i.iters),
+ static_cast<double>(i.iters) + 1.0)));
+ // But we do have *some* limits though..
const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
BM_VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
@@ -257,21 +340,80 @@
// Determine if this run should be reported;
// Either it has run for a sufficient amount of time
// or because an error was reported.
- return i.results.has_error_ ||
+ return i.results.skipped_ ||
i.iters >= kMaxIterations || // Too many iterations already.
- i.seconds >= min_time || // The elapsed time is large enough.
+ i.seconds >=
+ GetMinTimeToApply() || // The elapsed time is large enough.
// CPU time is specified but the elapsed real time greatly exceeds
// the minimum time.
- // Note that user provided timers are except from this sanity check.
- ((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time());
+ // Note that user provided timers are except from this test.
+ ((i.results.real_time_used >= 5 * GetMinTimeToApply()) &&
+ !b.use_manual_time());
+}
+
+double BenchmarkRunner::GetMinTimeToApply() const {
+ // In order to re-use functionality to run and measure benchmarks for running
+ // a warmup phase of the benchmark, we need a way of telling whether to apply
+ // min_time or min_warmup_time. This function will figure out if we are in the
+ // warmup phase and therefore need to apply min_warmup_time or if we already
+ // in the benchmarking phase and min_time needs to be applied.
+ return warmup_done ? min_time : min_warmup_time;
+}
+
+void BenchmarkRunner::FinishWarmUp(const IterationCount& i) {
+ warmup_done = true;
+ iters = i;
+}
+
+void BenchmarkRunner::RunWarmUp() {
+ // Use the same mechanisms for warming up the benchmark as used for actually
+ // running and measuring the benchmark.
+ IterationResults i_warmup;
+ // Dont use the iterations determined in the warmup phase for the actual
+ // measured benchmark phase. While this may be a good starting point for the
+ // benchmark and it would therefore get rid of the need to figure out how many
+ // iterations are needed if min_time is set again, this may also be a complete
+ // wrong guess since the warmup loops might be considerably slower (e.g
+ // because of caching effects).
+ const IterationCount i_backup = iters;
+
+ for (;;) {
+ b.Setup();
+ i_warmup = DoNIterations();
+ b.Teardown();
+
+ const bool finish = ShouldReportIterationResults(i_warmup);
+
+ if (finish) {
+ FinishWarmUp(i_backup);
+ break;
+ }
+
+ // Although we are running "only" a warmup phase where running enough
+ // iterations at once without measuring time isn't as important as it is for
+ // the benchmarking phase, we still do it the same way as otherwise it is
+ // very confusing for the user to know how to choose a proper value for
+ // min_warmup_time if a different approach on running it is used.
+ iters = PredictNumItersNeeded(i_warmup);
+ assert(iters > i_warmup.iters &&
+ "if we did more iterations than we want to do the next time, "
+ "then we should have accepted the current iteration run.");
+ }
}
void BenchmarkRunner::DoOneRepetition() {
assert(HasRepeatsRemaining() && "Already done all repetitions?");
const bool is_the_first_repetition = num_repetitions_done == 0;
- IterationResults i;
+ // In case a warmup phase is requested by the benchmark, run it now.
+ // After running the warmup phase the BenchmarkRunner should be in a state as
+ // this warmup never happened except the fact that warmup_done is set. Every
+ // other manipulation of the BenchmarkRunner instance would be a bug! Please
+ // fix it.
+ if (!warmup_done) RunWarmUp();
+
+ IterationResults i;
// We *may* be gradually increasing the length (iteration count)
// of the benchmark until we decide the results are significant.
// And once we do, we report those last results and exit.
@@ -324,10 +466,7 @@
manager->WaitForAllThreads();
manager.reset();
b.Teardown();
-
- BENCHMARK_DISABLE_DEPRECATED_WARNING
- memory_manager->Stop(memory_result);
- BENCHMARK_RESTORE_DEPRECATED_WARNING
+ memory_manager->Stop(*memory_result);
}
// Ok, now actually report.
@@ -337,7 +476,7 @@
if (reports_for_family) {
++reports_for_family->num_runs_done;
- if (!report.error_occurred) reports_for_family->Runs.push_back(report);
+ if (!report.skipped) reports_for_family->Runs.push_back(report);
}
run_results.non_aggregates.push_back(report);
diff --git a/third-party/benchmark/src/benchmark_runner.h b/third-party/benchmark/src/benchmark_runner.h
index 752eefd..db2fa04 100644
--- a/third-party/benchmark/src/benchmark_runner.h
+++ b/third-party/benchmark/src/benchmark_runner.h
@@ -25,7 +25,8 @@
namespace benchmark {
-BM_DECLARE_double(benchmark_min_time);
+BM_DECLARE_string(benchmark_min_time);
+BM_DECLARE_double(benchmark_min_warmup_time);
BM_DECLARE_int32(benchmark_repetitions);
BM_DECLARE_bool(benchmark_report_aggregates_only);
BM_DECLARE_bool(benchmark_display_aggregates_only);
@@ -43,9 +44,21 @@
bool file_report_aggregates_only = false;
};
+struct BENCHMARK_EXPORT BenchTimeType {
+ enum { ITERS, TIME } tag;
+ union {
+ IterationCount iters;
+ double time;
+ };
+};
+
+BENCHMARK_EXPORT
+BenchTimeType ParseBenchMinTime(const std::string& value);
+
class BenchmarkRunner {
public:
BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
+ benchmark::internal::PerfCountersMeasurement* pmc_,
BenchmarkReporter::PerFamilyRunReports* reports_for_family);
int GetNumRepeats() const { return repeats; }
@@ -62,13 +75,22 @@
return reports_for_family;
}
+ double GetMinTime() const { return min_time; }
+
+ bool HasExplicitIters() const { return has_explicit_iteration_count; }
+
+ IterationCount GetIters() const { return iters; }
+
private:
RunResults run_results;
const benchmark::internal::BenchmarkInstance& b;
BenchmarkReporter::PerFamilyRunReports* reports_for_family;
+ BenchTimeType parsed_benchtime_flag;
const double min_time;
+ const double min_warmup_time;
+ bool warmup_done;
const int repeats;
const bool has_explicit_iteration_count;
@@ -82,8 +104,7 @@
// So only the first repetition has to find/calculate it,
// the other repetitions will just use that precomputed iteration count.
- PerfCountersMeasurement perf_counters_measurement;
- PerfCountersMeasurement* const perf_counters_measurement_ptr;
+ PerfCountersMeasurement* const perf_counters_measurement_ptr = nullptr;
struct IterationResults {
internal::ThreadManager::Result results;
@@ -95,6 +116,12 @@
IterationCount PredictNumItersNeeded(const IterationResults& i) const;
bool ShouldReportIterationResults(const IterationResults& i) const;
+
+ double GetMinTimeToApply() const;
+
+ void FinishWarmUp(const IterationCount& i);
+
+ void RunWarmUp();
};
} // namespace internal
diff --git a/third-party/benchmark/src/check.cc b/third-party/benchmark/src/check.cc
new file mode 100644
index 0000000..5f7526e
--- /dev/null
+++ b/third-party/benchmark/src/check.cc
@@ -0,0 +1,11 @@
+#include "check.h"
+
+namespace benchmark {
+namespace internal {
+
+static AbortHandlerT* handler = &std::abort;
+
+BENCHMARK_EXPORT AbortHandlerT*& GetAbortHandler() { return handler; }
+
+} // namespace internal
+} // namespace benchmark
diff --git a/third-party/benchmark/src/check.h b/third-party/benchmark/src/check.h
index 0efd13f..c1cd5e8 100644
--- a/third-party/benchmark/src/check.h
+++ b/third-party/benchmark/src/check.h
@@ -5,18 +5,34 @@
#include <cstdlib>
#include <ostream>
+#include "benchmark/export.h"
#include "internal_macros.h"
#include "log.h"
+#if defined(__GNUC__) || defined(__clang__)
+#define BENCHMARK_NOEXCEPT noexcept
+#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x)
+#elif defined(_MSC_VER) && !defined(__clang__)
+#if _MSC_VER >= 1900
+#define BENCHMARK_NOEXCEPT noexcept
+#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x)
+#else
+#define BENCHMARK_NOEXCEPT
+#define BENCHMARK_NOEXCEPT_OP(x)
+#endif
+#define __func__ __FUNCTION__
+#else
+#define BENCHMARK_NOEXCEPT
+#define BENCHMARK_NOEXCEPT_OP(x)
+#endif
+
namespace benchmark {
namespace internal {
typedef void(AbortHandlerT)();
-inline AbortHandlerT*& GetAbortHandler() {
- static AbortHandlerT* handler = &std::abort;
- return handler;
-}
+BENCHMARK_EXPORT
+AbortHandlerT*& GetAbortHandler();
BENCHMARK_NORETURN inline void CallAbortHandler() {
GetAbortHandler()();
@@ -36,10 +52,17 @@
LogType& GetLog() { return log_; }
+#if defined(COMPILER_MSVC)
+#pragma warning(push)
+#pragma warning(disable : 4722)
+#endif
BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) {
log_ << std::endl;
CallAbortHandler();
}
+#if defined(COMPILER_MSVC)
+#pragma warning(pop)
+#endif
CheckHandler& operator=(const CheckHandler&) = delete;
CheckHandler(const CheckHandler&) = delete;
diff --git a/third-party/benchmark/src/colorprint.cc b/third-party/benchmark/src/colorprint.cc
index 1a000a0..abc71492 100644
--- a/third-party/benchmark/src/colorprint.cc
+++ b/third-party/benchmark/src/colorprint.cc
@@ -96,18 +96,18 @@
// currently there is no error handling for failure, so this is hack.
BM_CHECK(ret >= 0);
- if (ret == 0) // handle empty expansion
+ if (ret == 0) { // handle empty expansion
return {};
- else if (static_cast<size_t>(ret) < size)
- return local_buff;
- else {
- // we did not provide a long enough buffer on our first attempt.
- size = static_cast<size_t>(ret) + 1; // + 1 for the null byte
- std::unique_ptr<char[]> buff(new char[size]);
- ret = vsnprintf(buff.get(), size, msg, args);
- BM_CHECK(ret > 0 && (static_cast<size_t>(ret)) < size);
- return buff.get();
}
+ if (static_cast<size_t>(ret) < size) {
+ return local_buff;
+ }
+ // we did not provide a long enough buffer on our first attempt.
+ size = static_cast<size_t>(ret) + 1; // + 1 for the null byte
+ std::unique_ptr<char[]> buff(new char[size]);
+ ret = vsnprintf(buff.get(), size, msg, args);
+ BM_CHECK(ret > 0 && (static_cast<size_t>(ret)) < size);
+ return buff.get();
}
std::string FormatString(const char* msg, ...) {
@@ -140,12 +140,12 @@
// We need to flush the stream buffers into the console before each
// SetConsoleTextAttribute call lest it affect the text that is already
// printed but has not yet reached the console.
- fflush(stdout);
+ out.flush();
SetConsoleTextAttribute(stdout_handle,
GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
- vprintf(fmt, args);
+ out << FormatString(fmt, args);
- fflush(stdout);
+ out.flush();
// Restores the text color.
SetConsoleTextAttribute(stdout_handle, old_color_attrs);
#else
@@ -163,12 +163,24 @@
#else
// On non-Windows platforms, we rely on the TERM variable. This list of
// supported TERM values is copied from Google Test:
- // <https://github.com/google/googletest/blob/master/googletest/src/gtest.cc#L2925>.
+ // <https://github.com/google/googletest/blob/v1.13.0/googletest/src/gtest.cc#L3225-L3259>.
const char* const SUPPORTED_TERM_VALUES[] = {
- "xterm", "xterm-color", "xterm-256color",
- "screen", "screen-256color", "tmux",
- "tmux-256color", "rxvt-unicode", "rxvt-unicode-256color",
- "linux", "cygwin",
+ "xterm",
+ "xterm-color",
+ "xterm-256color",
+ "screen",
+ "screen-256color",
+ "tmux",
+ "tmux-256color",
+ "rxvt-unicode",
+ "rxvt-unicode-256color",
+ "linux",
+ "cygwin",
+ "xterm-kitty",
+ "alacritty",
+ "foot",
+ "foot-extra",
+ "wezterm",
};
const char* const term = getenv("TERM");
diff --git a/third-party/benchmark/src/commandlineflags.cc b/third-party/benchmark/src/commandlineflags.cc
index 9615e35..dcb4149 100644
--- a/third-party/benchmark/src/commandlineflags.cc
+++ b/third-party/benchmark/src/commandlineflags.cc
@@ -121,12 +121,14 @@
} // namespace
+BENCHMARK_EXPORT
bool BoolFromEnv(const char* flag, bool default_val) {
const std::string env_var = FlagToEnvVar(flag);
const char* const value_str = getenv(env_var.c_str());
return value_str == nullptr ? default_val : IsTruthyFlagValue(value_str);
}
+BENCHMARK_EXPORT
int32_t Int32FromEnv(const char* flag, int32_t default_val) {
const std::string env_var = FlagToEnvVar(flag);
const char* const value_str = getenv(env_var.c_str());
@@ -139,6 +141,7 @@
return value;
}
+BENCHMARK_EXPORT
double DoubleFromEnv(const char* flag, double default_val) {
const std::string env_var = FlagToEnvVar(flag);
const char* const value_str = getenv(env_var.c_str());
@@ -151,12 +154,14 @@
return value;
}
+BENCHMARK_EXPORT
const char* StringFromEnv(const char* flag, const char* default_val) {
const std::string env_var = FlagToEnvVar(flag);
const char* const value = getenv(env_var.c_str());
return value == nullptr ? default_val : value;
}
+BENCHMARK_EXPORT
std::map<std::string, std::string> KvPairsFromEnv(
const char* flag, std::map<std::string, std::string> default_val) {
const std::string env_var = FlagToEnvVar(flag);
@@ -201,6 +206,7 @@
return flag_end + 1;
}
+BENCHMARK_EXPORT
bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, true);
@@ -213,6 +219,7 @@
return true;
}
+BENCHMARK_EXPORT
bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false);
@@ -225,6 +232,7 @@
value);
}
+BENCHMARK_EXPORT
bool ParseDoubleFlag(const char* str, const char* flag, double* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false);
@@ -237,6 +245,7 @@
value);
}
+BENCHMARK_EXPORT
bool ParseStringFlag(const char* str, const char* flag, std::string* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false);
@@ -248,6 +257,7 @@
return true;
}
+BENCHMARK_EXPORT
bool ParseKeyValueFlag(const char* str, const char* flag,
std::map<std::string, std::string>* value) {
const char* const value_str = ParseFlagValue(str, flag, false);
@@ -263,23 +273,26 @@
return true;
}
+BENCHMARK_EXPORT
bool IsFlag(const char* str, const char* flag) {
return (ParseFlagValue(str, flag, true) != nullptr);
}
+BENCHMARK_EXPORT
bool IsTruthyFlagValue(const std::string& value) {
if (value.size() == 1) {
char v = value[0];
return isalnum(v) &&
!(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
- } else if (!value.empty()) {
+ }
+ if (!value.empty()) {
std::string value_lower(value);
std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(),
[](char c) { return static_cast<char>(::tolower(c)); });
return !(value_lower == "false" || value_lower == "no" ||
value_lower == "off");
- } else
- return true;
+ }
+ return true;
}
} // end namespace benchmark
diff --git a/third-party/benchmark/src/commandlineflags.h b/third-party/benchmark/src/commandlineflags.h
index 5baaf11..7882628 100644
--- a/third-party/benchmark/src/commandlineflags.h
+++ b/third-party/benchmark/src/commandlineflags.h
@@ -5,28 +5,33 @@
#include <map>
#include <string>
+#include "benchmark/export.h"
+
// Macro for referencing flags.
#define FLAG(name) FLAGS_##name
// Macros for declaring flags.
-#define BM_DECLARE_bool(name) extern bool FLAG(name)
-#define BM_DECLARE_int32(name) extern int32_t FLAG(name)
-#define BM_DECLARE_double(name) extern double FLAG(name)
-#define BM_DECLARE_string(name) extern std::string FLAG(name)
+#define BM_DECLARE_bool(name) BENCHMARK_EXPORT extern bool FLAG(name)
+#define BM_DECLARE_int32(name) BENCHMARK_EXPORT extern int32_t FLAG(name)
+#define BM_DECLARE_double(name) BENCHMARK_EXPORT extern double FLAG(name)
+#define BM_DECLARE_string(name) BENCHMARK_EXPORT extern std::string FLAG(name)
#define BM_DECLARE_kvpairs(name) \
- extern std::map<std::string, std::string> FLAG(name)
+ BENCHMARK_EXPORT extern std::map<std::string, std::string> FLAG(name)
// Macros for defining flags.
#define BM_DEFINE_bool(name, default_val) \
- bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val)
+ BENCHMARK_EXPORT bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val)
#define BM_DEFINE_int32(name, default_val) \
- int32_t FLAG(name) = benchmark::Int32FromEnv(#name, default_val)
+ BENCHMARK_EXPORT int32_t FLAG(name) = \
+ benchmark::Int32FromEnv(#name, default_val)
#define BM_DEFINE_double(name, default_val) \
- double FLAG(name) = benchmark::DoubleFromEnv(#name, default_val)
+ BENCHMARK_EXPORT double FLAG(name) = \
+ benchmark::DoubleFromEnv(#name, default_val)
#define BM_DEFINE_string(name, default_val) \
- std::string FLAG(name) = benchmark::StringFromEnv(#name, default_val)
-#define BM_DEFINE_kvpairs(name, default_val) \
- std::map<std::string, std::string> FLAG(name) = \
+ BENCHMARK_EXPORT std::string FLAG(name) = \
+ benchmark::StringFromEnv(#name, default_val)
+#define BM_DEFINE_kvpairs(name, default_val) \
+ BENCHMARK_EXPORT std::map<std::string, std::string> FLAG(name) = \
benchmark::KvPairsFromEnv(#name, default_val)
namespace benchmark {
@@ -35,6 +40,7 @@
//
// If the variable exists, returns IsTruthyFlagValue() value; if not,
// returns the given default value.
+BENCHMARK_EXPORT
bool BoolFromEnv(const char* flag, bool default_val);
// Parses an Int32 from the environment variable corresponding to the given
@@ -42,6 +48,7 @@
//
// If the variable exists, returns ParseInt32() value; if not, returns
// the given default value.
+BENCHMARK_EXPORT
int32_t Int32FromEnv(const char* flag, int32_t default_val);
// Parses an Double from the environment variable corresponding to the given
@@ -49,6 +56,7 @@
//
// If the variable exists, returns ParseDouble(); if not, returns
// the given default value.
+BENCHMARK_EXPORT
double DoubleFromEnv(const char* flag, double default_val);
// Parses a string from the environment variable corresponding to the given
@@ -56,6 +64,7 @@
//
// If variable exists, returns its value; if not, returns
// the given default value.
+BENCHMARK_EXPORT
const char* StringFromEnv(const char* flag, const char* default_val);
// Parses a set of kvpairs from the environment variable corresponding to the
@@ -63,6 +72,7 @@
//
// If variable exists, returns its value; if not, returns
// the given default value.
+BENCHMARK_EXPORT
std::map<std::string, std::string> KvPairsFromEnv(
const char* flag, std::map<std::string, std::string> default_val);
@@ -75,40 +85,47 @@
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
+BENCHMARK_EXPORT
bool ParseBoolFlag(const char* str, const char* flag, bool* value);
// Parses a string for an Int32 flag, in the form of "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
+BENCHMARK_EXPORT
bool ParseInt32Flag(const char* str, const char* flag, int32_t* value);
// Parses a string for a Double flag, in the form of "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
+BENCHMARK_EXPORT
bool ParseDoubleFlag(const char* str, const char* flag, double* value);
// Parses a string for a string flag, in the form of "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
+BENCHMARK_EXPORT
bool ParseStringFlag(const char* str, const char* flag, std::string* value);
// Parses a string for a kvpairs flag in the form "--flag=key=value,key=value"
//
// On success, stores the value of the flag in *value and returns true. On
// failure returns false, though *value may have been mutated.
+BENCHMARK_EXPORT
bool ParseKeyValueFlag(const char* str, const char* flag,
std::map<std::string, std::string>* value);
// Returns true if the string matches the flag.
+BENCHMARK_EXPORT
bool IsFlag(const char* str, const char* flag);
// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
// some non-alphanumeric character. Also returns false if the value matches
// one of 'no', 'false', 'off' (case-insensitive). As a special case, also
// returns true if value is the empty string.
+BENCHMARK_EXPORT
bool IsTruthyFlagValue(const std::string& value);
} // end namespace benchmark
diff --git a/third-party/benchmark/src/complexity.cc b/third-party/benchmark/src/complexity.cc
index 825c573..eee3122 100644
--- a/third-party/benchmark/src/complexity.cc
+++ b/third-party/benchmark/src/complexity.cc
@@ -37,12 +37,14 @@
return [](IterationCount n) -> double { return std::pow(n, 3); };
case oLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */
- return
- [](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
+ return [](IterationCount n) {
+ return kLog2E * std::log(static_cast<double>(n));
+ };
case oNLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */
return [](IterationCount n) {
- return kLog2E * n * log(static_cast<double>(n));
+ return kLog2E * static_cast<double>(n) *
+ std::log(static_cast<double>(n));
};
case o1:
default:
@@ -75,12 +77,12 @@
// given by the lambda expression.
// - n : Vector containing the size of the benchmark tests.
// - time : Vector containing the times for the benchmark tests.
-// - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };).
+// - fitting_curve : lambda expression (e.g. [](ComplexityN n) {return n; };).
// For a deeper explanation on the algorithm logic, please refer to
// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics
-LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
+LeastSq MinimalLeastSq(const std::vector<ComplexityN>& n,
const std::vector<double>& time,
BigOFunc* fitting_curve) {
double sigma_gn_squared = 0.0;
@@ -105,12 +107,12 @@
double rms = 0.0;
for (size_t i = 0; i < n.size(); ++i) {
double fit = result.coef * fitting_curve(n[i]);
- rms += pow((time[i] - fit), 2);
+ rms += std::pow((time[i] - fit), 2);
}
// Normalized RMS by the mean of the observed values
- double mean = sigma_time / n.size();
- result.rms = sqrt(rms / n.size()) / mean;
+ double mean = sigma_time / static_cast<double>(n.size());
+ result.rms = std::sqrt(rms / static_cast<double>(n.size())) / mean;
return result;
}
@@ -122,7 +124,7 @@
// - complexity : If different than oAuto, the fitting curve will stick to
// this one. If it is oAuto, it will be calculated the best
// fitting curve.
-LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
+LeastSq MinimalLeastSq(const std::vector<ComplexityN>& n,
const std::vector<double>& time, const BigO complexity) {
BM_CHECK_EQ(n.size(), time.size());
BM_CHECK_GE(n.size(), 2); // Do not compute fitting curve is less than two
@@ -162,7 +164,7 @@
if (reports.size() < 2) return results;
// Accumulators.
- std::vector<int64_t> n;
+ std::vector<ComplexityN> n;
std::vector<double> real_time;
std::vector<double> cpu_time;
@@ -171,8 +173,10 @@
BM_CHECK_GT(run.complexity_n, 0)
<< "Did you forget to call SetComplexityN?";
n.push_back(run.complexity_n);
- real_time.push_back(run.real_accumulated_time / run.iterations);
- cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
+ real_time.push_back(run.real_accumulated_time /
+ static_cast<double>(run.iterations));
+ cpu_time.push_back(run.cpu_accumulated_time /
+ static_cast<double>(run.iterations));
}
LeastSq result_cpu;
@@ -182,8 +186,19 @@
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
} else {
- result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
- result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
+ const BigO* InitialBigO = &reports[0].complexity;
+ const bool use_real_time_for_initial_big_o =
+ reports[0].use_real_time_for_initial_big_o;
+ if (use_real_time_for_initial_big_o) {
+ result_real = MinimalLeastSq(n, real_time, *InitialBigO);
+ InitialBigO = &result_real.complexity;
+ // The Big-O complexity for CPU time must have the same Big-O function!
+ }
+ result_cpu = MinimalLeastSq(n, cpu_time, *InitialBigO);
+ InitialBigO = &result_cpu.complexity;
+ if (!use_real_time_for_initial_big_o) {
+ result_real = MinimalLeastSq(n, real_time, *InitialBigO);
+ }
}
// Drop the 'args' when reporting complexity.
diff --git a/third-party/benchmark/src/complexity.h b/third-party/benchmark/src/complexity.h
index df29b48..0a0679b 100644
--- a/third-party/benchmark/src/complexity.h
+++ b/third-party/benchmark/src/complexity.h
@@ -31,7 +31,7 @@
const std::vector<BenchmarkReporter::Run>& reports);
// This data structure will contain the result returned by MinimalLeastSq
-// - coef : Estimated coeficient for the high-order term as
+// - coef : Estimated coefficient for the high-order term as
// interpolated from data.
// - rms : Normalized Root Mean Squared Error.
// - complexity : Scalability form (e.g. oN, oNLogN). In case a scalability
diff --git a/third-party/benchmark/src/console_reporter.cc b/third-party/benchmark/src/console_reporter.cc
index 04cc0b7..35c3de2 100644
--- a/third-party/benchmark/src/console_reporter.cc
+++ b/third-party/benchmark/src/console_reporter.cc
@@ -33,6 +33,7 @@
namespace benchmark {
+BENCHMARK_EXPORT
bool ConsoleReporter::ReportContext(const Context& context) {
name_field_width_ = context.name_field_width;
printed_header_ = false;
@@ -41,17 +42,22 @@
PrintBasicContext(&GetErrorStream(), context);
#ifdef BENCHMARK_OS_WINDOWS
- if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) {
- GetErrorStream()
- << "Color printing is only supported for stdout on windows."
- " Disabling color printing\n";
- output_options_ = static_cast<OutputOptions>(output_options_ & ~OO_Color);
+ if ((output_options_ & OO_Color)) {
+ auto stdOutBuf = std::cout.rdbuf();
+ auto outStreamBuf = GetOutputStream().rdbuf();
+ if (stdOutBuf != outStreamBuf) {
+ GetErrorStream()
+ << "Color printing is only supported for stdout on windows."
+ " Disabling color printing\n";
+ output_options_ = static_cast<OutputOptions>(output_options_ & ~OO_Color);
+ }
}
#endif
return true;
}
+BENCHMARK_EXPORT
void ConsoleReporter::PrintHeader(const Run& run) {
std::string str =
FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_),
@@ -69,6 +75,7 @@
GetOutputStream() << line << "\n" << str << "\n" << line << "\n";
}
+BENCHMARK_EXPORT
void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
for (const auto& run : reports) {
// print the header:
@@ -99,6 +106,9 @@
}
static std::string FormatTime(double time) {
+ // For the time columns of the console printer 13 digits are reserved. One of
+ // them is a space and max two of them are the time unit (e.g ns). That puts
+ // us at 10 digits usable for the number.
// Align decimal places...
if (time < 1.0) {
return FormatString("%10.3f", time);
@@ -109,9 +119,15 @@
if (time < 100.0) {
return FormatString("%10.1f", time);
}
+ // Assuming the time is at max 9.9999e+99 and we have 10 digits for the
+ // number, we get 10-1(.)-1(e)-1(sign)-2(exponent) = 5 digits to print.
+ if (time > 9999999999 /*max 10 digit number*/) {
+ return FormatString("%1.4e", time);
+ }
return FormatString("%10.0f", time);
}
+BENCHMARK_EXPORT
void ConsoleReporter::PrintRunData(const Run& result) {
typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...);
auto& Out = GetOutputStream();
@@ -123,9 +139,13 @@
printer(Out, name_color, "%-*s ", name_field_width_,
result.benchmark_name().c_str());
- if (result.error_occurred) {
+ if (internal::SkippedWithError == result.skipped) {
printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
- result.error_message.c_str());
+ result.skip_message.c_str());
+ printer(Out, COLOR_DEFAULT, "\n");
+ return;
+ } else if (internal::SkippedWithMessage == result.skipped) {
+ printer(Out, COLOR_WHITE, "SKIPPED: \'%s\'", result.skip_message.c_str());
printer(Out, COLOR_DEFAULT, "\n");
return;
}
diff --git a/third-party/benchmark/src/counter.cc b/third-party/benchmark/src/counter.cc
index cf5b78e..aa14cd8 100644
--- a/third-party/benchmark/src/counter.cc
+++ b/third-party/benchmark/src/counter.cc
@@ -27,10 +27,10 @@
v /= num_threads;
}
if (c.flags & Counter::kIsIterationInvariant) {
- v *= iterations;
+ v *= static_cast<double>(iterations);
}
if (c.flags & Counter::kAvgIterations) {
- v /= iterations;
+ v /= static_cast<double>(iterations);
}
if (c.flags & Counter::kInvert) { // Invert is *always* last.
diff --git a/third-party/benchmark/src/csv_reporter.cc b/third-party/benchmark/src/csv_reporter.cc
index 1c5e9fa..4b39e2c 100644
--- a/third-party/benchmark/src/csv_reporter.cc
+++ b/third-party/benchmark/src/csv_reporter.cc
@@ -52,11 +52,13 @@
return '"' + tmp + '"';
}
+BENCHMARK_EXPORT
bool CSVReporter::ReportContext(const Context& context) {
PrintBasicContext(&GetErrorStream(), context);
return true;
}
+BENCHMARK_EXPORT
void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
std::ostream& Out = GetOutputStream();
@@ -103,13 +105,14 @@
}
}
+BENCHMARK_EXPORT
void CSVReporter::PrintRunData(const Run& run) {
std::ostream& Out = GetOutputStream();
Out << CsvEscape(run.benchmark_name()) << ",";
- if (run.error_occurred) {
+ if (run.skipped) {
Out << std::string(elements.size() - 3, ',');
- Out << "true,";
- Out << CsvEscape(run.error_message) << "\n";
+ Out << std::boolalpha << (internal::SkippedWithError == run.skipped) << ",";
+ Out << CsvEscape(run.skip_message) << "\n";
return;
}
@@ -119,13 +122,21 @@
}
Out << ",";
- Out << run.GetAdjustedRealTime() << ",";
- Out << run.GetAdjustedCPUTime() << ",";
+ if (run.run_type != Run::RT_Aggregate ||
+ run.aggregate_unit == StatisticUnit::kTime) {
+ Out << run.GetAdjustedRealTime() << ",";
+ Out << run.GetAdjustedCPUTime() << ",";
+ } else {
+ assert(run.aggregate_unit == StatisticUnit::kPercentage);
+ Out << run.real_accumulated_time << ",";
+ Out << run.cpu_accumulated_time << ",";
+ }
// Do not print timeLabel on bigO and RMS report
if (run.report_big_o) {
Out << GetBigOString(run.complexity);
- } else if (!run.report_rms) {
+ } else if (!run.report_rms &&
+ run.aggregate_unit != StatisticUnit::kPercentage) {
Out << GetTimeUnitString(run.time_unit);
}
Out << ",";
diff --git a/third-party/benchmark/src/cycleclock.h b/third-party/benchmark/src/cycleclock.h
index d65d32a..eff563e 100644
--- a/third-party/benchmark/src/cycleclock.h
+++ b/third-party/benchmark/src/cycleclock.h
@@ -36,7 +36,8 @@
// declarations of some other intrinsics, breaking compilation.
// Therefore, we simply declare __rdtsc ourselves. See also
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
-#if defined(COMPILER_MSVC) && !defined(_M_IX86) && !defined(_M_ARM64)
+#if defined(COMPILER_MSVC) && !defined(_M_IX86) && !defined(_M_ARM64) && \
+ !defined(_M_ARM64EC)
extern "C" uint64_t __rdtsc();
#pragma intrinsic(__rdtsc)
#endif
@@ -114,7 +115,7 @@
// when I know it will work. Otherwise, I'll use __rdtsc and hope
// the code is being compiled with a non-ancient compiler.
_asm rdtsc
-#elif defined(COMPILER_MSVC) && defined(_M_ARM64)
+#elif defined(COMPILER_MSVC) && (defined(_M_ARM64) || defined(_M_ARM64EC))
// See // https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics
// and https://reviews.llvm.org/D53115
int64_t virtual_timer_value;
@@ -132,7 +133,7 @@
// Native Client does not provide any API to access cycle counter.
// Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
- // because is provides nanosecond resolution (which is noticable at
+ // because it provides nanosecond resolution (which is noticeable at
// least for PNaCl modules running on x86 Mac & Linux).
// Initialize to always return 0 if clock_gettime fails.
struct timespec ts = {0, 0};
@@ -173,7 +174,7 @@
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
-#elif defined(__loongarch__)
+#elif defined(__loongarch__) || defined(__csky__)
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
@@ -188,15 +189,16 @@
#endif
return tsc;
#elif defined(__riscv) // RISC-V
- // Use RDCYCLE (and RDCYCLEH on riscv32)
+ // Use RDTIME (and RDTIMEH on riscv32).
+ // RDCYCLE is a privileged instruction since Linux 6.6.
#if __riscv_xlen == 32
uint32_t cycles_lo, cycles_hi0, cycles_hi1;
// This asm also includes the PowerPC overflow handling strategy, as above.
// Implemented in assembly because Clang insisted on branching.
asm volatile(
- "rdcycleh %0\n"
- "rdcycle %1\n"
- "rdcycleh %2\n"
+ "rdtimeh %0\n"
+ "rdtime %1\n"
+ "rdtimeh %2\n"
"sub %0, %0, %2\n"
"seqz %0, %0\n"
"sub %0, zero, %0\n"
@@ -205,17 +207,31 @@
return (static_cast<uint64_t>(cycles_hi1) << 32) | cycles_lo;
#else
uint64_t cycles;
- asm volatile("rdcycle %0" : "=r"(cycles));
+ asm volatile("rdtime %0" : "=r"(cycles));
return cycles;
#endif
#elif defined(__e2k__) || defined(__elbrus__)
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+#elif defined(__hexagon__)
+ uint64_t pcycle;
+ asm volatile("%0 = C15:14" : "=r"(pcycle));
+ return static_cast<double>(pcycle);
+#elif defined(__alpha__)
+ // Alpha has a cycle counter, the PCC register, but it is an unsigned 32-bit
+ // integer and thus wraps every ~4s, making using it for tick counts
+ // unreliable beyond this time range. The real-time clock is low-precision,
+ // roughly ~1ms, but it is the only option that can reasonably count
+ // indefinitely.
+ struct timeval tv;
+ gettimeofday(&tv, nullptr);
+ return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#else
-// The soft failover to a generic implementation is automatic only for ARM.
-// For other platforms the developer is expected to make an attempt to create
-// a fast implementation and use generic version if nothing better is available.
+ // The soft failover to a generic implementation is automatic only for ARM.
+ // For other platforms the developer is expected to make an attempt to create
+ // a fast implementation and use generic version if nothing better is
+ // available.
#error You need to define CycleTimer for your OS and CPU
#endif
}
diff --git a/third-party/benchmark/src/internal_macros.h b/third-party/benchmark/src/internal_macros.h
index 91f367b..8dd7d0c 100644
--- a/third-party/benchmark/src/internal_macros.h
+++ b/third-party/benchmark/src/internal_macros.h
@@ -1,8 +1,6 @@
#ifndef BENCHMARK_INTERNAL_MACROS_H_
#define BENCHMARK_INTERNAL_MACROS_H_
-#include "benchmark/benchmark.h"
-
/* Needed to detect STL */
#include <cstdlib>
@@ -44,6 +42,19 @@
#define BENCHMARK_OS_CYGWIN 1
#elif defined(_WIN32)
#define BENCHMARK_OS_WINDOWS 1
+ // WINAPI_FAMILY_PARTITION is defined in winapifamily.h.
+ // We include windows.h which implicitly includes winapifamily.h for compatibility.
+ #ifndef NOMINMAX
+ #define NOMINMAX
+ #endif
+ #include <windows.h>
+ #if defined(WINAPI_FAMILY_PARTITION)
+ #if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+ #define BENCHMARK_OS_WINDOWS_WIN32 1
+ #elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)
+ #define BENCHMARK_OS_WINDOWS_RT 1
+ #endif
+ #endif
#if defined(__MINGW32__)
#define BENCHMARK_OS_MINGW 1
#endif
@@ -80,6 +91,8 @@
#define BENCHMARK_OS_QNX 1
#elif defined(__MVS__)
#define BENCHMARK_OS_ZOS 1
+#elif defined(__hexagon__)
+#define BENCHMARK_OS_QURT 1
#endif
#if defined(__ANDROID__) && defined(__GLIBCXX__)
diff --git a/third-party/benchmark/src/json_reporter.cc b/third-party/benchmark/src/json_reporter.cc
index e84a4ed..b8c8c94 100644
--- a/third-party/benchmark/src/json_reporter.cc
+++ b/third-party/benchmark/src/json_reporter.cc
@@ -28,10 +28,6 @@
#include "timers.h"
namespace benchmark {
-namespace internal {
-extern std::map<std::string, std::string>* global_context;
-}
-
namespace {
std::string StrEscape(const std::string& s) {
@@ -89,12 +85,6 @@
return ss.str();
}
-std::string FormatKV(std::string const& key, IterationCount value) {
- std::stringstream ss;
- ss << '"' << StrEscape(key) << "\": " << value;
- return ss.str();
-}
-
std::string FormatKV(std::string const& key, double value) {
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": ";
@@ -177,15 +167,25 @@
}
out << "],\n";
+ out << indent << FormatKV("library_version", GetBenchmarkVersion());
+ out << ",\n";
+
#if defined(NDEBUG)
const char build_type[] = "release";
#else
const char build_type[] = "debug";
#endif
out << indent << FormatKV("library_build_type", build_type);
+ out << ",\n";
- if (internal::global_context != nullptr) {
- for (const auto& kv : *internal::global_context) {
+ // NOTE: our json schema is not strictly tied to the library version!
+ out << indent << FormatKV("json_schema_version", int64_t(1));
+
+ std::map<std::string, std::string>* global_context =
+ internal::GetGlobalContext();
+
+ if (global_context != nullptr) {
+ for (const auto& kv : *global_context) {
out << ",\n";
out << indent << FormatKV(kv.first, kv.second);
}
@@ -261,9 +261,12 @@
BENCHMARK_UNREACHABLE();
}()) << ",\n";
}
- if (run.error_occurred) {
- out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
- out << indent << FormatKV("error_message", run.error_message) << ",\n";
+ if (internal::SkippedWithError == run.skipped) {
+ out << indent << FormatKV("error_occurred", true) << ",\n";
+ out << indent << FormatKV("error_message", run.skip_message) << ",\n";
+ } else if (internal::SkippedWithMessage == run.skipped) {
+ out << indent << FormatKV("skipped", true) << ",\n";
+ out << indent << FormatKV("skip_message", run.skip_message) << ",\n";
}
if (!run.report_big_o && !run.report_rms) {
out << indent << FormatKV("iterations", run.iterations) << ",\n";
@@ -301,7 +304,8 @@
out << ",\n"
<< indent << FormatKV("max_bytes_used", memory_result.max_bytes_used);
- auto report_if_present = [&out, &indent](const char* label, int64_t val) {
+ auto report_if_present = [&out, &indent](const std::string& label,
+ int64_t val) {
if (val != MemoryManager::TombstoneValue)
out << ",\n" << indent << FormatKV(label, val);
};
diff --git a/third-party/benchmark/src/log.h b/third-party/benchmark/src/log.h
index 48c071a..9a21400 100644
--- a/third-party/benchmark/src/log.h
+++ b/third-party/benchmark/src/log.h
@@ -4,7 +4,12 @@
#include <iostream>
#include <ostream>
-#include "benchmark/benchmark.h"
+// NOTE: this is also defined in benchmark.h but we're trying to avoid a
+// dependency.
+// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer.
+#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L)
+#define BENCHMARK_HAS_CXX11
+#endif
namespace benchmark {
namespace internal {
@@ -23,7 +28,16 @@
private:
LogType(std::ostream* out) : out_(out) {}
std::ostream* out_;
- BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType);
+
+ // NOTE: we could use BENCHMARK_DISALLOW_COPY_AND_ASSIGN but we shouldn't have
+ // a dependency on benchmark.h from here.
+#ifndef BENCHMARK_HAS_CXX11
+ LogType(const LogType&);
+ LogType& operator=(const LogType&);
+#else
+ LogType(const LogType&) = delete;
+ LogType& operator=(const LogType&) = delete;
+#endif
};
template <class Tp>
@@ -47,13 +61,13 @@
}
inline LogType& GetNullLogInstance() {
- static LogType log(nullptr);
- return log;
+ static LogType null_log(static_cast<std::ostream*>(nullptr));
+ return null_log;
}
inline LogType& GetErrorLogInstance() {
- static LogType log(&std::clog);
- return log;
+ static LogType error_log(&std::clog);
+ return error_log;
}
inline LogType& GetLogInstanceForLevel(int level) {
diff --git a/third-party/benchmark/src/perf_counters.cc b/third-party/benchmark/src/perf_counters.cc
index b2ac768..d466e27e 100644
--- a/third-party/benchmark/src/perf_counters.cc
+++ b/third-party/benchmark/src/perf_counters.cc
@@ -15,6 +15,7 @@
#include "perf_counters.h"
#include <cstring>
+#include <memory>
#include <vector>
#if defined HAVE_LIBPFM
@@ -28,105 +29,254 @@
constexpr size_t PerfCounterValues::kMaxCounters;
#if defined HAVE_LIBPFM
+
+size_t PerfCounterValues::Read(const std::vector<int>& leaders) {
+ // Create a pointer for multiple reads
+ const size_t bufsize = values_.size() * sizeof(values_[0]);
+ char* ptr = reinterpret_cast<char*>(values_.data());
+ size_t size = bufsize;
+ for (int lead : leaders) {
+ auto read_bytes = ::read(lead, ptr, size);
+ if (read_bytes >= ssize_t(sizeof(uint64_t))) {
+ // Actual data bytes are all bytes minus initial padding
+ std::size_t data_bytes = read_bytes - sizeof(uint64_t);
+ // This should be very cheap since it's in hot cache
+ std::memmove(ptr, ptr + sizeof(uint64_t), data_bytes);
+ // Increment our counters
+ ptr += data_bytes;
+ size -= data_bytes;
+ } else {
+ int err = errno;
+ GetErrorLogInstance() << "Error reading lead " << lead << " errno:" << err
+ << " " << ::strerror(err) << "\n";
+ return 0;
+ }
+ }
+ return (bufsize - size) / sizeof(uint64_t);
+}
+
const bool PerfCounters::kSupported = true;
-bool PerfCounters::Initialize() { return pfm_initialize() == PFM_SUCCESS; }
+// Initializes libpfm only on the first call. Returns whether that single
+// initialization was successful.
+bool PerfCounters::Initialize() {
+ // Function-scope static gets initialized only once on first call.
+ static const bool success = []() {
+ return pfm_initialize() == PFM_SUCCESS;
+ }();
+ return success;
+}
+
+bool PerfCounters::IsCounterSupported(const std::string& name) {
+ Initialize();
+ perf_event_attr_t attr;
+ std::memset(&attr, 0, sizeof(attr));
+ pfm_perf_encode_arg_t arg;
+ std::memset(&arg, 0, sizeof(arg));
+ arg.attr = &attr;
+ const int mode = PFM_PLM3; // user mode only
+ int ret = pfm_get_os_event_encoding(name.c_str(), mode, PFM_OS_PERF_EVENT_EXT,
+ &arg);
+ return (ret == PFM_SUCCESS);
+}
PerfCounters PerfCounters::Create(
const std::vector<std::string>& counter_names) {
- if (counter_names.empty()) {
- return NoCounters();
+ if (!counter_names.empty()) {
+ Initialize();
}
- if (counter_names.size() > PerfCounterValues::kMaxCounters) {
- GetErrorLogInstance()
- << counter_names.size()
- << " counters were requested. The minimum is 1, the maximum is "
- << PerfCounterValues::kMaxCounters << "\n";
- return NoCounters();
- }
- std::vector<int> counter_ids(counter_names.size());
- const int mode = PFM_PLM3; // user mode only
+ // Valid counters will populate these arrays but we start empty
+ std::vector<std::string> valid_names;
+ std::vector<int> counter_ids;
+ std::vector<int> leader_ids;
+
+ // Resize to the maximum possible
+ valid_names.reserve(counter_names.size());
+ counter_ids.reserve(counter_names.size());
+
+ const int kCounterMode = PFM_PLM3; // user mode only
+
+ // Group leads will be assigned on demand. The idea is that once we cannot
+ // create a counter descriptor, the reason is that this group has maxed out
+ // so we set the group_id again to -1 and retry - giving the algorithm a
+ // chance to create a new group leader to hold the next set of counters.
+ int group_id = -1;
+
+ // Loop through all performance counters
for (size_t i = 0; i < counter_names.size(); ++i) {
- const bool is_first = i == 0;
- struct perf_event_attr attr {};
- attr.size = sizeof(attr);
- const int group_id = !is_first ? counter_ids[0] : -1;
+ // we are about to push into the valid names vector
+ // check if we did not reach the maximum
+ if (valid_names.size() == PerfCounterValues::kMaxCounters) {
+ // Log a message if we maxed out and stop adding
+ GetErrorLogInstance()
+ << counter_names.size() << " counters were requested. The maximum is "
+ << PerfCounterValues::kMaxCounters << " and " << valid_names.size()
+ << " were already added. All remaining counters will be ignored\n";
+ // stop the loop and return what we have already
+ break;
+ }
+
+ // Check if this name is empty
const auto& name = counter_names[i];
if (name.empty()) {
- GetErrorLogInstance() << "A counter name was the empty string\n";
- return NoCounters();
+ GetErrorLogInstance()
+ << "A performance counter name was the empty string\n";
+ continue;
}
+
+ // Here first means first in group, ie the group leader
+ const bool is_first = (group_id < 0);
+
+ // This struct will be populated by libpfm from the counter string
+ // and then fed into the syscall perf_event_open
+ struct perf_event_attr attr {};
+ attr.size = sizeof(attr);
+
+ // This is the input struct to libpfm.
pfm_perf_encode_arg_t arg{};
arg.attr = &attr;
-
- const int pfm_get =
- pfm_get_os_event_encoding(name.c_str(), mode, PFM_OS_PERF_EVENT, &arg);
+ const int pfm_get = pfm_get_os_event_encoding(name.c_str(), kCounterMode,
+ PFM_OS_PERF_EVENT, &arg);
if (pfm_get != PFM_SUCCESS) {
- GetErrorLogInstance() << "Unknown counter name: " << name << "\n";
- return NoCounters();
+ GetErrorLogInstance()
+ << "Unknown performance counter name: " << name << "\n";
+ continue;
}
- attr.disabled = is_first;
- // Note: the man page for perf_event_create suggests inerit = true and
+
+ // We then proceed to populate the remaining fields in our attribute struct
+ // Note: the man page for perf_event_create suggests inherit = true and
// read_format = PERF_FORMAT_GROUP don't work together, but that's not the
// case.
+ attr.disabled = is_first;
attr.inherit = true;
attr.pinned = is_first;
attr.exclude_kernel = true;
attr.exclude_user = false;
attr.exclude_hv = true;
- // Read all counters in one read.
+
+ // Read all counters in a group in one read.
attr.read_format = PERF_FORMAT_GROUP;
int id = -1;
- static constexpr size_t kNrOfSyscallRetries = 5;
- // Retry syscall as it was interrupted often (b/64774091).
- for (size_t num_retries = 0; num_retries < kNrOfSyscallRetries;
- ++num_retries) {
- id = perf_event_open(&attr, 0, -1, group_id, 0);
- if (id >= 0 || errno != EINTR) {
- break;
+ while (id < 0) {
+ static constexpr size_t kNrOfSyscallRetries = 5;
+ // Retry syscall as it was interrupted often (b/64774091).
+ for (size_t num_retries = 0; num_retries < kNrOfSyscallRetries;
+ ++num_retries) {
+ id = perf_event_open(&attr, 0, -1, group_id, 0);
+ if (id >= 0 || errno != EINTR) {
+ break;
+ }
+ }
+ if (id < 0) {
+ // If the file descriptor is negative we might have reached a limit
+ // in the current group. Set the group_id to -1 and retry
+ if (group_id >= 0) {
+ // Create a new group
+ group_id = -1;
+ } else {
+ // At this point we have already retried to set a new group id and
+ // failed. We then give up.
+ break;
+ }
}
}
+
+ // We failed to get a new file descriptor. We might have reached a hard
+ // hardware limit that cannot be resolved even with group multiplexing
if (id < 0) {
- GetErrorLogInstance()
- << "Failed to get a file descriptor for " << name << "\n";
+ GetErrorLogInstance() << "***WARNING*** Failed to get a file descriptor "
+ "for performance counter "
+ << name << ". Ignoring\n";
+
+ // We give up on this counter but try to keep going
+ // as the others would be fine
+ continue;
+ }
+ if (group_id < 0) {
+ // This is a leader, store and assign it to the current file descriptor
+ leader_ids.push_back(id);
+ group_id = id;
+ }
+ // This is a valid counter, add it to our descriptor's list
+ counter_ids.push_back(id);
+ valid_names.push_back(name);
+ }
+
+ // Loop through all group leaders activating them
+ // There is another option of starting ALL counters in a process but
+ // that would be too far-reaching an intrusion. If the user is using PMCs
+ // by themselves then this would have a side effect on them. It is
+ // friendlier to loop through all groups individually.
+ for (int lead : leader_ids) {
+ if (ioctl(lead, PERF_EVENT_IOC_ENABLE) != 0) {
+ // This should never happen but if it does, we give up on the
+ // entire batch as recovery would be a mess.
+ GetErrorLogInstance() << "***WARNING*** Failed to start counters. "
+ "Clearing out all counters.\n";
+
+ // Close all performance counters
+ for (int id : counter_ids) {
+ ::close(id);
+ }
+
+ // Return an empty object so our internal state is still good and
+ // the process can continue normally without impact
return NoCounters();
}
-
- counter_ids[i] = id;
- }
- if (ioctl(counter_ids[0], PERF_EVENT_IOC_ENABLE) != 0) {
- GetErrorLogInstance() << "Failed to start counters\n";
- return NoCounters();
}
- return PerfCounters(counter_names, std::move(counter_ids));
+ return PerfCounters(std::move(valid_names), std::move(counter_ids),
+ std::move(leader_ids));
}
-PerfCounters::~PerfCounters() {
+void PerfCounters::CloseCounters() const {
if (counter_ids_.empty()) {
return;
}
- ioctl(counter_ids_[0], PERF_EVENT_IOC_DISABLE);
+ for (int lead : leader_ids_) {
+ ioctl(lead, PERF_EVENT_IOC_DISABLE);
+ }
for (int fd : counter_ids_) {
close(fd);
}
}
#else // defined HAVE_LIBPFM
+size_t PerfCounterValues::Read(const std::vector<int>&) { return 0; }
+
const bool PerfCounters::kSupported = false;
bool PerfCounters::Initialize() { return false; }
+bool PerfCounters::IsCounterSupported(const std::string&) { return false; }
+
PerfCounters PerfCounters::Create(
const std::vector<std::string>& counter_names) {
if (!counter_names.empty()) {
- GetErrorLogInstance() << "Performance counters not supported.";
+ GetErrorLogInstance() << "Performance counters not supported.\n";
}
return NoCounters();
}
-PerfCounters::~PerfCounters() = default;
+void PerfCounters::CloseCounters() const {}
#endif // defined HAVE_LIBPFM
+
+PerfCountersMeasurement::PerfCountersMeasurement(
+ const std::vector<std::string>& counter_names)
+ : start_values_(counter_names.size()), end_values_(counter_names.size()) {
+ counters_ = PerfCounters::Create(counter_names);
+}
+
+PerfCounters& PerfCounters::operator=(PerfCounters&& other) noexcept {
+ if (this != &other) {
+ CloseCounters();
+
+ counter_ids_ = std::move(other.counter_ids_);
+ leader_ids_ = std::move(other.leader_ids_);
+ counter_names_ = std::move(other.counter_names_);
+ }
+ return *this;
+}
} // namespace internal
} // namespace benchmark
diff --git a/third-party/benchmark/src/perf_counters.h b/third-party/benchmark/src/perf_counters.h
index 47ca138..bf5eb6b 100644
--- a/third-party/benchmark/src/perf_counters.h
+++ b/third-party/benchmark/src/perf_counters.h
@@ -17,16 +17,25 @@
#include <array>
#include <cstdint>
+#include <cstring>
+#include <memory>
#include <vector>
#include "benchmark/benchmark.h"
#include "check.h"
#include "log.h"
+#include "mutex.h"
#ifndef BENCHMARK_OS_WINDOWS
#include <unistd.h>
#endif
+#if defined(_MSC_VER)
+#pragma warning(push)
+// C4251: <symbol> needs to have dll-interface to be used by clients of class
+#pragma warning(disable : 4251)
+#endif
+
namespace benchmark {
namespace internal {
@@ -36,18 +45,21 @@
// The implementation ensures the storage is inlined, and allows 0-based
// indexing into the counter values.
// The object is used in conjunction with a PerfCounters object, by passing it
-// to Snapshot(). The values are populated such that
-// perfCounters->names()[i]'s value is obtained at position i (as given by
-// operator[]) of this object.
-class PerfCounterValues {
+// to Snapshot(). The Read() method relocates individual reads, discarding
+// the initial padding from each group leader in the values buffer such that
+// all user accesses through the [] operator are correct.
+class BENCHMARK_EXPORT PerfCounterValues {
public:
explicit PerfCounterValues(size_t nr_counters) : nr_counters_(nr_counters) {
BM_CHECK_LE(nr_counters_, kMaxCounters);
}
- uint64_t operator[](size_t pos) const { return values_[kPadding + pos]; }
+ // We are reading correctly now so the values don't need to skip padding
+ uint64_t operator[](size_t pos) const { return values_[pos]; }
- static constexpr size_t kMaxCounters = 3;
+ // Increased the maximum to 32 only since the buffer
+ // is std::array<> backed
+ static constexpr size_t kMaxCounters = 32;
private:
friend class PerfCounters;
@@ -58,7 +70,14 @@
sizeof(uint64_t) * (kPadding + nr_counters_)};
}
- static constexpr size_t kPadding = 1;
+ // This reading is complex and as the goal of this class is to
+ // abstract away the intricacies of the reading process, this is
+ // a better place for it
+ size_t Read(const std::vector<int>& leaders);
+
+ // Move the padding to 2 due to the reading algorithm (1st padding plus a
+ // current read padding)
+ static constexpr size_t kPadding = 2;
std::array<uint64_t, kPadding + kMaxCounters> values_;
const size_t nr_counters_;
};
@@ -66,27 +85,34 @@
// Collect PMU counters. The object, once constructed, is ready to be used by
// calling read(). PMU counter collection is enabled from the time create() is
// called, to obtain the object, until the object's destructor is called.
-class PerfCounters final {
+class BENCHMARK_EXPORT PerfCounters final {
public:
// True iff this platform supports performance counters.
static const bool kSupported;
- bool IsValid() const { return is_valid_; }
+ // Returns an empty object
static PerfCounters NoCounters() { return PerfCounters(); }
- ~PerfCounters();
+ ~PerfCounters() { CloseCounters(); }
+ PerfCounters() = default;
PerfCounters(PerfCounters&&) = default;
PerfCounters(const PerfCounters&) = delete;
+ PerfCounters& operator=(PerfCounters&&) noexcept;
+ PerfCounters& operator=(const PerfCounters&) = delete;
// Platform-specific implementations may choose to do some library
// initialization here.
static bool Initialize();
+ // Check if the given counter is supported, if the app wants to
+ // check before passing
+ static bool IsCounterSupported(const std::string& name);
+
// Return a PerfCounters object ready to read the counters with the names
// specified. The values are user-mode only. The counter name format is
// implementation and OS specific.
- // TODO: once we move to C++-17, this should be a std::optional, and then the
- // IsValid() boolean can be dropped.
+ // In case of failure, this method will in the worst case return an
+ // empty object whose state will still be valid.
static PerfCounters Create(const std::vector<std::string>& counter_names);
// Take a snapshot of the current value of the counters into the provided
@@ -95,10 +121,7 @@
BENCHMARK_ALWAYS_INLINE bool Snapshot(PerfCounterValues* values) const {
#ifndef BENCHMARK_OS_WINDOWS
assert(values != nullptr);
- assert(IsValid());
- auto buffer = values->get_data_buffer();
- auto read_bytes = ::read(counter_ids_[0], buffer.first, buffer.second);
- return static_cast<size_t>(read_bytes) == buffer.second;
+ return values->Read(leader_ids_) == counter_ids_.size();
#else
(void)values;
return false;
@@ -110,63 +133,68 @@
private:
PerfCounters(const std::vector<std::string>& counter_names,
- std::vector<int>&& counter_ids)
+ std::vector<int>&& counter_ids, std::vector<int>&& leader_ids)
: counter_ids_(std::move(counter_ids)),
- counter_names_(counter_names),
- is_valid_(true) {}
- PerfCounters() : is_valid_(false) {}
+ leader_ids_(std::move(leader_ids)),
+ counter_names_(counter_names) {}
+
+ void CloseCounters() const;
std::vector<int> counter_ids_;
- const std::vector<std::string> counter_names_;
- const bool is_valid_;
+ std::vector<int> leader_ids_;
+ std::vector<std::string> counter_names_;
};
// Typical usage of the above primitives.
-class PerfCountersMeasurement final {
+class BENCHMARK_EXPORT PerfCountersMeasurement final {
public:
- PerfCountersMeasurement(PerfCounters&& c)
- : counters_(std::move(c)),
- start_values_(counters_.IsValid() ? counters_.names().size() : 0),
- end_values_(counters_.IsValid() ? counters_.names().size() : 0) {}
+ PerfCountersMeasurement(const std::vector<std::string>& counter_names);
- bool IsValid() const { return counters_.IsValid(); }
+ size_t num_counters() const { return counters_.num_counters(); }
- BENCHMARK_ALWAYS_INLINE void Start() {
- assert(IsValid());
+ std::vector<std::string> names() const { return counters_.names(); }
+
+ BENCHMARK_ALWAYS_INLINE bool Start() {
+ if (num_counters() == 0) return true;
// Tell the compiler to not move instructions above/below where we take
// the snapshot.
ClobberMemory();
- counters_.Snapshot(&start_values_);
+ valid_read_ &= counters_.Snapshot(&start_values_);
ClobberMemory();
+
+ return valid_read_;
}
- BENCHMARK_ALWAYS_INLINE std::vector<std::pair<std::string, double>>
- StopAndGetMeasurements() {
- assert(IsValid());
+ BENCHMARK_ALWAYS_INLINE bool Stop(
+ std::vector<std::pair<std::string, double>>& measurements) {
+ if (num_counters() == 0) return true;
// Tell the compiler to not move instructions above/below where we take
// the snapshot.
ClobberMemory();
- counters_.Snapshot(&end_values_);
+ valid_read_ &= counters_.Snapshot(&end_values_);
ClobberMemory();
- std::vector<std::pair<std::string, double>> ret;
for (size_t i = 0; i < counters_.names().size(); ++i) {
double measurement = static_cast<double>(end_values_[i]) -
static_cast<double>(start_values_[i]);
- ret.push_back({counters_.names()[i], measurement});
+ measurements.push_back({counters_.names()[i], measurement});
}
- return ret;
+
+ return valid_read_;
}
private:
PerfCounters counters_;
+ bool valid_read_ = true;
PerfCounterValues start_values_;
PerfCounterValues end_values_;
};
-BENCHMARK_UNUSED static bool perf_init_anchor = PerfCounters::Initialize();
-
} // namespace internal
} // namespace benchmark
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
#endif // BENCHMARK_PERF_COUNTERS_H
diff --git a/third-party/benchmark/src/re.h b/third-party/benchmark/src/re.h
index 6300467..9afb869b 100644
--- a/third-party/benchmark/src/re.h
+++ b/third-party/benchmark/src/re.h
@@ -33,7 +33,7 @@
// Prefer C regex libraries when compiling w/o exceptions so that we can
// correctly report errors.
#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \
- defined(BENCHMARK_HAVE_STD_REGEX) && \
+ defined(HAVE_STD_REGEX) && \
(defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX))
#undef HAVE_STD_REGEX
#endif
diff --git a/third-party/benchmark/src/reporter.cc b/third-party/benchmark/src/reporter.cc
index 1d2df17..076bc31 100644
--- a/third-party/benchmark/src/reporter.cc
+++ b/third-party/benchmark/src/reporter.cc
@@ -25,9 +25,6 @@
#include "timers.h"
namespace benchmark {
-namespace internal {
-extern std::map<std::string, std::string> *global_context;
-}
BenchmarkReporter::BenchmarkReporter()
: output_stream_(&std::cout), error_stream_(&std::cerr) {}
@@ -39,7 +36,11 @@
BM_CHECK(out) << "cannot be null";
auto &Out = *out;
+#ifndef BENCHMARK_OS_QURT
+ // Date/time information is not available on QuRT.
+ // Attempting to get it via this call cause the binary to crash.
Out << LocalDateTimeString() << "\n";
+#endif
if (context.executable_name)
Out << "Running " << context.executable_name << "\n";
@@ -67,8 +68,11 @@
Out << "\n";
}
- if (internal::global_context != nullptr) {
- for (const auto &kv : *internal::global_context) {
+ std::map<std::string, std::string> *global_context =
+ internal::GetGlobalContext();
+
+ if (global_context != nullptr) {
+ for (const auto &kv : *global_context) {
Out << kv.first << ": " << kv.second << "\n";
}
}
diff --git a/third-party/benchmark/src/sleep.cc b/third-party/benchmark/src/sleep.cc
deleted file mode 100644
index ab59000..0000000
--- a/third-party/benchmark/src/sleep.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "sleep.h"
-
-#include <cerrno>
-#include <cstdlib>
-#include <ctime>
-
-#include "internal_macros.h"
-
-#ifdef BENCHMARK_OS_WINDOWS
-#include <windows.h>
-#endif
-
-#ifdef BENCHMARK_OS_ZOS
-#include <unistd.h>
-#endif
-
-namespace benchmark {
-#ifdef BENCHMARK_OS_WINDOWS
-// Window's Sleep takes milliseconds argument.
-void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); }
-void SleepForSeconds(double seconds) {
- SleepForMilliseconds(static_cast<int>(kNumMillisPerSecond * seconds));
-}
-#else // BENCHMARK_OS_WINDOWS
-void SleepForMicroseconds(int microseconds) {
-#ifdef BENCHMARK_OS_ZOS
- // z/OS does not support nanosleep. Instead call sleep() and then usleep() to
- // sleep for the remaining microseconds because usleep() will fail if its
- // argument is greater than 1000000.
- div_t sleepTime = div(microseconds, kNumMicrosPerSecond);
- int seconds = sleepTime.quot;
- while (seconds != 0) seconds = sleep(seconds);
- while (usleep(sleepTime.rem) == -1 && errno == EINTR)
- ;
-#else
- struct timespec sleep_time;
- sleep_time.tv_sec = microseconds / kNumMicrosPerSecond;
- sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro;
- while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR)
- ; // Ignore signals and wait for the full interval to elapse.
-#endif
-}
-
-void SleepForMilliseconds(int milliseconds) {
- SleepForMicroseconds(milliseconds * kNumMicrosPerMilli);
-}
-
-void SleepForSeconds(double seconds) {
- SleepForMicroseconds(static_cast<int>(seconds * kNumMicrosPerSecond));
-}
-#endif // BENCHMARK_OS_WINDOWS
-} // end namespace benchmark
diff --git a/third-party/benchmark/src/sleep.h b/third-party/benchmark/src/sleep.h
deleted file mode 100644
index f98551a..0000000
--- a/third-party/benchmark/src/sleep.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef BENCHMARK_SLEEP_H_
-#define BENCHMARK_SLEEP_H_
-
-namespace benchmark {
-const int kNumMillisPerSecond = 1000;
-const int kNumMicrosPerMilli = 1000;
-const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000;
-const int kNumNanosPerMicro = 1000;
-const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond;
-
-void SleepForMilliseconds(int milliseconds);
-void SleepForSeconds(double seconds);
-} // end namespace benchmark
-
-#endif // BENCHMARK_SLEEP_H_
diff --git a/third-party/benchmark/src/statistics.cc b/third-party/benchmark/src/statistics.cc
index 3e5ef09..261dcb2 100644
--- a/third-party/benchmark/src/statistics.cc
+++ b/third-party/benchmark/src/statistics.cc
@@ -32,7 +32,7 @@
double StatisticsMean(const std::vector<double>& v) {
if (v.empty()) return 0.0;
- return StatisticsSum(v) * (1.0 / v.size());
+ return StatisticsSum(v) * (1.0 / static_cast<double>(v.size()));
}
double StatisticsMedian(const std::vector<double>& v) {
@@ -42,13 +42,13 @@
auto center = copy.begin() + v.size() / 2;
std::nth_element(copy.begin(), center, copy.end());
- // did we have an odd number of samples?
- // if yes, then center is the median
- // it no, then we are looking for the average between center and the value
- // before
+ // Did we have an odd number of samples? If yes, then center is the median.
+ // If not, then we are looking for the average between center and the value
+ // before. Instead of resorting, we just look for the max value before it,
+ // which is not necessarily the element immediately preceding `center` Since
+ // `copy` is only partially sorted by `nth_element`.
if (v.size() % 2 == 1) return *center;
- auto center2 = copy.begin() + v.size() / 2 - 1;
- std::nth_element(copy.begin(), center2, copy.end());
+ auto center2 = std::max_element(copy.begin(), center);
return (*center + *center2) / 2.0;
}
@@ -71,8 +71,11 @@
// Sample standard deviation is undefined for n = 1
if (v.size() == 1) return 0.0;
- const double avg_squares = SumSquares(v) * (1.0 / v.size());
- return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
+ const double avg_squares =
+ SumSquares(v) * (1.0 / static_cast<double>(v.size()));
+ return Sqrt(static_cast<double>(v.size()) /
+ (static_cast<double>(v.size()) - 1.0) *
+ (avg_squares - Sqr(mean)));
}
double StatisticsCV(const std::vector<double>& v) {
@@ -81,6 +84,8 @@
const auto stddev = StatisticsStdDev(v);
const auto mean = StatisticsMean(v);
+ if (std::fpclassify(mean) == FP_ZERO) return 0.0;
+
return stddev / mean;
}
@@ -89,9 +94,8 @@
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
- auto error_count =
- std::count_if(reports.begin(), reports.end(),
- [](Run const& run) { return run.error_occurred; });
+ auto error_count = std::count_if(reports.begin(), reports.end(),
+ [](Run const& run) { return run.skipped; });
if (reports.size() - error_count < 2) {
// We don't report aggregated data if there was a single run.
@@ -118,11 +122,13 @@
for (auto const& cnt : r.counters) {
auto it = counter_stats.find(cnt.first);
if (it == counter_stats.end()) {
- counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
- it = counter_stats.find(cnt.first);
+ it = counter_stats
+ .emplace(cnt.first,
+ CounterStat{cnt.second, std::vector<double>{}})
+ .first;
it->second.s.reserve(reports.size());
} else {
- BM_CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
+ BM_CHECK_EQ(it->second.c.flags, cnt.second.flags);
}
}
}
@@ -131,7 +137,7 @@
for (Run const& run : reports) {
BM_CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
BM_CHECK_EQ(run_iterations, run.iterations);
- if (run.error_occurred) continue;
+ if (run.skipped) continue;
real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
// user counters
diff --git a/third-party/benchmark/src/statistics.h b/third-party/benchmark/src/statistics.h
index a9545a5..6e5560e 100644
--- a/third-party/benchmark/src/statistics.h
+++ b/third-party/benchmark/src/statistics.h
@@ -22,15 +22,21 @@
namespace benchmark {
-// Return a vector containing the mean, median and standard devation information
-// (and any user-specified info) for the specified list of reports. If 'reports'
-// contains less than two non-errored runs an empty vector is returned
+// Return a vector containing the mean, median and standard deviation
+// information (and any user-specified info) for the specified list of reports.
+// If 'reports' contains less than two non-errored runs an empty vector is
+// returned
+BENCHMARK_EXPORT
std::vector<BenchmarkReporter::Run> ComputeStats(
const std::vector<BenchmarkReporter::Run>& reports);
+BENCHMARK_EXPORT
double StatisticsMean(const std::vector<double>& v);
+BENCHMARK_EXPORT
double StatisticsMedian(const std::vector<double>& v);
+BENCHMARK_EXPORT
double StatisticsStdDev(const std::vector<double>& v);
+BENCHMARK_EXPORT
double StatisticsCV(const std::vector<double>& v);
} // end namespace benchmark
diff --git a/third-party/benchmark/src/string_util.cc b/third-party/benchmark/src/string_util.cc
index 401fa13..c69e40a 100644
--- a/third-party/benchmark/src/string_util.cc
+++ b/third-party/benchmark/src/string_util.cc
@@ -11,16 +11,17 @@
#include <sstream>
#include "arraysize.h"
+#include "benchmark/benchmark.h"
namespace benchmark {
namespace {
-
// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta.
-const char kBigSIUnits[] = "kMGTPEZY";
+const char* const kBigSIUnits[] = {"k", "M", "G", "T", "P", "E", "Z", "Y"};
// Kibi, Mebi, Gibi, Tebi, Pebi, Exbi, Zebi, Yobi.
-const char kBigIECUnits[] = "KMGTPEZY";
+const char* const kBigIECUnits[] = {"Ki", "Mi", "Gi", "Ti",
+ "Pi", "Ei", "Zi", "Yi"};
// milli, micro, nano, pico, femto, atto, zepto, yocto.
-const char kSmallSIUnits[] = "munpfazy";
+const char* const kSmallSIUnits[] = {"m", "u", "n", "p", "f", "a", "z", "y"};
// We require that all three arrays have the same size.
static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits),
@@ -30,9 +31,8 @@
static const int64_t kUnitsSize = arraysize(kBigSIUnits);
-void ToExponentAndMantissa(double val, double thresh, int precision,
- double one_k, std::string* mantissa,
- int64_t* exponent) {
+void ToExponentAndMantissa(double val, int precision, double one_k,
+ std::string* mantissa, int64_t* exponent) {
std::stringstream mantissa_stream;
if (val < 0) {
@@ -43,8 +43,8 @@
// Adjust threshold so that it never excludes things which can't be rendered
// in 'precision' digits.
const double adjusted_threshold =
- std::max(thresh, 1.0 / std::pow(10.0, precision));
- const double big_threshold = adjusted_threshold * one_k;
+ std::max(1.0, 1.0 / std::pow(10.0, precision));
+ const double big_threshold = (adjusted_threshold * one_k) - 1;
const double small_threshold = adjusted_threshold;
// Values in ]simple_threshold,small_threshold[ will be printed as-is
const double simple_threshold = 0.01;
@@ -92,37 +92,20 @@
const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1);
if (index >= kUnitsSize) return "";
- const char* array =
+ const char* const* array =
(exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
- if (iec)
- return array[index] + std::string("i");
- else
- return std::string(1, array[index]);
+
+ return std::string(array[index]);
}
-std::string ToBinaryStringFullySpecified(double value, double threshold,
- int precision, double one_k = 1024.0) {
+std::string ToBinaryStringFullySpecified(double value, int precision,
+ Counter::OneK one_k) {
std::string mantissa;
int64_t exponent;
- ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa,
+ ToExponentAndMantissa(value, precision,
+ one_k == Counter::kIs1024 ? 1024.0 : 1000.0, &mantissa,
&exponent);
- return mantissa + ExponentToPrefix(exponent, false);
-}
-
-} // end namespace
-
-void AppendHumanReadable(int n, std::string* str) {
- std::stringstream ss;
- // Round down to the nearest SI prefix.
- ss << ToBinaryStringFullySpecified(n, 1.0, 0);
- *str += ss.str();
-}
-
-std::string HumanReadableNumber(double n, double one_k) {
- // 1.1 means that figures up to 1.1k should be shown with the next unit down;
- // this softens edge effects.
- // 1 means that we should show one decimal place of precision.
- return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
+ return mantissa + ExponentToPrefix(exponent, one_k == Counter::kIs1024);
}
std::string StrFormatImp(const char* msg, va_list args) {
@@ -133,21 +116,21 @@
// TODO(ericwf): use std::array for first attempt to avoid one memory
// allocation guess what the size might be
std::array<char, 256> local_buff;
- std::size_t size = local_buff.size();
+
// 2015-10-08: vsnprintf is used instead of snd::vsnprintf due to a limitation
// in the android-ndk
- auto ret = vsnprintf(local_buff.data(), size, msg, args_cp);
+ auto ret = vsnprintf(local_buff.data(), local_buff.size(), msg, args_cp);
va_end(args_cp);
// handle empty expansion
if (ret == 0) return std::string{};
- if (static_cast<std::size_t>(ret) < size)
+ if (static_cast<std::size_t>(ret) < local_buff.size())
return std::string(local_buff.data());
// we did not provide a long enough buffer on our first attempt.
// add 1 to size to account for null-byte in size cast to prevent overflow
- size = static_cast<std::size_t>(ret) + 1;
+ std::size_t size = static_cast<std::size_t>(ret) + 1;
auto buff_ptr = std::unique_ptr<char[]>(new char[size]);
// 2015-10-08: vsnprintf is used instead of snd::vsnprintf due to a limitation
// in the android-ndk
@@ -155,6 +138,12 @@
return std::string(buff_ptr.get());
}
+} // end namespace
+
+std::string HumanReadableNumber(double n, Counter::OneK one_k) {
+ return ToBinaryStringFullySpecified(n, 1, one_k);
+}
+
std::string StrFormat(const char* format, ...) {
va_list args;
va_start(args, format);
diff --git a/third-party/benchmark/src/string_util.h b/third-party/benchmark/src/string_util.h
index ff3b7da..731aa2c 100644
--- a/third-party/benchmark/src/string_util.h
+++ b/third-party/benchmark/src/string_util.h
@@ -4,15 +4,19 @@
#include <sstream>
#include <string>
#include <utility>
+#include <vector>
+#include "benchmark/benchmark.h"
+#include "benchmark/export.h"
+#include "check.h"
#include "internal_macros.h"
namespace benchmark {
-void AppendHumanReadable(int n, std::string* str);
+BENCHMARK_EXPORT
+std::string HumanReadableNumber(double n, Counter::OneK one_k);
-std::string HumanReadableNumber(double n, double one_k = 1024.0);
-
+BENCHMARK_EXPORT
#if defined(__MINGW32__)
__attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2)))
#elif defined(__GNUC__)
@@ -38,6 +42,7 @@
return ss.str();
}
+BENCHMARK_EXPORT
std::vector<std::string> StrSplit(const std::string& str, char delim);
// Disable lint checking for this block since it re-implements C functions.
diff --git a/third-party/benchmark/src/sysinfo.cc b/third-party/benchmark/src/sysinfo.cc
index 3a56e8c..46df973 100644
--- a/third-party/benchmark/src/sysinfo.cc
+++ b/third-party/benchmark/src/sysinfo.cc
@@ -22,6 +22,10 @@
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
+#if !defined(WINVER) || WINVER < 0x0600
+#undef WINVER
+#define WINVER 0x0600
+#endif // WINVER handling
#include <shlwapi.h>
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
#include <versionhelpers.h>
@@ -30,7 +34,7 @@
#include <codecvt>
#else
#include <fcntl.h>
-#ifndef BENCHMARK_OS_FUCHSIA
+#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT)
#include <sys/resource.h>
#endif
#include <sys/time.h>
@@ -45,10 +49,17 @@
#endif
#if defined(BENCHMARK_OS_SOLARIS)
#include <kstat.h>
+#include <netdb.h>
#endif
#if defined(BENCHMARK_OS_QNX)
#include <sys/syspage.h>
#endif
+#if defined(BENCHMARK_OS_QURT)
+#include <qurt.h>
+#endif
+#if defined(BENCHMARK_HAS_PTHREAD_AFFINITY)
+#include <pthread.h>
+#endif
#include <algorithm>
#include <array>
@@ -65,15 +76,17 @@
#include <limits>
#include <locale>
#include <memory>
+#include <random>
#include <sstream>
#include <utility>
+#include "benchmark/benchmark.h"
#include "check.h"
#include "cycleclock.h"
#include "internal_macros.h"
#include "log.h"
-#include "sleep.h"
#include "string_util.h"
+#include "timers.h"
namespace benchmark {
namespace {
@@ -98,67 +111,59 @@
/// `sysctl` with the result type it's to be interpreted as.
struct ValueUnion {
union DataT {
- uint32_t uint32_value;
- uint64_t uint64_value;
+ int32_t int32_value;
+ int64_t int64_value;
// For correct aliasing of union members from bytes.
char bytes[8];
};
using DataPtr = std::unique_ptr<DataT, decltype(&std::free)>;
// The size of the data union member + its trailing array size.
- size_t Size;
- DataPtr Buff;
+ std::size_t size;
+ DataPtr buff;
public:
- ValueUnion() : Size(0), Buff(nullptr, &std::free) {}
+ ValueUnion() : size(0), buff(nullptr, &std::free) {}
- explicit ValueUnion(size_t BuffSize)
- : Size(sizeof(DataT) + BuffSize),
- Buff(::new (std::malloc(Size)) DataT(), &std::free) {}
+ explicit ValueUnion(std::size_t buff_size)
+ : size(sizeof(DataT) + buff_size),
+ buff(::new (std::malloc(size)) DataT(), &std::free) {}
ValueUnion(ValueUnion&& other) = default;
- explicit operator bool() const { return bool(Buff); }
+ explicit operator bool() const { return bool(buff); }
- char* data() const { return Buff->bytes; }
+ char* data() const { return buff->bytes; }
std::string GetAsString() const { return std::string(data()); }
int64_t GetAsInteger() const {
- if (Size == sizeof(Buff->uint32_value))
- return static_cast<int32_t>(Buff->uint32_value);
- else if (Size == sizeof(Buff->uint64_value))
- return static_cast<int64_t>(Buff->uint64_value);
- BENCHMARK_UNREACHABLE();
- }
-
- uint64_t GetAsUnsigned() const {
- if (Size == sizeof(Buff->uint32_value))
- return Buff->uint32_value;
- else if (Size == sizeof(Buff->uint64_value))
- return Buff->uint64_value;
+ if (size == sizeof(buff->int32_value))
+ return buff->int32_value;
+ else if (size == sizeof(buff->int64_value))
+ return buff->int64_value;
BENCHMARK_UNREACHABLE();
}
template <class T, int N>
std::array<T, N> GetAsArray() {
- const int ArrSize = sizeof(T) * N;
- BM_CHECK_LE(ArrSize, Size);
- std::array<T, N> Arr;
- std::memcpy(Arr.data(), data(), ArrSize);
- return Arr;
+ const int arr_size = sizeof(T) * N;
+ BM_CHECK_LE(arr_size, size);
+ std::array<T, N> arr;
+ std::memcpy(arr.data(), data(), arr_size);
+ return arr;
}
};
-ValueUnion GetSysctlImp(std::string const& Name) {
+ValueUnion GetSysctlImp(std::string const& name) {
#if defined BENCHMARK_OS_OPENBSD
int mib[2];
mib[0] = CTL_HW;
- if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")) {
+ if ((name == "hw.ncpu") || (name == "hw.cpuspeed")) {
ValueUnion buff(sizeof(int));
- if (Name == "hw.ncpu") {
+ if (name == "hw.ncpu") {
mib[1] = HW_NCPU;
} else {
mib[1] = HW_CPUSPEED;
@@ -171,41 +176,41 @@
}
return ValueUnion();
#else
- size_t CurBuffSize = 0;
- if (sysctlbyname(Name.c_str(), nullptr, &CurBuffSize, nullptr, 0) == -1)
+ std::size_t cur_buff_size = 0;
+ if (sysctlbyname(name.c_str(), nullptr, &cur_buff_size, nullptr, 0) == -1)
return ValueUnion();
- ValueUnion buff(CurBuffSize);
- if (sysctlbyname(Name.c_str(), buff.data(), &buff.Size, nullptr, 0) == 0)
+ ValueUnion buff(cur_buff_size);
+ if (sysctlbyname(name.c_str(), buff.data(), &buff.size, nullptr, 0) == 0)
return buff;
return ValueUnion();
#endif
}
BENCHMARK_MAYBE_UNUSED
-bool GetSysctl(std::string const& Name, std::string* Out) {
- Out->clear();
- auto Buff = GetSysctlImp(Name);
- if (!Buff) return false;
- Out->assign(Buff.data());
+bool GetSysctl(std::string const& name, std::string* out) {
+ out->clear();
+ auto buff = GetSysctlImp(name);
+ if (!buff) return false;
+ out->assign(buff.data());
return true;
}
template <class Tp,
class = typename std::enable_if<std::is_integral<Tp>::value>::type>
-bool GetSysctl(std::string const& Name, Tp* Out) {
- *Out = 0;
- auto Buff = GetSysctlImp(Name);
- if (!Buff) return false;
- *Out = static_cast<Tp>(Buff.GetAsUnsigned());
+bool GetSysctl(std::string const& name, Tp* out) {
+ *out = 0;
+ auto buff = GetSysctlImp(name);
+ if (!buff) return false;
+ *out = static_cast<Tp>(buff.GetAsInteger());
return true;
}
template <class Tp, size_t N>
-bool GetSysctl(std::string const& Name, std::array<Tp, N>* Out) {
- auto Buff = GetSysctlImp(Name);
- if (!Buff) return false;
- *Out = Buff.GetAsArray<Tp, N>();
+bool GetSysctl(std::string const& name, std::array<Tp, N>* out) {
+ auto buff = GetSysctlImp(name);
+ if (!buff) return false;
+ *out = buff.GetAsArray<Tp, N>();
return true;
}
#endif
@@ -241,21 +246,21 @@
#endif
}
-int CountSetBitsInCPUMap(std::string Val) {
- auto CountBits = [](std::string Part) {
+int CountSetBitsInCPUMap(std::string val) {
+ auto CountBits = [](std::string part) {
using CPUMask = std::bitset<sizeof(std::uintptr_t) * CHAR_BIT>;
- Part = "0x" + Part;
- CPUMask Mask(benchmark::stoul(Part, nullptr, 16));
- return static_cast<int>(Mask.count());
+ part = "0x" + part;
+ CPUMask mask(benchmark::stoul(part, nullptr, 16));
+ return static_cast<int>(mask.count());
};
- size_t Pos;
+ std::size_t pos;
int total = 0;
- while ((Pos = Val.find(',')) != std::string::npos) {
- total += CountBits(Val.substr(0, Pos));
- Val = Val.substr(Pos + 1);
+ while ((pos = val.find(',')) != std::string::npos) {
+ total += CountBits(val.substr(0, pos));
+ val = val.substr(pos + 1);
}
- if (!Val.empty()) {
- total += CountBits(Val);
+ if (!val.empty()) {
+ total += CountBits(val);
}
return total;
}
@@ -264,16 +269,16 @@
std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS() {
std::vector<CPUInfo::CacheInfo> res;
std::string dir = "/sys/devices/system/cpu/cpu0/cache/";
- int Idx = 0;
+ int idx = 0;
while (true) {
CPUInfo::CacheInfo info;
- std::string FPath = StrCat(dir, "index", Idx++, "/");
- std::ifstream f(StrCat(FPath, "size").c_str());
+ std::string fpath = StrCat(dir, "index", idx++, "/");
+ std::ifstream f(StrCat(fpath, "size").c_str());
if (!f.is_open()) break;
std::string suffix;
f >> info.size;
if (f.fail())
- PrintErrorAndDie("Failed while reading file '", FPath, "size'");
+ PrintErrorAndDie("Failed while reading file '", fpath, "size'");
if (f.good()) {
f >> suffix;
if (f.bad())
@@ -284,13 +289,13 @@
else if (suffix == "K")
info.size *= 1024;
}
- if (!ReadFromFile(StrCat(FPath, "type"), &info.type))
- PrintErrorAndDie("Failed to read from file ", FPath, "type");
- if (!ReadFromFile(StrCat(FPath, "level"), &info.level))
- PrintErrorAndDie("Failed to read from file ", FPath, "level");
+ if (!ReadFromFile(StrCat(fpath, "type"), &info.type))
+ PrintErrorAndDie("Failed to read from file ", fpath, "type");
+ if (!ReadFromFile(StrCat(fpath, "level"), &info.level))
+ PrintErrorAndDie("Failed to read from file ", fpath, "level");
std::string map_str;
- if (!ReadFromFile(StrCat(FPath, "shared_cpu_map"), &map_str))
- PrintErrorAndDie("Failed to read from file ", FPath, "shared_cpu_map");
+ if (!ReadFromFile(StrCat(fpath, "shared_cpu_map"), &map_str))
+ PrintErrorAndDie("Failed to read from file ", fpath, "shared_cpu_map");
info.num_sharing = CountSetBitsInCPUMap(map_str);
res.push_back(info);
}
@@ -301,26 +306,26 @@
#ifdef BENCHMARK_OS_MACOSX
std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() {
std::vector<CPUInfo::CacheInfo> res;
- std::array<uint64_t, 4> CacheCounts{{0, 0, 0, 0}};
- GetSysctl("hw.cacheconfig", &CacheCounts);
+ std::array<int, 4> cache_counts{{0, 0, 0, 0}};
+ GetSysctl("hw.cacheconfig", &cache_counts);
struct {
std::string name;
std::string type;
int level;
- uint64_t num_sharing;
- } Cases[] = {{"hw.l1dcachesize", "Data", 1, CacheCounts[1]},
- {"hw.l1icachesize", "Instruction", 1, CacheCounts[1]},
- {"hw.l2cachesize", "Unified", 2, CacheCounts[2]},
- {"hw.l3cachesize", "Unified", 3, CacheCounts[3]}};
- for (auto& C : Cases) {
+ int num_sharing;
+ } cases[] = {{"hw.l1dcachesize", "Data", 1, cache_counts[1]},
+ {"hw.l1icachesize", "Instruction", 1, cache_counts[1]},
+ {"hw.l2cachesize", "Unified", 2, cache_counts[2]},
+ {"hw.l3cachesize", "Unified", 3, cache_counts[3]}};
+ for (auto& c : cases) {
int val;
- if (!GetSysctl(C.name, &val)) continue;
+ if (!GetSysctl(c.name, &val)) continue;
CPUInfo::CacheInfo info;
- info.type = C.type;
- info.level = C.level;
+ info.type = c.type;
+ info.level = c.level;
info.size = val;
- info.num_sharing = static_cast<int>(C.num_sharing);
+ info.num_sharing = c.num_sharing;
res.push_back(std::move(info));
}
return res;
@@ -334,7 +339,7 @@
using UPtr = std::unique_ptr<PInfo, decltype(&std::free)>;
GetLogicalProcessorInformation(nullptr, &buffer_size);
- UPtr buff((PInfo*)malloc(buffer_size), &std::free);
+ UPtr buff(static_cast<PInfo*>(std::malloc(buffer_size)), &std::free);
if (!GetLogicalProcessorInformation(buff.get(), &buffer_size))
PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ",
GetLastError());
@@ -345,16 +350,16 @@
for (; it != end; ++it) {
if (it->Relationship != RelationCache) continue;
using BitSet = std::bitset<sizeof(ULONG_PTR) * CHAR_BIT>;
- BitSet B(it->ProcessorMask);
+ BitSet b(it->ProcessorMask);
// To prevent duplicates, only consider caches where CPU 0 is specified
- if (!B.test(0)) continue;
- CInfo* Cache = &it->Cache;
+ if (!b.test(0)) continue;
+ const CInfo& cache = it->Cache;
CPUInfo::CacheInfo C;
- C.num_sharing = static_cast<int>(B.count());
- C.level = Cache->Level;
- C.size = Cache->Size;
+ C.num_sharing = static_cast<int>(b.count());
+ C.level = cache.Level;
+ C.size = cache.Size;
C.type = "Unknown";
- switch (Cache->Type) {
+ switch (cache.Type) {
case CacheUnified:
C.type = "Unified";
break;
@@ -417,6 +422,8 @@
return GetCacheSizesWindows();
#elif defined(BENCHMARK_OS_QNX)
return GetCacheSizesQNX();
+#elif defined(BENCHMARK_OS_QURT)
+ return std::vector<CPUInfo::CacheInfo>();
#else
return GetCacheSizesFromKVFS();
#endif
@@ -425,23 +432,32 @@
std::string GetSystemName() {
#if defined(BENCHMARK_OS_WINDOWS)
std::string str;
- const unsigned COUNT = MAX_COMPUTERNAME_LENGTH + 1;
+ static constexpr int COUNT = MAX_COMPUTERNAME_LENGTH + 1;
TCHAR hostname[COUNT] = {'\0'};
DWORD DWCOUNT = COUNT;
if (!GetComputerName(hostname, &DWCOUNT)) return std::string("");
#ifndef UNICODE
str = std::string(hostname, DWCOUNT);
#else
- // Using wstring_convert, Is deprecated in C++17
- using convert_type = std::codecvt_utf8<wchar_t>;
- std::wstring_convert<convert_type, wchar_t> converter;
- std::wstring wStr(hostname, DWCOUNT);
- str = converter.to_bytes(wStr);
+ // `WideCharToMultiByte` returns `0` when conversion fails.
+ int len = WideCharToMultiByte(CP_UTF8, WC_ERR_INVALID_CHARS, hostname,
+ DWCOUNT, NULL, 0, NULL, NULL);
+ str.resize(len);
+ WideCharToMultiByte(CP_UTF8, WC_ERR_INVALID_CHARS, hostname, DWCOUNT, &str[0],
+ str.size(), NULL, NULL);
#endif
return str;
-#else // defined(BENCHMARK_OS_WINDOWS)
+#elif defined(BENCHMARK_OS_QURT)
+ std::string str = "Hexagon DSP";
+ qurt_arch_version_t arch_version_struct;
+ if (qurt_sysenv_get_arch_version(&arch_version_struct) == QURT_EOK) {
+ str += " v";
+ str += std::to_string(arch_version_struct.arch_version);
+ }
+ return str;
+#else
#ifndef HOST_NAME_MAX
-#ifdef BENCHMARK_HAS_SYSCTL // BSD/Mac Doesnt have HOST_NAME_MAX defined
+#ifdef BENCHMARK_HAS_SYSCTL // BSD/Mac doesn't have HOST_NAME_MAX defined
#define HOST_NAME_MAX 64
#elif defined(BENCHMARK_OS_NACL)
#define HOST_NAME_MAX 64
@@ -449,6 +465,8 @@
#define HOST_NAME_MAX 154
#elif defined(BENCHMARK_OS_RTEMS)
#define HOST_NAME_MAX 256
+#elif defined(BENCHMARK_OS_SOLARIS)
+#define HOST_NAME_MAX MAXHOSTNAMELEN
#elif defined(BENCHMARK_OS_ZOS)
#define HOST_NAME_MAX _POSIX_HOST_NAME_MAX
#else
@@ -463,12 +481,11 @@
#endif // Catch-all POSIX block.
}
-int GetNumCPUs() {
+int GetNumCPUsImpl() {
#ifdef BENCHMARK_HAS_SYSCTL
- int NumCPU = -1;
- if (GetSysctl("hw.ncpu", &NumCPU)) return NumCPU;
- fprintf(stderr, "Err: %s\n", strerror(errno));
- std::exit(EXIT_FAILURE);
+ int num_cpu = -1;
+ if (GetSysctl("hw.ncpu", &num_cpu)) return num_cpu;
+ PrintErrorAndDie("Err: ", strerror(errno));
#elif defined(BENCHMARK_OS_WINDOWS)
SYSTEM_INFO sysinfo;
// Use memset as opposed to = {} to avoid GCC missing initializer false
@@ -480,64 +497,155 @@
// group
#elif defined(BENCHMARK_OS_SOLARIS)
// Returns -1 in case of a failure.
- int NumCPU = sysconf(_SC_NPROCESSORS_ONLN);
- if (NumCPU < 0) {
- fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n",
- strerror(errno));
+ long num_cpu = sysconf(_SC_NPROCESSORS_ONLN);
+ if (num_cpu < 0) {
+ PrintErrorAndDie("sysconf(_SC_NPROCESSORS_ONLN) failed with error: ",
+ strerror(errno));
}
- return NumCPU;
+ return (int)num_cpu;
#elif defined(BENCHMARK_OS_QNX)
return static_cast<int>(_syspage_ptr->num_cpu);
+#elif defined(BENCHMARK_OS_QURT)
+ qurt_sysenv_max_hthreads_t hardware_threads;
+ if (qurt_sysenv_get_max_hw_threads(&hardware_threads) != QURT_EOK) {
+ hardware_threads.max_hthreads = 1;
+ }
+ return hardware_threads.max_hthreads;
#else
- int NumCPUs = 0;
- int MaxID = -1;
+ int num_cpus = 0;
+ int max_id = -1;
std::ifstream f("/proc/cpuinfo");
if (!f.is_open()) {
- std::cerr << "failed to open /proc/cpuinfo\n";
- return -1;
+ PrintErrorAndDie("Failed to open /proc/cpuinfo");
}
+#if defined(__alpha__)
+ const std::string Key = "cpus detected";
+#else
const std::string Key = "processor";
+#endif
std::string ln;
while (std::getline(f, ln)) {
if (ln.empty()) continue;
- size_t SplitIdx = ln.find(':');
+ std::size_t split_idx = ln.find(':');
std::string value;
#if defined(__s390__)
// s390 has another format in /proc/cpuinfo
// it needs to be parsed differently
- if (SplitIdx != std::string::npos)
- value = ln.substr(Key.size() + 1, SplitIdx - Key.size() - 1);
+ if (split_idx != std::string::npos)
+ value = ln.substr(Key.size() + 1, split_idx - Key.size() - 1);
#else
- if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
+ if (split_idx != std::string::npos) value = ln.substr(split_idx + 1);
#endif
if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) {
- NumCPUs++;
+ num_cpus++;
if (!value.empty()) {
- int CurID = benchmark::stoi(value);
- MaxID = std::max(CurID, MaxID);
+ const int cur_id = benchmark::stoi(value);
+ max_id = std::max(cur_id, max_id);
}
}
}
if (f.bad()) {
- std::cerr << "Failure reading /proc/cpuinfo\n";
- return -1;
+ PrintErrorAndDie("Failure reading /proc/cpuinfo");
}
if (!f.eof()) {
- std::cerr << "Failed to read to end of /proc/cpuinfo\n";
- return -1;
+ PrintErrorAndDie("Failed to read to end of /proc/cpuinfo");
}
f.close();
- if ((MaxID + 1) != NumCPUs) {
+ if ((max_id + 1) != num_cpus) {
fprintf(stderr,
"CPU ID assignments in /proc/cpuinfo seem messed up."
" This is usually caused by a bad BIOS.\n");
}
- return NumCPUs;
+ return num_cpus;
#endif
BENCHMARK_UNREACHABLE();
}
+int GetNumCPUs() {
+ const int num_cpus = GetNumCPUsImpl();
+ if (num_cpus < 1) {
+ PrintErrorAndDie(
+ "Unable to extract number of CPUs. If your platform uses "
+ "/proc/cpuinfo, custom support may need to be added.");
+ }
+ return num_cpus;
+}
+
+class ThreadAffinityGuard final {
+ public:
+ ThreadAffinityGuard() : reset_affinity(SetAffinity()) {
+ if (!reset_affinity)
+ std::cerr << "***WARNING*** Failed to set thread affinity. Estimated CPU "
+ "frequency may be incorrect."
+ << std::endl;
+ }
+
+ ~ThreadAffinityGuard() {
+ if (!reset_affinity) return;
+
+#if defined(BENCHMARK_HAS_PTHREAD_AFFINITY)
+ int ret = pthread_setaffinity_np(self, sizeof(previous_affinity),
+ &previous_affinity);
+ if (ret == 0) return;
+#elif defined(BENCHMARK_OS_WINDOWS_WIN32)
+ DWORD_PTR ret = SetThreadAffinityMask(self, previous_affinity);
+ if (ret != 0) return;
+#endif // def BENCHMARK_HAS_PTHREAD_AFFINITY
+ PrintErrorAndDie("Failed to reset thread affinity");
+ }
+
+ ThreadAffinityGuard(ThreadAffinityGuard&&) = delete;
+ ThreadAffinityGuard(const ThreadAffinityGuard&) = delete;
+ ThreadAffinityGuard& operator=(ThreadAffinityGuard&&) = delete;
+ ThreadAffinityGuard& operator=(const ThreadAffinityGuard&) = delete;
+
+ private:
+ bool SetAffinity() {
+#if defined(BENCHMARK_HAS_PTHREAD_AFFINITY)
+ int ret;
+ self = pthread_self();
+ ret = pthread_getaffinity_np(self, sizeof(previous_affinity),
+ &previous_affinity);
+ if (ret != 0) return false;
+
+ cpu_set_t affinity;
+ memcpy(&affinity, &previous_affinity, sizeof(affinity));
+
+ bool is_first_cpu = true;
+
+ for (int i = 0; i < CPU_SETSIZE; ++i)
+ if (CPU_ISSET(i, &affinity)) {
+ if (is_first_cpu)
+ is_first_cpu = false;
+ else
+ CPU_CLR(i, &affinity);
+ }
+
+ if (is_first_cpu) return false;
+
+ ret = pthread_setaffinity_np(self, sizeof(affinity), &affinity);
+ return ret == 0;
+#elif defined(BENCHMARK_OS_WINDOWS_WIN32)
+ self = GetCurrentThread();
+ DWORD_PTR mask = static_cast<DWORD_PTR>(1) << GetCurrentProcessorNumber();
+ previous_affinity = SetThreadAffinityMask(self, mask);
+ return previous_affinity != 0;
+#else
+ return false;
+#endif // def BENCHMARK_HAS_PTHREAD_AFFINITY
+ }
+
+#if defined(BENCHMARK_HAS_PTHREAD_AFFINITY)
+ pthread_t self;
+ cpu_set_t previous_affinity;
+#elif defined(BENCHMARK_OS_WINDOWS_WIN32)
+ HANDLE self;
+ DWORD_PTR previous_affinity;
+#endif // def BENCHMARK_HAS_PTHREAD_AFFINITY
+ bool reset_affinity;
+};
+
double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) {
// Currently, scaling is only used on linux path here,
// suppress diagnostics about it being unused on other paths.
@@ -566,7 +674,7 @@
&freq)) {
// The value is in kHz (as the file name suggests). For example, on a
// 2GHz warpstation, the file contains the value "2000000".
- return freq * 1000.0;
+ return static_cast<double>(freq) * 1000.0;
}
const double error_value = -1;
@@ -578,7 +686,7 @@
return error_value;
}
- auto startsWithKey = [](std::string const& Value, std::string const& Key) {
+ auto StartsWithKey = [](std::string const& Value, std::string const& Key) {
if (Key.size() > Value.size()) return false;
auto Cmp = [&](char X, char Y) {
return std::tolower(X) == std::tolower(Y);
@@ -589,18 +697,18 @@
std::string ln;
while (std::getline(f, ln)) {
if (ln.empty()) continue;
- size_t SplitIdx = ln.find(':');
+ std::size_t split_idx = ln.find(':');
std::string value;
- if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
+ if (split_idx != std::string::npos) value = ln.substr(split_idx + 1);
// When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only
// accept positive values. Some environments (virtual machines) report zero,
// which would cause infinite looping in WallTime_Init.
- if (startsWithKey(ln, "cpu MHz")) {
+ if (StartsWithKey(ln, "cpu MHz")) {
if (!value.empty()) {
double cycles_per_second = benchmark::stod(value) * 1000000.0;
if (cycles_per_second > 0) return cycles_per_second;
}
- } else if (startsWithKey(ln, "bogomips")) {
+ } else if (StartsWithKey(ln, "bogomips")) {
if (!value.empty()) {
bogo_clock = benchmark::stod(value) * 1000000.0;
if (bogo_clock < 0.0) bogo_clock = error_value;
@@ -622,7 +730,7 @@
if (bogo_clock >= 0.0) return bogo_clock;
#elif defined BENCHMARK_HAS_SYSCTL
- constexpr auto* FreqStr =
+ constexpr auto* freqStr =
#if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD)
"machdep.tsc_freq";
#elif defined BENCHMARK_OS_OPENBSD
@@ -634,14 +742,17 @@
#endif
unsigned long long hz = 0;
#if defined BENCHMARK_OS_OPENBSD
- if (GetSysctl(FreqStr, &hz)) return hz * 1000000;
+ if (GetSysctl(freqStr, &hz)) return hz * 1000000;
#else
- if (GetSysctl(FreqStr, &hz)) return hz;
+ if (GetSysctl(freqStr, &hz)) return hz;
#endif
fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
- FreqStr, strerror(errno));
+ freqStr, strerror(errno));
+ fprintf(stderr,
+ "This does not affect benchmark measurements, only the "
+ "metadata output.\n");
-#elif defined BENCHMARK_OS_WINDOWS
+#elif defined BENCHMARK_OS_WINDOWS_WIN32
// In NT, read MHz from the registry. If we fail to do so or we're in win9x
// then make a crude estimate.
DWORD data, data_size = sizeof(data);
@@ -650,15 +761,16 @@
SHGetValueA(HKEY_LOCAL_MACHINE,
"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
"~MHz", nullptr, &data, &data_size)))
- return static_cast<double>((int64_t)data *
- (int64_t)(1000 * 1000)); // was mhz
+ return static_cast<double>(static_cast<int64_t>(data) *
+ static_cast<int64_t>(1000 * 1000)); // was mhz
#elif defined(BENCHMARK_OS_SOLARIS)
kstat_ctl_t* kc = kstat_open();
if (!kc) {
std::cerr << "failed to open /dev/kstat\n";
return -1;
}
- kstat_t* ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0");
+ kstat_t* ksp = kstat_lookup(kc, const_cast<char*>("cpu_info"), -1,
+ const_cast<char*>("cpu_info0"));
if (!ksp) {
std::cerr << "failed to lookup in /dev/kstat\n";
return -1;
@@ -667,8 +779,8 @@
std::cerr << "failed to read from /dev/kstat\n";
return -1;
}
- kstat_named_t* knp =
- (kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz");
+ kstat_named_t* knp = (kstat_named_t*)kstat_data_lookup(
+ ksp, const_cast<char*>("current_clock_Hz"));
if (!knp) {
std::cerr << "failed to lookup data in /dev/kstat\n";
return -1;
@@ -682,22 +794,55 @@
kstat_close(kc);
return clock_hz;
#elif defined(BENCHMARK_OS_QNX)
- return static_cast<double>((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) *
- (int64_t)(1000 * 1000));
+ return static_cast<double>(
+ static_cast<int64_t>(SYSPAGE_ENTRY(cpuinfo)->speed) *
+ static_cast<int64_t>(1000 * 1000));
+#elif defined(BENCHMARK_OS_QURT)
+ // QuRT doesn't provide any API to query Hexagon frequency.
+ return 1000000000;
#endif
// If we've fallen through, attempt to roughly estimate the CPU clock rate.
- const int estimate_time_ms = 1000;
+
+ // Make sure to use the same cycle counter when starting and stopping the
+ // cycle timer. We just pin the current thread to a cpu in the previous
+ // affinity set.
+ ThreadAffinityGuard affinity_guard;
+
+ static constexpr double estimate_time_s = 1.0;
+ const double start_time = ChronoClockNow();
const auto start_ticks = cycleclock::Now();
- SleepForMilliseconds(estimate_time_ms);
- return static_cast<double>(cycleclock::Now() - start_ticks);
+
+ // Impose load instead of calling sleep() to make sure the cycle counter
+ // works.
+ using PRNG = std::minstd_rand;
+ using Result = PRNG::result_type;
+ PRNG rng(static_cast<Result>(start_ticks));
+
+ Result state = 0;
+
+ do {
+ static constexpr size_t batch_size = 10000;
+ rng.discard(batch_size);
+ state += rng();
+
+ } while (ChronoClockNow() - start_time < estimate_time_s);
+
+ DoNotOptimize(state);
+
+ const auto end_ticks = cycleclock::Now();
+ const double end_time = ChronoClockNow();
+
+ return static_cast<double>(end_ticks - start_ticks) / (end_time - start_time);
+ // Reset the affinity of current thread when the lifetime of affinity_guard
+ // ends.
}
std::vector<double> GetLoadAvg() {
#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \
defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \
defined BENCHMARK_OS_OPENBSD || defined BENCHMARK_OS_DRAGONFLY) && \
- !defined(__ANDROID__)
- constexpr int kMaxSamples = 3;
+ !(defined(__ANDROID__) && __ANDROID_API__ < 29)
+ static constexpr int kMaxSamples = 3;
std::vector<double> res(kMaxSamples, 0.0);
const int nelem = getloadavg(res.data(), kMaxSamples);
if (nelem < 1) {
diff --git a/third-party/benchmark/src/thread_manager.h b/third-party/benchmark/src/thread_manager.h
index 4680285..819b3c4 100644
--- a/third-party/benchmark/src/thread_manager.h
+++ b/third-party/benchmark/src/thread_manager.h
@@ -43,8 +43,8 @@
double manual_time_used = 0;
int64_t complexity_n = 0;
std::string report_label_;
- std::string error_message_;
- bool has_error_ = false;
+ std::string skip_message_;
+ internal::Skipped skipped_ = internal::NotSkipped;
UserCounters counters;
};
GUARDED_BY(GetBenchmarkMutex()) Result results;
diff --git a/third-party/benchmark/src/timers.cc b/third-party/benchmark/src/timers.cc
index ed35c01..667e7b2 100644
--- a/third-party/benchmark/src/timers.cc
+++ b/third-party/benchmark/src/timers.cc
@@ -23,7 +23,7 @@
#include <windows.h>
#else
#include <fcntl.h>
-#ifndef BENCHMARK_OS_FUCHSIA
+#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT)
#include <sys/resource.h>
#endif
#include <sys/time.h>
@@ -38,6 +38,9 @@
#include <mach/mach_port.h>
#include <mach/thread_act.h>
#endif
+#if defined(BENCHMARK_OS_QURT)
+#include <qurt.h>
+#endif
#endif
#ifdef BENCHMARK_OS_EMSCRIPTEN
@@ -56,7 +59,6 @@
#include "check.h"
#include "log.h"
-#include "sleep.h"
#include "string_util.h"
namespace benchmark {
@@ -65,6 +67,9 @@
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
+#if defined(__NVCOMPILER)
+#pragma diag_suppress declared_but_not_referenced
+#endif
namespace {
#if defined(BENCHMARK_OS_WINDOWS)
@@ -79,7 +84,7 @@
static_cast<double>(user.QuadPart)) *
1e-7;
}
-#elif !defined(BENCHMARK_OS_FUCHSIA)
+#elif !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT)
double MakeTime(struct rusage const& ru) {
return (static_cast<double>(ru.ru_utime.tv_sec) +
static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
@@ -97,7 +102,8 @@
#endif
#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID)
double MakeTime(struct timespec const& ts) {
- return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9);
+ return static_cast<double>(ts.tv_sec) +
+ (static_cast<double>(ts.tv_nsec) * 1e-9);
}
#endif
@@ -119,11 +125,15 @@
&user_time))
return MakeTime(kernel_time, user_time);
DiagnoseAndExit("GetProccessTimes() failed");
+#elif defined(BENCHMARK_OS_QURT)
+ return static_cast<double>(
+ qurt_timer_timetick_to_us(qurt_timer_get_ticks())) *
+ 1.0e-6;
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
// clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten.
// Use Emscripten-specific API. Reported CPU time would be exactly the
// same as total time, but this is ok because there aren't long-latency
- // syncronous system calls in Emscripten.
+ // synchronous system calls in Emscripten.
return emscripten_get_now() * 1e-3;
#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX)
// FIXME We want to use clock_gettime, but its not available in MacOS 10.11.
@@ -149,6 +159,10 @@
GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time,
&user_time);
return MakeTime(kernel_time, user_time);
+#elif defined(BENCHMARK_OS_QURT)
+ return static_cast<double>(
+ qurt_timer_timetick_to_us(qurt_timer_get_ticks())) *
+ 1.0e-6;
#elif defined(BENCHMARK_OS_MACOSX)
// FIXME We want to use clock_gettime, but its not available in MacOS 10.11.
// See https://github.com/google/benchmark/pull/292