// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "benchmark/benchmark.h"

#include "benchmark_api_internal.h"
#include "benchmark_runner.h"
#include "internal_macros.h"

#ifndef BENCHMARK_OS_WINDOWS
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif

#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <limits>
#include <map>
#include <memory>
#include <random>
#include <string>
#include <thread>
#include <utility>

#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
#include "perf_counters.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "thread_manager.h"
#include "thread_timer.h"

// Print a list of benchmarks. This option overrides all other options.
DEFINE_bool(benchmark_list_tests, false);

// A regular expression that specifies the set of benchmarks to execute. If
// this flag is empty, or if this flag is the string "all", all benchmarks
// linked into the binary are run.
DEFINE_string(benchmark_filter, ".");

// Minimum number of seconds we should run benchmark before results are
// considered significant. For cpu-time based tests, this is the lower bound
// on the total cpu time used by all threads that make up the test. For
// real-time based tests, this is the lower bound on the elapsed time of the
// benchmark execution, regardless of number of threads.
DEFINE_double(benchmark_min_time, 0.5);

// The number of runs of each benchmark. If greater than 1, the mean and
// standard deviation of the runs will be reported.
DEFINE_int32(benchmark_repetitions, 1);

// If set, enable random interleaving of repetitions of all benchmarks.
// See http://github.com/google/benchmark/issues/1051 for details.
DEFINE_bool(benchmark_enable_random_interleaving, false);

// Whether to report the result of each individual benchmark repetition. When
// 'true' is specified, only the mean, standard deviation, and other aggregate
// statistics are reported for repeated benchmarks. Affects all reporters.
DEFINE_bool(benchmark_report_aggregates_only, false);

// Whether to display the result of each individual benchmark repetition. When
// 'true' is specified, only the mean, standard deviation, and other aggregate
// statistics are displayed for repeated benchmarks. Unlike
// benchmark_report_aggregates_only, this only affects the display reporter,
// *not* the file reporter, which will still contain all the output.
DEFINE_bool(benchmark_display_aggregates_only, false);

// The format to use for console output.
// Valid values are 'console', 'json', or 'csv'.
DEFINE_string(benchmark_format, "console");

// The format to use for file output.
// Valid values are 'console', 'json', or 'csv'.
DEFINE_string(benchmark_out_format, "json");

// The file to write additional output to.
DEFINE_string(benchmark_out, "");

// Whether to use colors in the output. Valid values:
// 'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use colors if
// the output is being sent to a terminal and the TERM environment variable is
// set to a terminal type that supports colors.
DEFINE_string(benchmark_color, "auto");

// Whether to use tabular format when printing user counters to the console.
// Valid values: 'true'/'yes'/1, 'false'/'no'/0. Defaults to false.
DEFINE_bool(benchmark_counters_tabular, false);

// The level of verbose logging to output.
DEFINE_int32(v, 0);

// List of additional perf counters to collect, in libpfm format. For more
// information about libpfm: https://man7.org/linux/man-pages/man3/libpfm.3.html
DEFINE_string(benchmark_perf_counters, "");

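// For reference, these flags are normally supplied on the command line of the
// benchmark binary. An illustrative invocation (the binary name and filter
// pattern below are hypothetical):
//
//   ./my_benchmark --benchmark_filter=BM_memcpy.* \
//                  --benchmark_repetitions=5 \
//                  --benchmark_report_aggregates_only=true \
//                  --benchmark_out=results.json --benchmark_out_format=json
//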
namespace benchmark {
namespace internal {

// Extra context to include in the output formatted as comma-separated key-value
// pairs. Kept internal as it's only used for parsing from env/command line.
DEFINE_kvpairs(benchmark_context, {});

std::map<std::string, std::string>* global_context = nullptr;

// FIXME: wouldn't LTO mess this up?
void UseCharPointer(char const volatile*) {}

}  // namespace internal

State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
             int thread_i, int n_threads, internal::ThreadTimer* timer,
             internal::ThreadManager* manager,
             internal::PerfCountersMeasurement* perf_counters_measurement)
    : total_iterations_(0),
      batch_leftover_(0),
      max_iterations(max_iters),
      started_(false),
      finished_(false),
      error_occurred_(false),
      range_(ranges),
      complexity_n_(0),
      counters(),
      thread_index(thread_i),
      threads(n_threads),
      timer_(timer),
      manager_(manager),
      perf_counters_measurement_(perf_counters_measurement) {
  CHECK(max_iterations != 0) << "At least one iteration must be run";
  CHECK_LT(thread_index, threads) << "thread_index must be less than threads";

  // Note: The use of offsetof below is technically undefined until C++17
  // because State is not a standard layout type. However, all compilers
  // currently provide well-defined behavior as an extension (which is
  // demonstrated since constexpr evaluation must diagnose all undefined
  // behavior). However, GCC and Clang also warn about this use of offsetof,
  // which must be suppressed.
#if defined(__INTEL_COMPILER)
#pragma warning push
#pragma warning(disable : 1875)
#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winvalid-offsetof"
#endif
  // Offset tests to ensure commonly accessed data is on the first cache line.
  const int cache_line_size = 64;
  static_assert(offsetof(State, error_occurred_) <=
                    (cache_line_size - sizeof(error_occurred_)),
                "");
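  // In other words, assuming a State object starts at a cache-line-aligned
  // address, every member up to and including error_occurred_ lands in the
  // first 64-byte cache line, i.e. the data the KeepRunning() hot path touches.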
#if defined(__INTEL_COMPILER)
#pragma warning pop
#elif defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
}

void State::PauseTiming() {
  // Add in time accumulated so far
  CHECK(started_ && !finished_ && !error_occurred_);
  timer_->StopTimer();
  if (perf_counters_measurement_) {
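    // Fold the perf counter readings collected during the timed region into
    // user counters, reported as averages over the number of iterations.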
    auto measurements = perf_counters_measurement_->StopAndGetMeasurements();
    for (const auto& name_and_measurement : measurements) {
      auto name = name_and_measurement.first;
      auto measurement = name_and_measurement.second;
      CHECK_EQ(counters[name], 0.0);
      counters[name] = Counter(measurement, Counter::kAvgIterations);
    }
  }
}

void State::ResumeTiming() {
  CHECK(started_ && !finished_ && !error_occurred_);
  timer_->StartTimer();
  if (perf_counters_measurement_) {
    perf_counters_measurement_->Start();
  }
}

void State::SkipWithError(const char* msg) {
  CHECK(msg);
  error_occurred_ = true;
  {
    MutexLock l(manager_->GetBenchmarkMutex());
    if (manager_->results.has_error_ == false) {
      manager_->results.error_message_ = msg;
      manager_->results.has_error_ = true;
    }
  }
  total_iterations_ = 0;
  if (timer_->running()) timer_->StopTimer();
}

void State::SetIterationTime(double seconds) {
  timer_->SetIterationTime(seconds);
}

void State::SetLabel(const char* label) {
  MutexLock l(manager_->GetBenchmarkMutex());
  manager_->results.report_label_ = label;
}

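// The two functions below implement the iteration-countdown protocol behind
// State::KeepRunning(), State::KeepRunningBatch(), and the range-for loop over
// State: total_iterations_ starts at max_iterations and counts down toward
// zero, so the per-iteration check is a cheap comparison against zero. The
// range-for over State invokes StartKeepRunning() before the first iteration
// and FinishKeepRunning() once the count is exhausted. A typical benchmark
// (illustrative only; the name BM_Example is hypothetical) drives it like so:
//
//   static void BM_Example(benchmark::State& state) {
//     for (auto _ : state) {
//       // ... code under measurement ...
//     }
//   }
//   BENCHMARK(BM_Example);
//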
void State::StartKeepRunning() {
  CHECK(!started_ && !finished_);
  started_ = true;
  total_iterations_ = error_occurred_ ? 0 : max_iterations;
  manager_->StartStopBarrier();
  if (!error_occurred_) ResumeTiming();
}

void State::FinishKeepRunning() {
  CHECK(started_ && (!finished_ || error_occurred_));
  if (!error_occurred_) {
    PauseTiming();
  }
  // Total iterations has now wrapped around past 0. Fix this.
  total_iterations_ = 0;
  finished_ = true;
  manager_->StartStopBarrier();
}

namespace internal {
namespace {

// Flushes streams after invoking reporter methods that write to them. This
// ensures users get timely updates even when streams are not line-buffered.
void FlushStreams(BenchmarkReporter* reporter) {
  if (!reporter) return;
  std::flush(reporter->GetOutputStream());
  std::flush(reporter->GetErrorStream());
}

// Reports in both display and file reporters.
void Report(BenchmarkReporter* display_reporter,
            BenchmarkReporter* file_reporter, const RunResults& run_results) {
  auto report_one = [](BenchmarkReporter* reporter, bool aggregates_only,
                       const RunResults& results) {
    assert(reporter);
    // If there are no aggregates, do output non-aggregates.
    aggregates_only &= !results.aggregates_only.empty();
    if (!aggregates_only) reporter->ReportRuns(results.non_aggregates);
    if (!results.aggregates_only.empty())
      reporter->ReportRuns(results.aggregates_only);
  };

  report_one(display_reporter, run_results.display_report_aggregates_only,
             run_results);
  if (file_reporter)
    report_one(file_reporter, run_results.file_report_aggregates_only,
               run_results);

  FlushStreams(display_reporter);
  FlushStreams(file_reporter);
}

void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
                   BenchmarkReporter* display_reporter,
                   BenchmarkReporter* file_reporter) {
  // Note the file_reporter can be null.
  CHECK(display_reporter != nullptr);

  // Determine the width of the name field using a minimum width of 10.
  bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
  size_t name_field_width = 10;
  size_t stat_field_width = 0;
  for (const BenchmarkInstance& benchmark : benchmarks) {
    name_field_width =
        std::max<size_t>(name_field_width, benchmark.name().str().size());
    might_have_aggregates |= benchmark.repetitions() > 1;

    for (const auto& Stat : benchmark.statistics())
      stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
  }
  if (might_have_aggregates) name_field_width += 1 + stat_field_width;

  // Print header here
  BenchmarkReporter::Context context;
  context.name_field_width = name_field_width;

  // Keep track of running times of all instances of each benchmark family.
  std::map<int /*family_index*/, BenchmarkReporter::PerFamilyRunReports>
      per_family_reports;

  if (display_reporter->ReportContext(context) &&
      (!file_reporter || file_reporter->ReportContext(context))) {
    FlushStreams(display_reporter);
    FlushStreams(file_reporter);

    size_t num_repetitions_total = 0;

    std::vector<internal::BenchmarkRunner> runners;
    runners.reserve(benchmarks.size());
    for (const BenchmarkInstance& benchmark : benchmarks) {
      BenchmarkReporter::PerFamilyRunReports* reports_for_family = nullptr;
      if (benchmark.complexity() != oNone)
        reports_for_family = &per_family_reports[benchmark.family_index()];

      runners.emplace_back(benchmark, reports_for_family);
      int num_repeats_of_this_instance = runners.back().GetNumRepeats();
      num_repetitions_total += num_repeats_of_this_instance;
      if (reports_for_family)
        reports_for_family->num_runs_total += num_repeats_of_this_instance;
    }
    assert(runners.size() == benchmarks.size() && "Unexpected runner count.");

    std::vector<int> repetition_indices;
    repetition_indices.reserve(num_repetitions_total);
    for (size_t runner_index = 0, num_runners = runners.size();
         runner_index != num_runners; ++runner_index) {
      const internal::BenchmarkRunner& runner = runners[runner_index];
      std::fill_n(std::back_inserter(repetition_indices),
                  runner.GetNumRepeats(), runner_index);
    }
    assert(repetition_indices.size() == num_repetitions_total &&
           "Unexpected number of repetition indexes.");

    if (FLAGS_benchmark_enable_random_interleaving) {
      std::random_device rd;
      std::mt19937 g(rd());
      std::shuffle(repetition_indices.begin(), repetition_indices.end(), g);
    }

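    // Run the repetitions. Each entry in repetition_indices names the runner
    // that performs its next repetition: without random interleaving this is
    // simply every benchmark's repetitions back to back; with interleaving the
    // order has been shuffled above.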
    for (size_t repetition_index : repetition_indices) {
      internal::BenchmarkRunner& runner = runners[repetition_index];
      runner.DoOneRepetition();
      if (runner.HasRepeatsRemaining()) continue;
      // FIXME: report each repetition separately, not all of them in bulk.

      RunResults run_results = runner.GetResults();

      // Maybe calculate complexity report
      if (const auto* reports_for_family = runner.GetReportsForFamily()) {
        if (reports_for_family->num_runs_done ==
            reports_for_family->num_runs_total) {
          auto additional_run_stats = ComputeBigO(reports_for_family->Runs);
          run_results.aggregates_only.insert(run_results.aggregates_only.end(),
                                             additional_run_stats.begin(),
                                             additional_run_stats.end());
          per_family_reports.erase(
              (int)reports_for_family->Runs.front().family_index);
        }
      }

      Report(display_reporter, file_reporter, run_results);
    }
  }
  display_reporter->Finalize();
  if (file_reporter) file_reporter->Finalize();
  FlushStreams(display_reporter);
  FlushStreams(file_reporter);
}

// Disable deprecated warnings temporarily because we need to reference
// CSVReporter but don't want to trigger -Werror=deprecated-declarations
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif

std::unique_ptr<BenchmarkReporter> CreateReporter(
    std::string const& name, ConsoleReporter::OutputOptions output_opts) {
  typedef std::unique_ptr<BenchmarkReporter> PtrType;
  if (name == "console") {
    return PtrType(new ConsoleReporter(output_opts));
  } else if (name == "json") {
    return PtrType(new JSONReporter);
  } else if (name == "csv") {
    return PtrType(new CSVReporter);
  } else {
    std::cerr << "Unexpected format: '" << name << "'\n";
    std::exit(1);
  }
}

#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif

}  // end namespace

bool IsZero(double n) {
  return std::abs(n) < std::numeric_limits<double>::epsilon();
}

ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
  int output_opts = ConsoleReporter::OO_Defaults;
  auto is_benchmark_color = [force_no_color]() -> bool {
    if (force_no_color) {
      return false;
    }
    if (FLAGS_benchmark_color == "auto") {
      return IsColorTerminal();
    }
    return IsTruthyFlagValue(FLAGS_benchmark_color);
  };
  if (is_benchmark_color()) {
    output_opts |= ConsoleReporter::OO_Color;
  } else {
    output_opts &= ~ConsoleReporter::OO_Color;
  }
  if (FLAGS_benchmark_counters_tabular) {
    output_opts |= ConsoleReporter::OO_Tabular;
  } else {
    output_opts &= ~ConsoleReporter::OO_Tabular;
  }
  return static_cast<ConsoleReporter::OutputOptions>(output_opts);
}

}  // end namespace internal

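// For context: these entry points are normally reached through the
// BENCHMARK_MAIN() macro defined in benchmark.h, which expands to roughly the
// following (a sketch, not the exact macro text):
//
//   int main(int argc, char** argv) {
//     ::benchmark::Initialize(&argc, argv);
//     if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
//     ::benchmark::RunSpecifiedBenchmarks();
//     ::benchmark::Shutdown();
//     return 0;
//   }
//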
size_t RunSpecifiedBenchmarks() {
  return RunSpecifiedBenchmarks(nullptr, nullptr);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) {
  return RunSpecifiedBenchmarks(display_reporter, nullptr);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
                              BenchmarkReporter* file_reporter) {
  std::string spec = FLAGS_benchmark_filter;
  if (spec.empty() || spec == "all")
    spec = ".";  // Regexp that matches all benchmarks

  // Setup the reporters
  std::ofstream output_file;
  std::unique_ptr<BenchmarkReporter> default_display_reporter;
  std::unique_ptr<BenchmarkReporter> default_file_reporter;
  if (!display_reporter) {
    default_display_reporter = internal::CreateReporter(
        FLAGS_benchmark_format, internal::GetOutputOptions());
    display_reporter = default_display_reporter.get();
  }
  auto& Out = display_reporter->GetOutputStream();
  auto& Err = display_reporter->GetErrorStream();

  std::string const& fname = FLAGS_benchmark_out;
  if (fname.empty() && file_reporter) {
    Err << "A custom file reporter was provided but "
           "--benchmark_out=<file> was not specified."
        << std::endl;
    std::exit(1);
  }
  if (!fname.empty()) {
    output_file.open(fname);
    if (!output_file.is_open()) {
      Err << "invalid file name: '" << fname << "'" << std::endl;
      std::exit(1);
    }
    if (!file_reporter) {
      default_file_reporter = internal::CreateReporter(
          FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
      file_reporter = default_file_reporter.get();
    }
    file_reporter->SetOutputStream(&output_file);
    file_reporter->SetErrorStream(&output_file);
  }

  std::vector<internal::BenchmarkInstance> benchmarks;
  if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;

  if (benchmarks.empty()) {
    Err << "Failed to match any benchmarks against regex: " << spec << "\n";
    return 0;
  }

  if (FLAGS_benchmark_list_tests) {
    for (auto const& benchmark : benchmarks)
      Out << benchmark.name().str() << "\n";
  } else {
    internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
  }

  return benchmarks.size();
}

void RegisterMemoryManager(MemoryManager* manager) {
  internal::memory_manager = manager;
}

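// Adds a key/value pair to the context reported alongside benchmark results.
// Illustrative use (the key and value here are hypothetical):
//   benchmark::AddCustomContext("cpu_scaling", "disabled");
// is equivalent to passing --benchmark_context=cpu_scaling=disabled on the
// command line; duplicate keys are rejected with a message on stderr.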
void AddCustomContext(const std::string& key, const std::string& value) {
  if (internal::global_context == nullptr) {
    internal::global_context = new std::map<std::string, std::string>();
  }
  if (!internal::global_context->emplace(key, value).second) {
    std::cerr << "Failed to add custom context \"" << key << "\" as it already "
              << "exists with value \"" << value << "\"\n";
  }
}

namespace internal {

void PrintUsageAndExit() {
  fprintf(stdout,
          "benchmark"
          " [--benchmark_list_tests={true|false}]\n"
          "          [--benchmark_filter=<regex>]\n"
          "          [--benchmark_min_time=<min_time>]\n"
          "          [--benchmark_repetitions=<num_repetitions>]\n"
          "          [--benchmark_enable_random_interleaving={true|false}]\n"
          "          [--benchmark_report_aggregates_only={true|false}]\n"
          "          [--benchmark_display_aggregates_only={true|false}]\n"
          "          [--benchmark_format=<console|json|csv>]\n"
          "          [--benchmark_out=<filename>]\n"
          "          [--benchmark_out_format=<json|console|csv>]\n"
          "          [--benchmark_color={auto|true|false}]\n"
          "          [--benchmark_counters_tabular={true|false}]\n"
          "          [--benchmark_perf_counters=<counter>,...]\n"
          "          [--benchmark_context=<key>=<value>,...]\n"
          "          [--v=<verbosity>]\n");
  exit(0);
}

void ParseCommandLineFlags(int* argc, char** argv) {
  using namespace benchmark;
  BenchmarkReporter::Context::executable_name =
      (argc && *argc > 0) ? argv[0] : "unknown";
  for (int i = 1; argc && i < *argc; ++i) {
    if (ParseBoolFlag(argv[i], "benchmark_list_tests",
                      &FLAGS_benchmark_list_tests) ||
        ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
        ParseDoubleFlag(argv[i], "benchmark_min_time",
                        &FLAGS_benchmark_min_time) ||
        ParseInt32Flag(argv[i], "benchmark_repetitions",
                       &FLAGS_benchmark_repetitions) ||
        ParseBoolFlag(argv[i], "benchmark_enable_random_interleaving",
                      &FLAGS_benchmark_enable_random_interleaving) ||
        ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
                      &FLAGS_benchmark_report_aggregates_only) ||
        ParseBoolFlag(argv[i], "benchmark_display_aggregates_only",
                      &FLAGS_benchmark_display_aggregates_only) ||
        ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
        ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
        ParseStringFlag(argv[i], "benchmark_out_format",
                        &FLAGS_benchmark_out_format) ||
        ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
        // "color_print" is the deprecated name for "benchmark_color".
        // TODO: Remove this.
        ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
        ParseBoolFlag(argv[i], "benchmark_counters_tabular",
                      &FLAGS_benchmark_counters_tabular) ||
        ParseStringFlag(argv[i], "benchmark_perf_counters",
                        &FLAGS_benchmark_perf_counters) ||
        ParseKeyValueFlag(argv[i], "benchmark_context",
                          &FLAGS_benchmark_context) ||
        ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
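      // The flag was recognized and consumed: remove it from argv by shifting
      // the remaining arguments left, shrink *argc, and re-examine the
      // argument that just moved into slot i.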
      for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];

      --(*argc);
      --i;
    } else if (IsFlag(argv[i], "help")) {
      PrintUsageAndExit();
    }
  }
  for (auto const* flag :
       {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format}) {
    if (*flag != "console" && *flag != "json" && *flag != "csv") {
      PrintUsageAndExit();
    }
  }
  if (FLAGS_benchmark_color.empty()) {
    PrintUsageAndExit();
  }
  for (const auto& kv : FLAGS_benchmark_context) {
    AddCustomContext(kv.first, kv.second);
  }
}

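// Constructing a function-local std::ios_base::Init guarantees that the
// standard streams are initialized before they are used; benchmark.h invokes
// this from a static initializer so that stream use during static benchmark
// registration is safe.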
int InitializeStreams() {
  static std::ios_base::Init init;
  return 0;
}

}  // end namespace internal

void Initialize(int* argc, char** argv) {
  internal::ParseCommandLineFlags(argc, argv);
  internal::LogLevel() = FLAGS_v;
}

void Shutdown() {
  delete internal::global_context;
}

bool ReportUnrecognizedArguments(int argc, char** argv) {
  for (int i = 1; i < argc; ++i) {
    fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0],
            argv[i]);
  }
  return argc > 1;
}

}  // end namespace benchmark