|
@@ -164,7 +164,6 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
|
|
|
#ifndef BENCHMARK_BENCHMARK_H_
|
|
|
#define BENCHMARK_BENCHMARK_H_
|
|
|
|
|
|
-
|
|
|
// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer.
|
|
|
#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L)
|
|
|
#define BENCHMARK_HAS_CXX11
|
|
@@ -172,22 +171,23 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
|
|
|
|
|
|
#include <stdint.h>
|
|
|
|
|
|
+#include <algorithm>
|
|
|
#include <cassert>
|
|
|
#include <cstddef>
|
|
|
#include <iosfwd>
|
|
|
-#include <string>
|
|
|
-#include <vector>
|
|
|
#include <map>
|
|
|
#include <set>
|
|
|
+#include <string>
|
|
|
+#include <vector>
|
|
|
|
|
|
#if defined(BENCHMARK_HAS_CXX11)
|
|
|
-#include <type_traits>
|
|
|
#include <initializer_list>
|
|
|
+#include <type_traits>
|
|
|
#include <utility>
|
|
|
#endif
|
|
|
|
|
|
#if defined(_MSC_VER)
|
|
|
-#include <intrin.h> // for _ReadWriteBarrier
|
|
|
+#include <intrin.h> // for _ReadWriteBarrier
|
|
|
#endif
|
|
|
|
|
|
#ifndef BENCHMARK_HAS_CXX11
|
|
@@ -226,13 +226,15 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
|
|
|
#define BENCHMARK_INTERNAL_TOSTRING2(x) #x
|
|
|
#define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x)
|
|
|
|
|
|
-#if defined(__GNUC__)
|
|
|
+#if defined(__GNUC__) || defined(__clang__)
|
|
|
#define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y)
|
|
|
#define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg)))
|
|
|
#else
|
|
|
#define BENCHMARK_BUILTIN_EXPECT(x, y) x
|
|
|
#define BENCHMARK_DEPRECATED_MSG(msg)
|
|
|
-#define BENCHMARK_WARNING_MSG(msg) __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING(__LINE__) ") : warning note: " msg))
|
|
|
+#define BENCHMARK_WARNING_MSG(msg) \
|
|
|
+ __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING( \
|
|
|
+ __LINE__) ") : warning note: " msg))
|
|
|
#endif
|
|
|
|
|
|
#if defined(__GNUC__) && !defined(__clang__)
|
|
@@ -289,13 +291,11 @@ BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams();
|
|
|
|
|
|
} // namespace internal
|
|
|
|
|
|
-
|
|
|
#if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \
|
|
|
- defined(EMSCRIPTN)
|
|
|
-# define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
|
|
|
+ defined(__EMSCRIPTEN__)
|
|
|
+#define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
|
|
|
#endif
|
|
|
|
|
|
-
|
|
|
// The DoNotOptimize(...) function can be used to prevent a value or
|
|
|
// expression from being optimized away by the compiler. This function is
|
|
|
// intended to add little to no overhead.
|
|
@@ -303,14 +303,18 @@ BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams();
|
|
|
#ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY
|
|
|
template <class Tp>
|
|
|
inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
|
|
|
- // Clang doesn't like the 'X' constraint on `value` and certain GCC versions
|
|
|
- // don't like the 'g' constraint. Attempt to placate them both.
|
|
|
+ asm volatile("" : : "r,m"(value) : "memory");
|
|
|
+}
|
|
|
+
|
|
|
+template <class Tp>
|
|
|
+inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) {
|
|
|
#if defined(__clang__)
|
|
|
- asm volatile("" : : "g"(value) : "memory");
|
|
|
+ asm volatile("" : "+r,m"(value) : : "memory");
|
|
|
#else
|
|
|
- asm volatile("" : : "i,r,m"(value) : "memory");
|
|
|
+ asm volatile("" : "+m,r"(value) : : "memory");
|
|
|
#endif
|
|
|
}
|
|
|
+
|
|
|
// Force the compiler to flush pending writes to global memory. Acts as an
|
|
|
// effective read/write barrier
|
|
|
inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
|
|
@@ -323,9 +327,7 @@ inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
|
|
|
_ReadWriteBarrier();
|
|
|
}
|
|
|
|
|
|
-inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
|
|
|
- _ReadWriteBarrier();
|
|
|
-}
|
|
|
+inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { _ReadWriteBarrier(); }
|
|
|
#else
|
|
|
template <class Tp>
|
|
|
inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
|
|
@@ -334,39 +336,54 @@ inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
|
|
|
// FIXME Add ClobberMemory() for non-gnu and non-msvc compilers
|
|
|
#endif
|
|
|
|
|
|
-
|
|
|
-
|
|
|
// This class is used for user-defined counters.
|
|
|
class Counter {
|
|
|
-public:
|
|
|
-
|
|
|
+ public:
|
|
|
enum Flags {
|
|
|
- kDefaults = 0,
|
|
|
+ kDefaults = 0,
|
|
|
// Mark the counter as a rate. It will be presented divided
|
|
|
// by the duration of the benchmark.
|
|
|
- kIsRate = 1,
|
|
|
+ kIsRate = 1U << 0U,
|
|
|
// Mark the counter as a thread-average quantity. It will be
|
|
|
// presented divided by the number of threads.
|
|
|
- kAvgThreads = 2,
|
|
|
+ kAvgThreads = 1U << 1U,
|
|
|
// Mark the counter as a thread-average rate. See above.
|
|
|
- kAvgThreadsRate = kIsRate|kAvgThreads
|
|
|
+ kAvgThreadsRate = kIsRate | kAvgThreads,
|
|
|
+ // Mark the counter as a constant value, valid/same for *every* iteration.
|
|
|
+ // When reporting, it will be *multiplied* by the iteration count.
|
|
|
+ kIsIterationInvariant = 1U << 2U,
|
|
|
+ // Mark the counter as a constant rate.
|
|
|
+ // When reporting, it will be *multiplied* by the iteration count
|
|
|
+ // and then divided by the duration of the benchmark.
|
|
|
+ kIsIterationInvariantRate = kIsRate | kIsIterationInvariant,
|
|
|
+  // Mark the counter as an iteration-average quantity.
|
|
|
+ // It will be presented divided by the number of iterations.
|
|
|
+ kAvgIterations = 1U << 3U,
|
|
|
+  // Mark the counter as an iteration-average rate. See above.
|
|
|
+ kAvgIterationsRate = kIsRate | kAvgIterations
|
|
|
};
|
|
|
|
|
|
double value;
|
|
|
- Flags flags;
|
|
|
+ Flags flags;
|
|
|
|
|
|
BENCHMARK_ALWAYS_INLINE
|
|
|
Counter(double v = 0., Flags f = kDefaults) : value(v), flags(f) {}
|
|
|
|
|
|
- BENCHMARK_ALWAYS_INLINE operator double const& () const { return value; }
|
|
|
- BENCHMARK_ALWAYS_INLINE operator double & () { return value; }
|
|
|
-
|
|
|
+ BENCHMARK_ALWAYS_INLINE operator double const&() const { return value; }
|
|
|
+ BENCHMARK_ALWAYS_INLINE operator double&() { return value; }
|
|
|
};
|
|
|
|
|
|
+// A helper for user code to create unforeseen combinations of Flags, without
|
|
|
+// having to do this cast manually each time, or providing this operator.
|
|
|
+Counter::Flags inline operator|(const Counter::Flags& LHS,
|
|
|
+ const Counter::Flags& RHS) {
|
|
|
+ return static_cast<Counter::Flags>(static_cast<int>(LHS) |
|
|
|
+ static_cast<int>(RHS));
|
|
|
+}
|
|
|
+
|
|
|
// This is the container for the user-defined counters.
|
|
|
typedef std::map<std::string, Counter> UserCounters;
|
|
|
|
|
|
-
|
|
|
// TimeUnit is passed to a benchmark in order to specify the order of magnitude
|
|
|
// for the measured time.
|
|
|
enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond };
|
|
@@ -379,7 +396,7 @@ enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda };
|
|
|
|
|
|
// BigOFunc is passed to a benchmark in order to specify the asymptotic
|
|
|
// computational complexity for the benchmark.
|
|
|
-typedef double(BigOFunc)(int);
|
|
|
+typedef double(BigOFunc)(int64_t);
|
|
|
|
|
|
// StatisticsFunc is passed to a benchmark in order to compute some descriptive
|
|
|
// statistics over all the measurements of some type
|
|
@@ -390,7 +407,7 @@ struct Statistics {
|
|
|
StatisticsFunc* compute_;
|
|
|
|
|
|
Statistics(std::string name, StatisticsFunc* compute)
|
|
|
- : name_(name), compute_(compute) {}
|
|
|
+ : name_(name), compute_(compute) {}
|
|
|
};
|
|
|
|
|
|
namespace internal {
|
|
@@ -399,14 +416,12 @@ class ThreadManager;
|
|
|
|
|
|
enum ReportMode
|
|
|
#if defined(BENCHMARK_HAS_CXX11)
|
|
|
- : unsigned
|
|
|
+ : unsigned
|
|
|
#else
|
|
|
#endif
|
|
|
- {
|
|
|
- RM_Unspecified, // The mode has not been manually specified
|
|
|
+{ RM_Unspecified, // The mode has not been manually specified
|
|
|
RM_Default, // The mode is user-specified as default.
|
|
|
- RM_ReportAggregatesOnly
|
|
|
-};
|
|
|
+ RM_ReportAggregatesOnly };
|
|
|
} // namespace internal
|
|
|
|
|
|
// State is passed to a running Benchmark and contains state for the
|
|
@@ -429,16 +444,19 @@ class State {
|
|
|
// Returns true if the benchmark should continue through another iteration.
|
|
|
// NOTE: A benchmark may not return from the test until KeepRunning() has
|
|
|
// returned false.
|
|
|
- bool KeepRunning() {
|
|
|
- if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
|
|
|
- StartKeepRunning();
|
|
|
- }
|
|
|
- bool const res = (--total_iterations_ != 0);
|
|
|
- if (BENCHMARK_BUILTIN_EXPECT(!res, false)) {
|
|
|
- FinishKeepRunning();
|
|
|
- }
|
|
|
- return res;
|
|
|
- }
|
|
|
+ bool KeepRunning();
|
|
|
+
|
|
|
+ // Returns true iff the benchmark should run n more iterations.
|
|
|
+ // REQUIRES: 'n' > 0.
|
|
|
+ // NOTE: A benchmark must not return from the test until KeepRunningBatch()
|
|
|
+ // has returned false.
|
|
|
+ // NOTE: KeepRunningBatch() may overshoot by up to 'n' iterations.
|
|
|
+ //
|
|
|
+ // Intended usage:
|
|
|
+ // while (state.KeepRunningBatch(1000)) {
|
|
|
+ // // process 1000 elements
|
|
|
+ // }
|
|
|
+ bool KeepRunningBatch(size_t n);
|
|
|
|
|
|
// REQUIRES: timer is running and 'SkipWithError(...)' has not been called
|
|
|
// by the current thread.
|
|
@@ -505,10 +523,10 @@ class State {
|
|
|
//
|
|
|
// REQUIRES: a benchmark has exited its benchmarking loop.
|
|
|
BENCHMARK_ALWAYS_INLINE
|
|
|
- void SetBytesProcessed(size_t bytes) { bytes_processed_ = bytes; }
|
|
|
+ void SetBytesProcessed(int64_t bytes) { bytes_processed_ = bytes; }
|
|
|
|
|
|
BENCHMARK_ALWAYS_INLINE
|
|
|
- size_t bytes_processed() const { return bytes_processed_; }
|
|
|
+ int64_t bytes_processed() const { return bytes_processed_; }
|
|
|
|
|
|
// If this routine is called with complexity_n > 0 and complexity report is
|
|
|
// requested for the
|
|
@@ -516,10 +534,10 @@ class State {
|
|
|
// and complexity_n will
|
|
|
// represent the length of N.
|
|
|
BENCHMARK_ALWAYS_INLINE
|
|
|
- void SetComplexityN(int complexity_n) { complexity_n_ = complexity_n; }
|
|
|
+ void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; }
|
|
|
|
|
|
BENCHMARK_ALWAYS_INLINE
|
|
|
- int complexity_length_n() { return complexity_n_; }
|
|
|
+ int64_t complexity_length_n() { return complexity_n_; }
|
|
|
|
|
|
// If this routine is called with items > 0, then an items/s
|
|
|
// label is printed on the benchmark report line for the currently
|
|
@@ -528,10 +546,10 @@ class State {
|
|
|
//
|
|
|
// REQUIRES: a benchmark has exited its benchmarking loop.
|
|
|
BENCHMARK_ALWAYS_INLINE
|
|
|
- void SetItemsProcessed(size_t items) { items_processed_ = items; }
|
|
|
+ void SetItemsProcessed(int64_t items) { items_processed_ = items; }
|
|
|
|
|
|
BENCHMARK_ALWAYS_INLINE
|
|
|
- size_t items_processed() const { return items_processed_; }
|
|
|
+ int64_t items_processed() const { return items_processed_; }
|
|
|
|
|
|
// If this routine is called, the specified label is printed at the
|
|
|
// end of the benchmark report line for the currently executing
|
|
@@ -539,7 +557,7 @@ class State {
|
|
|
// static void BM_Compress(benchmark::State& state) {
|
|
|
// ...
|
|
|
// double compress = input_size / output_size;
|
|
|
- // state.SetLabel(StringPrintf("compress:%.1f%%", 100.0*compression));
|
|
|
+ // state.SetLabel(StrFormat("compress:%.1f%%", 100.0*compression));
|
|
|
// }
|
|
|
// Produces output that looks like:
|
|
|
// BM_Compress 50 50 14115038 compress:27.3%
|
|
@@ -553,33 +571,51 @@ class State {
|
|
|
|
|
|
// Range arguments for this run. CHECKs if the argument has been set.
|
|
|
BENCHMARK_ALWAYS_INLINE
|
|
|
- int range(std::size_t pos = 0) const {
|
|
|
+ int64_t range(std::size_t pos = 0) const {
|
|
|
assert(range_.size() > pos);
|
|
|
return range_[pos];
|
|
|
}
|
|
|
|
|
|
BENCHMARK_DEPRECATED_MSG("use 'range(0)' instead")
|
|
|
- int range_x() const { return range(0); }
|
|
|
+ int64_t range_x() const { return range(0); }
|
|
|
|
|
|
BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead")
|
|
|
- int range_y() const { return range(1); }
|
|
|
+ int64_t range_y() const { return range(1); }
|
|
|
|
|
|
BENCHMARK_ALWAYS_INLINE
|
|
|
- size_t iterations() const { return (max_iterations - total_iterations_) + 1; }
|
|
|
+ size_t iterations() const {
|
|
|
+ if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+ return max_iterations - total_iterations_ + batch_leftover_;
|
|
|
+ }
|
|
|
+
|
|
|
+ private
|
|
|
+ : // items we expect on the first cache line (ie 64 bytes of the struct)
|
|
|
+ // When total_iterations_ is 0, KeepRunning() and friends will return false.
|
|
|
+ // May be larger than max_iterations.
|
|
|
+ size_t total_iterations_;
|
|
|
+
|
|
|
+ // When using KeepRunningBatch(), batch_leftover_ holds the number of
|
|
|
+ // iterations beyond max_iters that were run. Used to track
|
|
|
+ // completed_iterations_ accurately.
|
|
|
+ size_t batch_leftover_;
|
|
|
+
|
|
|
+ public:
|
|
|
+ const size_t max_iterations;
|
|
|
|
|
|
private:
|
|
|
bool started_;
|
|
|
bool finished_;
|
|
|
- size_t total_iterations_;
|
|
|
-
|
|
|
- std::vector<int> range_;
|
|
|
+ bool error_occurred_;
|
|
|
|
|
|
- size_t bytes_processed_;
|
|
|
- size_t items_processed_;
|
|
|
+ private: // items we don't need on the first cache line
|
|
|
+ std::vector<int64_t> range_;
|
|
|
|
|
|
- int complexity_n_;
|
|
|
+ int64_t bytes_processed_;
|
|
|
+ int64_t items_processed_;
|
|
|
|
|
|
- bool error_occurred_;
|
|
|
+ int64_t complexity_n_;
|
|
|
|
|
|
public:
|
|
|
// Container for user-defined counters.
|
|
@@ -588,27 +624,66 @@ class State {
|
|
|
const int thread_index;
|
|
|
// Number of threads concurrently executing the benchmark.
|
|
|
const int threads;
|
|
|
- const size_t max_iterations;
|
|
|
|
|
|
// TODO(EricWF) make me private
|
|
|
- State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
|
|
|
+ State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
|
|
|
int n_threads, internal::ThreadTimer* timer,
|
|
|
internal::ThreadManager* manager);
|
|
|
|
|
|
private:
|
|
|
void StartKeepRunning();
|
|
|
+ // Implementation of KeepRunning() and KeepRunningBatch().
|
|
|
+ // is_batch must be true unless n is 1.
|
|
|
+ bool KeepRunningInternal(size_t n, bool is_batch);
|
|
|
void FinishKeepRunning();
|
|
|
internal::ThreadTimer* timer_;
|
|
|
internal::ThreadManager* manager_;
|
|
|
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(State);
|
|
|
};
|
|
|
|
|
|
+inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunning() {
|
|
|
+ return KeepRunningInternal(1, /*is_batch=*/false);
|
|
|
+}
|
|
|
+
|
|
|
+inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningBatch(size_t n) {
|
|
|
+ return KeepRunningInternal(n, /*is_batch=*/true);
|
|
|
+}
|
|
|
+
|
|
|
+inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(size_t n,
|
|
|
+ bool is_batch) {
|
|
|
+ // total_iterations_ is set to 0 by the constructor, and always set to a
|
|
|
+  // nonzero value by StartKeepRunning().
|
|
|
+ assert(n > 0);
|
|
|
+ // n must be 1 unless is_batch is true.
|
|
|
+ assert(is_batch || n == 1);
|
|
|
+ if (BENCHMARK_BUILTIN_EXPECT(total_iterations_ >= n, true)) {
|
|
|
+ total_iterations_ -= n;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!started_) {
|
|
|
+ StartKeepRunning();
|
|
|
+ if (!error_occurred_ && total_iterations_ >= n) {
|
|
|
+ total_iterations_ -= n;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ // For non-batch runs, total_iterations_ must be 0 by now.
|
|
|
+ if (is_batch && total_iterations_ != 0) {
|
|
|
+ batch_leftover_ = n - total_iterations_;
|
|
|
+ total_iterations_ = 0;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ FinishKeepRunning();
|
|
|
+ return false;
|
|
|
+}
|
|
|
+
|
|
|
struct State::StateIterator {
|
|
|
struct BENCHMARK_UNUSED Value {};
|
|
|
typedef std::forward_iterator_tag iterator_category;
|
|
|
typedef Value value_type;
|
|
|
typedef Value reference;
|
|
|
typedef Value pointer;
|
|
|
+ typedef std::ptrdiff_t difference_type;
|
|
|
|
|
|
private:
|
|
|
friend class State;
|
|
@@ -670,7 +745,7 @@ class Benchmark {
|
|
|
// Run this benchmark once with "x" as the extra argument passed
|
|
|
// to the function.
|
|
|
// REQUIRES: The function passed to the constructor must accept an arg1.
|
|
|
- Benchmark* Arg(int x);
|
|
|
+ Benchmark* Arg(int64_t x);
|
|
|
|
|
|
// Run this benchmark with the given time unit for the generated output report
|
|
|
Benchmark* Unit(TimeUnit unit);
|
|
@@ -678,23 +753,23 @@ class Benchmark {
|
|
|
// Run this benchmark once for a number of values picked from the
|
|
|
// range [start..limit]. (start and limit are always picked.)
|
|
|
// REQUIRES: The function passed to the constructor must accept an arg1.
|
|
|
- Benchmark* Range(int start, int limit);
|
|
|
+ Benchmark* Range(int64_t start, int64_t limit);
|
|
|
|
|
|
// Run this benchmark once for all values in the range [start..limit] with
|
|
|
// specific step
|
|
|
// REQUIRES: The function passed to the constructor must accept an arg1.
|
|
|
- Benchmark* DenseRange(int start, int limit, int step = 1);
|
|
|
+ Benchmark* DenseRange(int64_t start, int64_t limit, int step = 1);
|
|
|
|
|
|
// Run this benchmark once with "args" as the extra arguments passed
|
|
|
// to the function.
|
|
|
// REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
|
|
|
- Benchmark* Args(const std::vector<int>& args);
|
|
|
+ Benchmark* Args(const std::vector<int64_t>& args);
|
|
|
|
|
|
// Equivalent to Args({x, y})
|
|
|
// NOTE: This is a legacy C++03 interface provided for compatibility only.
|
|
|
// New code should use 'Args'.
|
|
|
- Benchmark* ArgPair(int x, int y) {
|
|
|
- std::vector<int> args;
|
|
|
+ Benchmark* ArgPair(int64_t x, int64_t y) {
|
|
|
+ std::vector<int64_t> args;
|
|
|
args.push_back(x);
|
|
|
args.push_back(y);
|
|
|
return Args(args);
|
|
@@ -703,7 +778,7 @@ class Benchmark {
|
|
|
// Run this benchmark once for a number of values picked from the
|
|
|
// ranges [start..limit]. (starts and limits are always picked.)
|
|
|
// REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
|
|
|
- Benchmark* Ranges(const std::vector<std::pair<int, int> >& ranges);
|
|
|
+ Benchmark* Ranges(const std::vector<std::pair<int64_t, int64_t> >& ranges);
|
|
|
|
|
|
// Equivalent to ArgNames({name})
|
|
|
Benchmark* ArgName(const std::string& name);
|
|
@@ -715,8 +790,8 @@ class Benchmark {
|
|
|
// Equivalent to Ranges({{lo1, hi1}, {lo2, hi2}}).
|
|
|
// NOTE: This is a legacy C++03 interface provided for compatibility only.
|
|
|
// New code should use 'Ranges'.
|
|
|
- Benchmark* RangePair(int lo1, int hi1, int lo2, int hi2) {
|
|
|
- std::vector<std::pair<int, int> > ranges;
|
|
|
+ Benchmark* RangePair(int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) {
|
|
|
+ std::vector<std::pair<int64_t, int64_t> > ranges;
|
|
|
ranges.push_back(std::make_pair(lo1, hi1));
|
|
|
ranges.push_back(std::make_pair(lo2, hi2));
|
|
|
return Ranges(ranges);
|
|
@@ -823,15 +898,13 @@ class Benchmark {
|
|
|
|
|
|
int ArgsCnt() const;
|
|
|
|
|
|
- static void AddRange(std::vector<int>* dst, int lo, int hi, int mult);
|
|
|
-
|
|
|
private:
|
|
|
friend class BenchmarkFamilies;
|
|
|
|
|
|
std::string name_;
|
|
|
ReportMode report_mode_;
|
|
|
- std::vector<std::string> arg_names_; // Args for all benchmark runs
|
|
|
- std::vector<std::vector<int> > args_; // Args for all benchmark runs
|
|
|
+ std::vector<std::string> arg_names_; // Args for all benchmark runs
|
|
|
+ std::vector<std::vector<int64_t> > args_; // Args for all benchmark runs
|
|
|
TimeUnit time_unit_;
|
|
|
int range_multiplier_;
|
|
|
double min_time_;
|
|
@@ -1055,7 +1128,7 @@ class Fixture : public internal::Benchmark {
|
|
|
class BaseClass##_##Method##_Benchmark : public BaseClass<a> { \
|
|
|
public: \
|
|
|
BaseClass##_##Method##_Benchmark() : BaseClass<a>() { \
|
|
|
- this->SetName(#BaseClass"<" #a ">/" #Method); \
|
|
|
+ this->SetName(#BaseClass "<" #a ">/" #Method); \
|
|
|
} \
|
|
|
\
|
|
|
protected: \
|
|
@@ -1066,7 +1139,7 @@ class Fixture : public internal::Benchmark {
|
|
|
class BaseClass##_##Method##_Benchmark : public BaseClass<a, b> { \
|
|
|
public: \
|
|
|
BaseClass##_##Method##_Benchmark() : BaseClass<a, b>() { \
|
|
|
- this->SetName(#BaseClass"<" #a "," #b ">/" #Method); \
|
|
|
+ this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \
|
|
|
} \
|
|
|
\
|
|
|
protected: \
|
|
@@ -1078,14 +1151,15 @@ class Fixture : public internal::Benchmark {
|
|
|
class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \
|
|
|
public: \
|
|
|
BaseClass##_##Method##_Benchmark() : BaseClass<__VA_ARGS__>() { \
|
|
|
- this->SetName(#BaseClass"<" #__VA_ARGS__ ">/" #Method); \
|
|
|
+ this->SetName(#BaseClass "<" #__VA_ARGS__ ">/" #Method); \
|
|
|
} \
|
|
|
\
|
|
|
protected: \
|
|
|
virtual void BenchmarkCase(::benchmark::State&); \
|
|
|
};
|
|
|
#else
|
|
|
-#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a)
|
|
|
+#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) \
|
|
|
+ BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a)
|
|
|
#endif
|
|
|
|
|
|
#define BENCHMARK_DEFINE_F(BaseClass, Method) \
|
|
@@ -1105,7 +1179,8 @@ class Fixture : public internal::Benchmark {
|
|
|
BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
|
|
|
void BaseClass##_##Method##_Benchmark::BenchmarkCase
|
|
|
#else
|
|
|
-#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
|
|
|
+#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) \
|
|
|
+ BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
|
|
|
#endif
|
|
|
|
|
|
#define BENCHMARK_REGISTER_F(BaseClass, Method) \
|
|
@@ -1132,24 +1207,24 @@ class Fixture : public internal::Benchmark {
|
|
|
void BaseClass##_##Method##_Benchmark::BenchmarkCase
|
|
|
|
|
|
#ifdef BENCHMARK_HAS_CXX11
|
|
|
-#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \
|
|
|
+#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \
|
|
|
BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
|
|
|
- BENCHMARK_REGISTER_F(BaseClass, Method); \
|
|
|
+ BENCHMARK_REGISTER_F(BaseClass, Method); \
|
|
|
void BaseClass##_##Method##_Benchmark::BenchmarkCase
|
|
|
#else
|
|
|
-#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
|
|
|
+#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) \
|
|
|
+ BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
|
|
|
#endif
|
|
|
|
|
|
// Helper macro to create a main routine in a test that runs the benchmarks
|
|
|
-#define BENCHMARK_MAIN() \
|
|
|
- int main(int argc, char** argv) { \
|
|
|
- ::benchmark::Initialize(&argc, argv); \
|
|
|
+#define BENCHMARK_MAIN() \
|
|
|
+ int main(int argc, char** argv) { \
|
|
|
+ ::benchmark::Initialize(&argc, argv); \
|
|
|
if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \
|
|
|
- ::benchmark::RunSpecifiedBenchmarks(); \
|
|
|
- } \
|
|
|
+ ::benchmark::RunSpecifiedBenchmarks(); \
|
|
|
+ } \
|
|
|
int main(int, char**)
|
|
|
|
|
|
-
|
|
|
// ------------------------------------------------------
|
|
|
// Benchmark Reporters
|
|
|
|
|
@@ -1186,7 +1261,7 @@ class BenchmarkReporter {
|
|
|
CPUInfo const& cpu_info;
|
|
|
// The number of chars in the longest benchmark name.
|
|
|
size_t name_field_width;
|
|
|
-
|
|
|
+ static const char* executable_name;
|
|
|
Context();
|
|
|
};
|
|
|
|
|
@@ -1239,7 +1314,7 @@ class BenchmarkReporter {
|
|
|
// Keep track of arguments to compute asymptotic complexity
|
|
|
BigO complexity;
|
|
|
BigOFunc* complexity_lambda;
|
|
|
- int complexity_n;
|
|
|
+ int64_t complexity_n;
|
|
|
|
|
|
// what statistics to compute from the measurements
|
|
|
const std::vector<Statistics>* statistics;
|
|
@@ -1309,17 +1384,19 @@ class BenchmarkReporter {
|
|
|
// Simple reporter that outputs benchmark data to the console. This is the
|
|
|
// default reporter used by RunSpecifiedBenchmarks().
|
|
|
class ConsoleReporter : public BenchmarkReporter {
|
|
|
-public:
|
|
|
+ public:
|
|
|
enum OutputOptions {
|
|
|
OO_None = 0,
|
|
|
OO_Color = 1,
|
|
|
OO_Tabular = 2,
|
|
|
- OO_ColorTabular = OO_Color|OO_Tabular,
|
|
|
+ OO_ColorTabular = OO_Color | OO_Tabular,
|
|
|
OO_Defaults = OO_ColorTabular
|
|
|
};
|
|
|
explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults)
|
|
|
- : output_options_(opts_), name_field_width_(0),
|
|
|
- prev_counters_(), printed_header_(false) {}
|
|
|
+ : output_options_(opts_),
|
|
|
+ name_field_width_(0),
|
|
|
+ prev_counters_(),
|
|
|
+ printed_header_(false) {}
|
|
|
|
|
|
virtual bool ReportContext(const Context& context);
|
|
|
virtual void ReportRuns(const std::vector<Run>& reports);
|
|
@@ -1347,7 +1424,8 @@ class JSONReporter : public BenchmarkReporter {
|
|
|
bool first_report_;
|
|
|
};
|
|
|
|
|
|
-class CSVReporter : public BenchmarkReporter {
|
|
|
+class BENCHMARK_DEPRECATED_MSG("The CSV Reporter will be removed in a future release")
|
|
|
+ CSVReporter : public BenchmarkReporter {
|
|
|
public:
|
|
|
CSVReporter() : printed_header_(false) {}
|
|
|
virtual bool ReportContext(const Context& context);
|
|
@@ -1357,7 +1435,7 @@ class CSVReporter : public BenchmarkReporter {
|
|
|
void PrintRunData(const Run& report);
|
|
|
|
|
|
bool printed_header_;
|
|
|
- std::set< std::string > user_counter_names_;
|
|
|
+ std::set<std::string> user_counter_names_;
|
|
|
};
|
|
|
|
|
|
inline const char* GetTimeUnitString(TimeUnit unit) {
|
|
@@ -1384,6 +1462,6 @@ inline double GetTimeUnitMultiplier(TimeUnit unit) {
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-} // namespace benchmark
|
|
|
+} // namespace benchmark
|
|
|
|
|
|
#endif // BENCHMARK_BENCHMARK_H_
|