
Add benchmark for std::set.

Summary:
Benchmarks for construct, find, insert, and iterate, with sequentially
and randomly ordered inputs.

It also improves the cartesian product benchmark header to allow
runtime values to be specified in the product.
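
For example (a sketch abbreviated from benchmarks/ordered_set.bench.cpp
below; the values shown are a subset of the ones actually used), a single
call now registers one benchmark per combination of the type parameters
and the runtime values:

  const std::vector<size_t> TableSize{1, 10, 100, 1000};
  const std::vector<size_t> NumTables{1, 10, 100};
  makeCartesianProductBenchmark<Find, AllHitTypes, AllAccessPattern>(
      TableSize, NumTables);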

Reviewers: EricWF

Subscribers: christof, ldionne, libcxx-commits

Differential Revision: https://reviews.llvm.org/D53523

git-svn-id: https://llvm.org/svn/llvm-project/libcxx/trunk@345035 91177308-0d34-0410-b5e6-96231b3b80d8
Samuel Benzaquen, 6 years ago
parent
commit
d8754baf49
2 files changed, 312 insertions and 20 deletions
  1. benchmarks/CartesianBenchmarks.hpp (+63, -20)
  2. benchmarks/ordered_set.bench.cpp (+249, -0)

+ 63 - 20
benchmarks/CartesianBenchmarks.hpp

@@ -11,6 +11,7 @@
 #include <string>
 #include <tuple>
 #include <type_traits>
+#include <vector>

 #include "benchmark/benchmark.h"
 #include "test_macros.h"
@@ -27,25 +28,55 @@ constexpr auto makeEnumValueTuple(std::index_sequence<Idxs...>) {
   return std::make_tuple(EnumValue<D, E, Idxs>{}...);
 }

-template <class T>
-static auto skip(int) -> decltype(T::skip()) {
-  return T::skip();
+template <class B>
+static auto skip(const B& Bench, int) -> decltype(Bench.skip()) {
+  return Bench.skip();
 }
-template <class T>
-static bool skip(char) {
+template <class B>
+static auto skip(const B& Bench, char) {
   return false;
 }

-template <template <class...> class B, class... U>
-void makeBenchmarkImpl(std::tuple<U...> t) {
-  using T = B<U...>;
-  if (!internal::skip<T>(0))
-    benchmark::RegisterBenchmark(T::name().c_str(), T::run);
+template <class B, class Args, size_t... Is>
+void makeBenchmarkFromValuesImpl(const Args& A, std::index_sequence<Is...>) {
+  for (auto& V : A) {
+    B Bench{std::get<Is>(V)...};
+    if (!internal::skip(Bench, 0)) {
+      benchmark::RegisterBenchmark(Bench.name().c_str(),
+                                   [=](benchmark::State& S) { Bench.run(S); });
+    }
+  }
+}
+
+template <class B, class... Args>
+void makeBenchmarkFromValues(const std::vector<std::tuple<Args...> >& A) {
+  makeBenchmarkFromValuesImpl<B>(A, std::index_sequence_for<Args...>());
 }

-template <template <class...> class B, class... U, class... T, class... Tuples>
-void makeBenchmarkImpl(std::tuple<U...>, std::tuple<T...>, Tuples... rest) {
-  (internal::makeBenchmarkImpl<B>(std::tuple<U..., T>(), rest...), ...);
+template <template <class...> class B, class Args, class... U>
+void makeBenchmarkImpl(const Args& A, std::tuple<U...> t) {
+  makeBenchmarkFromValues<B<U...> >(A);
+}
+
+template <template <class...> class B, class Args, class... U,
+          class... T, class... Tuples>
+void makeBenchmarkImpl(const Args& A, std::tuple<U...>, std::tuple<T...>,
+                       Tuples... rest) {
+  (internal::makeBenchmarkImpl<B>(A, std::tuple<U..., T>(), rest...), ...);
+}
+
+template <class R, class T>
+void allValueCombinations(R& Result, const T& Final) {
+  return Result.push_back(Final);
+}
+
+template <class R, class T, class V, class... Vs>
+void allValueCombinations(R& Result, const T& Prev, const V& Value,
+                          const Vs&... Values) {
+  for (const auto& E : Value) {
+    allValueCombinations(Result, std::tuple_cat(Prev, std::make_tuple(E)),
+                         Values...);
+  }
 }

 }  // namespace internal
@@ -67,17 +98,29 @@ using EnumValuesAsTuple =
         std::make_index_sequence<NumLabels>{}));

 // Instantiates B<T0, T1, ..., TN> where <Ti...> are the combinations in the
-// cartesian product of `Tuples...`
+// cartesian product of `Tuples...`, and passes (arg0, ..., argN) as constructor
+// arguments, where `(argi...)` are the combinations in the cartesian product of
+// the runtime values of `A...`.
 // B<T...> requires:
-//  - static std::string name(): The name of the benchmark.
-//  - static void run(benchmark::State&): The body of the benchmark.
+//  - std::string name(args...): The name of the benchmark.
+//  - void run(benchmark::State&, args...): The body of the benchmark.
 // It can also optionally provide:
-//  - static bool skip(): When `true`, skips the combination. Default is false.
+//  - bool skip(args...): When `true`, skips the combination. Default is false.
 //
 // Returns int to facilitate registration. The return value is unspecified.
-template <template <class...> class B, class... Tuples>
-int makeCartesianProductBenchmark() {
-  internal::makeBenchmarkImpl<B>(std::tuple<>(), Tuples()...);
+template <template <class...> class B, class... Tuples, class... Args>
+int makeCartesianProductBenchmark(const Args&... A) {
+  std::vector<std::tuple<typename Args::value_type...> > V;
+  internal::allValueCombinations(V, std::tuple<>(), A...);
+  internal::makeBenchmarkImpl<B>(V, std::tuple<>(), Tuples()...);
+  return 0;
+}
+
+template <class B, class... Args>
+int makeCartesianProductBenchmark(const Args&... A) {
+  std::vector<std::tuple<typename Args::value_type...> > V;
+  internal::allValueCombinations(V, std::tuple<>(), A...);
+  internal::makeBenchmarkFromValues<B>(V);
   return 0;
 }


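A minimal sketch (not part of this patch) of a benchmark type that satisfies
the member-function protocol described above: it is constructed from each
tuple of runtime values in the product and exposes name() and run(), plus an
optional skip(). The type `StringCopy` and its parameter are illustrative only.

struct StringCopy {
  size_t Size;
  explicit StringCopy(size_t S) : Size(S) {}

  std::string name() const {
    return "BM_StringCopy_Size" + std::to_string(Size);
  }

  // run() is const because the registration lambda captures the benchmark
  // object by value and calls it from a non-mutable lambda body.
  void run(benchmark::State& State) const {
    std::string Src(Size, 'x');
    for (auto _ : State) {
      std::string Dst = Src;
      benchmark::DoNotOptimize(Dst);
    }
  }
};

// One benchmark is registered per runtime value, e.g.:
//   makeCartesianProductBenchmark<StringCopy>(std::vector<size_t>{8, 64, 512});
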
+ 249 - 0
benchmarks/ordered_set.bench.cpp

@@ -0,0 +1,249 @@
+//===----------------------------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "CartesianBenchmarks.hpp"
+#include "benchmark/benchmark.h"
+#include "test_macros.h"
+
+namespace {
+
+enum class HitType { Hit, Miss };
+
+struct AllHitTypes : EnumValuesAsTuple<AllHitTypes, HitType, 2> {
+  static constexpr const char* Names[] = {"Hit", "Miss"};
+};
+
+enum class AccessPattern { Ordered, Random };
+
+struct AllAccessPattern
+    : EnumValuesAsTuple<AllAccessPattern, AccessPattern, 2> {
+  static constexpr const char* Names[] = {"Ordered", "Random"};
+};
+
+void sortKeysBy(std::vector<uint64_t>& Keys, AccessPattern AP) {
+  if (AP == AccessPattern::Random) {
+    std::random_device R;
+    std::mt19937 M(R());
+    std::shuffle(std::begin(Keys), std::end(Keys), M);
+  }
+}
+
+struct TestSets {
+  std::vector<std::set<uint64_t> > Sets;
+  std::vector<uint64_t> Keys;
+};
+
+TestSets makeTestingSets(size_t TableSize, size_t NumTables, HitType Hit,
+                         AccessPattern Access) {
+  TestSets R;
+  R.Sets.resize(1);
+
+  for (uint64_t I = 0; I < TableSize; ++I) {
+    R.Sets[0].insert(2 * I);
+    R.Keys.push_back(Hit == HitType::Hit ? 2 * I : 2 * I + 1);
+  }
+  R.Sets.resize(NumTables, R.Sets[0]);
+  sortKeysBy(R.Keys, Access);
+
+  return R;
+}
+
+struct Base {
+  size_t TableSize;
+  size_t NumTables;
+  Base(size_t T, size_t N) : TableSize(T), NumTables(N) {}
+
+  bool skip() const {
+    size_t Total = TableSize * NumTables;
+    return Total < 100 || Total > 1000000;
+  }
+
+  std::string baseName() const {
+    return "_TableSize" + std::to_string(TableSize) + "_NumTables" +
+           std::to_string(NumTables);
+  }
+};
+
+template <class Access>
+struct Create : Base {
+  using Base::Base;
+
+  void run(benchmark::State& State) const {
+    std::vector<uint64_t> Keys(TableSize);
+    std::iota(Keys.begin(), Keys.end(), uint64_t{0});
+    sortKeysBy(Keys, Access());
+
+    while (State.KeepRunningBatch(TableSize * NumTables)) {
+      std::vector<std::set<size_t>> Sets(NumTables);
+      for (auto K : Keys) {
+        for (auto& Set : Sets) {
+          benchmark::DoNotOptimize(Set.insert(K));
+        }
+      }
+    }
+  }
+
+  std::string name() const {
+    return "BM_Create" + Access::name() + baseName();
+  }
+};
+
+template <class Hit, class Access>
+struct Find : Base {
+  using Base::Base;
+
+  void run(benchmark::State& State) const {
+    auto Data = makeTestingSets(TableSize, NumTables, Hit(), Access());
+
+    while (State.KeepRunningBatch(TableSize * NumTables)) {
+      for (auto K : Data.Keys) {
+        for (auto& Set : Data.Sets) {
+          benchmark::DoNotOptimize(Set.find(K));
+        }
+      }
+    }
+  }
+
+  std::string name() const {
+    return "BM_Find" + Hit::name() + Access::name() + baseName();
+  }
+};
+
+template <class Hit, class Access>
+struct FindNeEnd : Base {
+  using Base::Base;
+
+  void run(benchmark::State& State) const {
+    auto Data = makeTestingSets(TableSize, NumTables, Hit(), Access());
+
+    while (State.KeepRunningBatch(TableSize * NumTables)) {
+      for (auto K : Data.Keys) {
+        for (auto& Set : Data.Sets) {
+          benchmark::DoNotOptimize(Set.find(K) != Set.end());
+        }
+      }
+    }
+  }
+
+  std::string name() const {
+    return "BM_FindNeEnd" + Hit::name() + Access::name() + baseName();
+  }
+};
+
+template <class Access>
+struct InsertHit : Base {
+  using Base::Base;
+
+  void run(benchmark::State& State) const {
+    auto Data = makeTestingSets(TableSize, NumTables, HitType::Hit, Access());
+
+    while (State.KeepRunningBatch(TableSize * NumTables)) {
+      for (auto K : Data.Keys) {
+        for (auto& Set : Data.Sets) {
+          benchmark::DoNotOptimize(Set.insert(K));
+        }
+      }
+    }
+  }
+
+  std::string name() const {
+    return "BM_InsertHit" + Access::name() + baseName();
+  }
+};
+
+template <class Access>
+struct InsertMissAndErase : Base {
+  using Base::Base;
+
+  void run(benchmark::State& State) const {
+    auto Data = makeTestingSets(TableSize, NumTables, HitType::Miss, Access());
+
+    while (State.KeepRunningBatch(TableSize * NumTables)) {
+      for (auto K : Data.Keys) {
+        for (auto& Set : Data.Sets) {
+          benchmark::DoNotOptimize(Set.erase(Set.insert(K).first));
+        }
+      }
+    }
+  }
+
+  std::string name() const {
+    return "BM_InsertMissAndErase" + Access::name() + baseName();
+  }
+};
+
+struct IterateRangeFor : Base {
+  using Base::Base;
+
+  void run(benchmark::State& State) const {
+    auto Data = makeTestingSets(TableSize, NumTables, HitType::Miss,
+                                AccessPattern::Ordered);
+
+    while (State.KeepRunningBatch(TableSize * NumTables)) {
+      for (auto& Set : Data.Sets) {
+        for (auto& V : Set) {
+          benchmark::DoNotOptimize(V);
+        }
+      }
+    }
+  }
+
+  std::string name() const { return "BM_IterateRangeFor" + baseName(); }
+};
+
+struct IterateBeginEnd : Base {
+  using Base::Base;
+
+  void run(benchmark::State& State) const {
+    auto Data = makeTestingSets(TableSize, NumTables, HitType::Miss,
+                                AccessPattern::Ordered);
+
+    while (State.KeepRunningBatch(TableSize * NumTables)) {
+      for (auto& Set : Data.Sets) {
+        for (auto it = Set.begin(); it != Set.end(); ++it) {
+          benchmark::DoNotOptimize(*it);
+        }
+      }
+    }
+  }
+
+  std::string name() const { return "BM_IterateBeginEnd" + baseName(); }
+};
+
+}  // namespace
+
+int main(int argc, char** argv) {
+  benchmark::Initialize(&argc, argv);
+  if (benchmark::ReportUnrecognizedArguments(argc, argv))
+    return 1;
+
+  const std::vector<size_t> TableSize{1, 10, 100, 1000, 10000, 100000, 1000000};
+  const std::vector<size_t> NumTables{1, 10, 100, 1000, 10000, 100000, 1000000};
+
+  makeCartesianProductBenchmark<Create, AllAccessPattern>(TableSize, NumTables);
+  makeCartesianProductBenchmark<Find, AllHitTypes, AllAccessPattern>(
+      TableSize, NumTables);
+  makeCartesianProductBenchmark<FindNeEnd, AllHitTypes, AllAccessPattern>(
+      TableSize, NumTables);
+  makeCartesianProductBenchmark<InsertHit, AllAccessPattern>(
+      TableSize, NumTables);
+  makeCartesianProductBenchmark<InsertMissAndErase, AllAccessPattern>(
+      TableSize, NumTables);
+  makeCartesianProductBenchmark<IterateRangeFor>(TableSize, NumTables);
+  makeCartesianProductBenchmark<IterateBeginEnd>(TableSize, NumTables);
+  benchmark::RunSpecifiedBenchmarks();
+}