// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include <benchmark/benchmark.h>

#include <cstdint>
#include <type_traits>

#include "absl/container/flat_hash_map.h"
#include "common/map.h"
#include "common/raw_hashtable_benchmark_helpers.h"
#include "llvm/ADT/DenseMap.h"

namespace Carbon {
namespace {

using RawHashtable::CarbonHashDI;
using RawHashtable::GetKeysAndHitKeys;
using RawHashtable::GetKeysAndMissKeys;
using RawHashtable::HitArgs;
using RawHashtable::ReportTableMetrics;
using RawHashtable::SizeArgs;
using RawHashtable::ValueToBool;

// Helpers to synthesize some value of one of the three types we use as value
// types.
template <typename T>
auto MakeValue() -> T {
  if constexpr (std::is_same_v<T, llvm::StringRef>) {
    return "abc";
  } else if constexpr (std::is_pointer_v<T>) {
    static std::remove_pointer_t<T> x;
    return &x;
  } else {
    return 42;
  }
}

template <typename T>
auto MakeValue2() -> T {
  if constexpr (std::is_same_v<T, llvm::StringRef>) {
    return "qux";
  } else if constexpr (std::is_pointer_v<T>) {
    static std::remove_pointer_t<T> y;
    return &y;
  } else {
    return 7;
  }
}
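
// For illustration, the helpers above synthesize values such as:
//
//   MakeValue<int>()              // 42
//   MakeValue2<int>()             // 7
//   MakeValue<llvm::StringRef>()  // "abc"
//   MakeValue<int*>()             // address of a function-local static
//
// For pointer value types, `MakeValue` and `MakeValue2` return the addresses
// of distinct function-local statics, so the two values are stable and
// unequal.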
// Trait to detect Carbon's `Map` type so the benchmarks can report extra,
// Carbon-specific table metrics for it.
template <typename MapT>
struct IsCarbonMapImpl : std::false_type {};
template <typename KT, typename VT, int MinSmallSize>
struct IsCarbonMapImpl<Map<KT, VT, MinSmallSize>> : std::true_type {};

template <typename MapT>
static constexpr bool IsCarbonMap = IsCarbonMapImpl<MapT>::value;
// A wrapper around the various map types used in these benchmarks that we
// specialize to provide a common API, since the underlying map data structures
// support different APIs. The primary template assumes a roughly
// `std::unordered_map` API design, and types with a different API design are
// supported through specializations.
template <typename MapT>
struct MapWrapperImpl {
  using KeyT = typename MapT::key_type;
  using ValueT = typename MapT::mapped_type;

  MapT m;

  auto BenchContains(KeyT k) -> bool { return m.find(k) != m.end(); }

  auto BenchLookup(KeyT k) -> bool {
    auto it = m.find(k);
    if (it == m.end()) {
      return false;
    }
    return ValueToBool(it->second);
  }

  auto BenchInsert(KeyT k, ValueT v) -> bool {
    auto result = m.insert({k, v});
    return result.second;
  }

  auto BenchUpdate(KeyT k, ValueT v) -> bool {
    auto result = m.insert({k, v});
    result.first->second = v;
    return result.second;
  }

  auto BenchErase(KeyT k) -> bool { return m.erase(k) != 0; }
};

// Explicit (partial) specialization for the Carbon map type that uses its
// different API design.
template <typename KT, typename VT, int MinSmallSize>
struct MapWrapperImpl<Map<KT, VT, MinSmallSize>> {
  using MapT = Map<KT, VT, MinSmallSize>;
  using KeyT = KT;
  using ValueT = VT;

  MapT m;

  auto BenchContains(KeyT k) -> bool { return m.Contains(k); }

  auto BenchLookup(KeyT k) -> bool {
    auto result = m.Lookup(k);
    if (!result) {
      return false;
    }
    return ValueToBool(result.value());
  }

  auto BenchInsert(KeyT k, ValueT v) -> bool {
    auto result = m.Insert(k, v);
    return result.is_inserted();
  }

  auto BenchUpdate(KeyT k, ValueT v) -> bool {
    auto result = m.Update(k, v);
    return result.is_inserted();
  }

  auto BenchErase(KeyT k) -> bool { return m.Erase(k); }
};
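
// As a sketch of how the wrapper is used (illustrative only; the names match
// the types exercised by the benchmarks below):
//
//   MapWrapperImpl<absl::flat_hash_map<int, int>> std_style;
//   std_style.BenchInsert(1, MakeValue<int>());  // routes through `insert`
//   std_style.BenchLookup(1);                    // routes through `find`
//
//   MapWrapperImpl<Map<int, int>> carbon_style;
//   carbon_style.BenchInsert(1, MakeValue<int>());  // routes through `Insert`
//   carbon_style.BenchLookup(1);                    // routes through `Lookup`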
// Provide a way to override the Carbon Map specific benchmark runs with
// another hashtable implementation. When building, you can use one of these
// enum names in a macro define such as `-DCARBON_MAP_BENCH_OVERRIDE=Name` in
// order to trigger a specific override for the `Map` type benchmarks. This is
// used to get before/after runs that compare the performance of Carbon's Map
// versus other implementations.
enum class MapOverride : uint8_t {
  None,
  Abseil,
  LLVM,
  LLVMAndCarbonHash,
};

#ifndef CARBON_MAP_BENCH_OVERRIDE
#define CARBON_MAP_BENCH_OVERRIDE None
#endif
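
// For example, a before/after comparison against Abseil can be produced by
// defining `CARBON_MAP_BENCH_OVERRIDE=Abseil` when compiling this file (for
// instance via a `-D` compiler flag); the specializations below then route the
// `Map<...>` benchmark instantiations through `absl::flat_hash_map`.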
template <typename MapT, MapOverride Override>
struct MapWrapperOverride : MapWrapperImpl<MapT> {};

template <typename KeyT, typename ValueT, int MinSmallSize>
struct MapWrapperOverride<Map<KeyT, ValueT, MinSmallSize>, MapOverride::Abseil>
    : MapWrapperImpl<absl::flat_hash_map<KeyT, ValueT>> {};

template <typename KeyT, typename ValueT, int MinSmallSize>
struct MapWrapperOverride<Map<KeyT, ValueT, MinSmallSize>, MapOverride::LLVM>
    : MapWrapperImpl<llvm::DenseMap<KeyT, ValueT>> {};

template <typename KeyT, typename ValueT, int MinSmallSize>
struct MapWrapperOverride<Map<KeyT, ValueT, MinSmallSize>,
                          MapOverride::LLVMAndCarbonHash>
    : MapWrapperImpl<llvm::DenseMap<KeyT, ValueT, CarbonHashDI<KeyT>>> {};

template <typename MapT>
using MapWrapper =
    MapWrapperOverride<MapT, MapOverride::CARBON_MAP_BENCH_OVERRIDE>;

template <typename MapT>
auto ReportMetrics(const MapWrapper<MapT>& m_wrapper, benchmark::State& state)
    -> void {
  // Report some extra statistics about the Carbon type.
  if constexpr (IsCarbonMap<MapT>) {
    ReportTableMetrics(m_wrapper.m, state);
  }
}
// NOLINTBEGIN(bugprone-macro-parentheses): Parentheses are incorrect here.
#define MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, KT, VT)          \
  BENCHMARK(NAME<Map<KT, VT>>)->Apply(APPLY);                   \
  BENCHMARK(NAME<absl::flat_hash_map<KT, VT>>)->Apply(APPLY);   \
  BENCHMARK(NAME<llvm::DenseMap<KT, VT>>)->Apply(APPLY);        \
  BENCHMARK(NAME<llvm::DenseMap<KT, VT, CarbonHashDI<KT>>>)->Apply(APPLY)
// NOLINTEND(bugprone-macro-parentheses)

#define MAP_BENCHMARK_ONE_OP(NAME, APPLY)                        \
  MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, int, int);              \
  MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, int*, int*);            \
  MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, int, llvm::StringRef);  \
  MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, llvm::StringRef, int)
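
// As an illustration, `MAP_BENCHMARK_ONE_OP(BM_MapContainsHit, HitArgs)` below
// expands to sixteen registrations: each of the four table implementations
// (Carbon `Map`, `absl::flat_hash_map`, `llvm::DenseMap`, and `llvm::DenseMap`
// with `CarbonHashDI`) across the four key/value type combinations, with
// `HitArgs` applied to every instantiation.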
// Benchmark the minimal latency of checking if a key is contained within a
// map, when it *is* definitely in that map. Because this is only really
// measuring the *minimal* latency, it is more similar to a throughput
// benchmark.
//
// While this is structured to observe the latency of testing for presence of a
// key, it is important to understand the reality of what this measures.
// Because the boolean result testing for whether a key is in a map is
// fundamentally provided not by accessing some data, but by branching on data
// to a control flow path which sets the boolean to `true` or `false`, the
// result can be speculatively provided based on predicting the conditional
// branch without waiting for the results of the comparison to become
// available. And because this is a small operation and we arrange for all the
// candidate keys to be present, that branch *should* be predicted extremely
// well. The result is that this measures the un-speculated latency of testing
// for presence, which should be small or zero, and which is why this is
// ultimately more similar to a throughput benchmark.
//
// Because of these measurement oddities, the specific measurements here may
// not be very interesting for predicting real-world performance in any way,
// but they are useful for comparing how 'cheap' the operation is across
// changes to the data structure or between similar data structures with
// similar properties.
template <typename MapT>
static void BM_MapContainsHit(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;

  MapWrapperT m;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), state.range(1));
  for (auto k : keys) {
    m.BenchInsert(k, MakeValue<VT>());
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size;) {
      // We block optimizing `i` as that has proven both more effective at
      // blocking the loop from being optimized away and avoiding disruption of
      // the generated code that we're benchmarking.
      benchmark::DoNotOptimize(i);

      bool result = m.BenchContains(lookup_keys[i]);
      CARBON_DCHECK(result);
      // We use the lookup success to step through keys, establishing a
      // dependency between each lookup. This doesn't fully allow us to measure
      // latency rather than throughput, as noted above.
      i += static_cast<ssize_t>(result);
    }
  }

  ReportMetrics(m, state);
}
MAP_BENCHMARK_ONE_OP(BM_MapContainsHit, HitArgs);
// Similar to `BM_MapContainsHit`, while this is structured as a latency
// benchmark, the critical path is expected to be well predicted and so it
// should turn into something closer to a throughput benchmark.
template <typename MapT>
static void BM_MapContainsMiss(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;

  MapWrapperT m;
  auto [keys, lookup_keys] = GetKeysAndMissKeys<KT>(state.range(0));
  for (auto k : keys) {
    m.BenchInsert(k, MakeValue<VT>());
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size;) {
      benchmark::DoNotOptimize(i);

      bool result = m.BenchContains(lookup_keys[i]);
      CARBON_DCHECK(!result);
      i += static_cast<ssize_t>(!result);
    }
  }

  ReportMetrics(m, state);
}
MAP_BENCHMARK_ONE_OP(BM_MapContainsMiss, SizeArgs);
// This is a genuine latency benchmark. We look up a key in the hashtable and
// use the value associated with that key in the critical path of loading the
// next iteration's key. We still ensure the keys are always present, and so we
// generally expect the data structure branches to be well predicted. But we
// vary the keys aggressively to avoid any prediction artifacts from repeatedly
// examining the same key.
//
// This latency can be very helpful for understanding a range of data structure
// behaviors:
// - Many users of hashtables are directly dependent on the latency of this
//   operation, and this micro-benchmark will reflect the expected latency for
//   them.
// - Showing how latency varies across different sizes of table and different
//   fractions of the table being accessed (and thus needing space in the
//   cache).
//
// However, it remains an ultimately synthetic and unrepresentative benchmark.
// It should primarily be used to understand the relative cost of these
// operations between versions of the data structure or between related data
// structures.
//
// We vary both the number of entries in the table and the number of distinct
// keys used when doing lookups. As the table becomes large, the latter
// dictates the fraction of the table that will be accessed and thus the
// working set size of the benchmark. Querying the same small number of keys in
// even a large table doesn't actually encounter any cache pressure, so only a
// few of these benchmarks will show any effects of the caching subsystem.
template <typename MapT>
static void BM_MapLookupHit(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;

  MapWrapperT m;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), state.range(1));
  for (auto k : keys) {
    m.BenchInsert(k, MakeValue<VT>());
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size;) {
      benchmark::DoNotOptimize(i);

      bool result = m.BenchLookup(lookup_keys[i]);
      CARBON_DCHECK(result);
      i += static_cast<ssize_t>(result);
    }
  }

  ReportMetrics(m, state);
}
MAP_BENCHMARK_ONE_OP(BM_MapLookupHit, HitArgs);
// This is an update throughput benchmark in practice. While whether the key
// was a hit is kept in the critical path, we only use keys that are hits and
// so expect that to be fully predicted and speculated.
//
// However, we expect this fairly closely matches how user code interacts with
// an update-style API. It will have some conditional testing (even if just an
// assert) on whether the key was a hit and otherwise continue executing. As a
// consequence the actual update is expected to not be in a meaningful critical
// path.
//
// This still provides a basic way to measure the cost of this operation,
// especially when comparing between implementations or across different hash
// tables.
template <typename MapT>
static void BM_MapUpdateHit(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;

  MapWrapperT m;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), state.range(1));
  for (auto k : keys) {
    m.BenchInsert(k, MakeValue<VT>());
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size; ++i) {
      benchmark::DoNotOptimize(i);

      bool inserted = m.BenchUpdate(lookup_keys[i], MakeValue2<VT>());
      CARBON_DCHECK(!inserted);
    }
  }

  ReportMetrics(m, state);
}
MAP_BENCHMARK_ONE_OP(BM_MapUpdateHit, HitArgs);
// First erase and then insert the key. The code path will always be the same
// here and so we expect this to largely be a throughput benchmark because of
// branch prediction and speculative execution.
//
// We don't expect erase followed by insertion to be a common user code
// sequence, but we don't have a good way of benchmarking either erase or
// insert in isolation -- each would change the size of the table and thus the
// next iteration's benchmark. And if we try to correct the table size outside
// of the timed region, we end up trying to exclude too fine-grained a region
// from the timers to get good measurement data.
//
// Our solution is to benchmark both erase and insertion back to back. We can
// then get a good profile of the code sequence of each, and at least measure
// the sum cost of these reliably. Careful profiling can help attribute that
// cost between erase and insert in order to understand which of the two
// operations is contributing most to any performance artifacts observed.
template <typename MapT>
static void BM_MapEraseUpdateHit(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;

  MapWrapperT m;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), state.range(1));
  for (auto k : keys) {
    m.BenchInsert(k, MakeValue<VT>());
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size; ++i) {
      benchmark::DoNotOptimize(i);

      m.BenchErase(lookup_keys[i]);
      benchmark::ClobberMemory();

      bool inserted = m.BenchUpdate(lookup_keys[i], MakeValue2<VT>());
      CARBON_DCHECK(inserted);
    }
  }
}
MAP_BENCHMARK_ONE_OP(BM_MapEraseUpdateHit, HitArgs);
// NOLINTBEGIN(bugprone-macro-parentheses): Parentheses are incorrect here.
#define MAP_BENCHMARK_OP_SEQ_SIZE(NAME, KT, VT)                     \
  BENCHMARK(NAME<Map<KT, VT>>)->Apply(SizeArgs);                    \
  BENCHMARK(NAME<absl::flat_hash_map<KT, VT>>)->Apply(SizeArgs);    \
  BENCHMARK(NAME<llvm::DenseMap<KT, VT>>)->Apply(SizeArgs);         \
  BENCHMARK(NAME<llvm::DenseMap<KT, VT, CarbonHashDI<KT>>>)->Apply(SizeArgs)
// NOLINTEND(bugprone-macro-parentheses)

#define MAP_BENCHMARK_OP_SEQ(NAME)                        \
  MAP_BENCHMARK_OP_SEQ_SIZE(NAME, int, int);              \
  MAP_BENCHMARK_OP_SEQ_SIZE(NAME, int*, int*);            \
  MAP_BENCHMARK_OP_SEQ_SIZE(NAME, int, llvm::StringRef);  \
  MAP_BENCHMARK_OP_SEQ_SIZE(NAME, llvm::StringRef, int)
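
// (Functionally, `MAP_BENCHMARK_OP_SEQ(NAME)` is equivalent to
// `MAP_BENCHMARK_ONE_OP(NAME, SizeArgs)`: it registers the same four table
// implementations and key/value combinations, always applying `SizeArgs`.)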
// This is an interesting, somewhat specialized benchmark that measures the
// cost of inserting a sequence of key/value pairs into a table with no
// collisions up to some size and then inserting a colliding key and throwing
// away the table.
//
// This can give an idea of the cost of building up a map of a particular size,
// but without actually using it. Or of algorithms like cycle-detection which
// for some reason need an associative container.
//
// It also covers both the insert-into-an-empty-slot code path that isn't
// covered elsewhere, and the code path for growing a table to a larger size.
//
// Because this benchmark operates on whole maps, we also compute the number of
// probed keys for Carbon's map as that is both a general reflection of the
// efficacy of the underlying hash function, and a direct factor that drives
// the cost of these operations.
template <typename MapT>
static void BM_MapInsertSeq(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;

  constexpr ssize_t LookupKeysSize = 1 << 8;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), LookupKeysSize);

  // Note that we don't force batches that use all the lookup keys because
  // there's no difference in cache usage by covering all the different lookup
  // keys.
  ssize_t i = 0;
  for (auto _ : state) {
    benchmark::DoNotOptimize(i);

    MapWrapperT m;
    for (auto k : keys) {
      bool inserted = m.BenchInsert(k, MakeValue<VT>());
      CARBON_DCHECK(inserted) << "Must be a successful insert!";
    }

    // Now insert a final random repeated key.
    bool inserted = m.BenchInsert(lookup_keys[i], MakeValue2<VT>());
    CARBON_DCHECK(!inserted) << "Must already be in the map!";

    // Rotate through the shuffled keys.
    i = (i + static_cast<ssize_t>(!inserted)) & (LookupKeysSize - 1);
  }

  // It can be easier in some cases to think of this as a key-throughput rate
  // of insertion rather than the latency of inserting N keys, so construct the
  // rate counter as well.
  state.counters["KeyRate"] = benchmark::Counter(
      keys.size(), benchmark::Counter::kIsIterationInvariantRate);

  // Report some extra statistics about the Carbon type.
  if constexpr (IsCarbonMap<MapT>) {
    // Re-build a map outside of the timing loop to look at the statistics
    // rather than the timing.
    MapWrapperT m;
    for (auto k : keys) {
      bool inserted = m.BenchInsert(k, MakeValue<VT>());
      CARBON_DCHECK(inserted) << "Must be a successful insert!";
    }
    ReportMetrics(m, state);

    // Uncomment this call to print out statistics about the index-collisions
    // among these keys for debugging:
    //
    // RawHashtable::DumpHashStatistics(keys);
  }
}
MAP_BENCHMARK_ONE_OP(BM_MapInsertSeq, SizeArgs);

}  // namespace
}  // namespace Carbon